diff --git a/.github/workflows/doc-validator.yml b/.github/workflows/doc-validator.yml index bb074949e29ef..a496861e6e731 100644 --- a/.github/workflows/doc-validator.yml +++ b/.github/workflows/doc-validator.yml @@ -7,7 +7,7 @@ jobs: doc-validator: runs-on: "ubuntu-latest" container: - image: "grafana/doc-validator:v5.0.0" + image: "grafana/doc-validator:v5.1.0" steps: - name: "Checkout code" uses: "actions/checkout@v4" diff --git a/Makefile b/Makefile index 8f345f96d37a6..cadce23fe2624 100644 --- a/Makefile +++ b/Makefile @@ -38,6 +38,7 @@ BUILD_IN_CONTAINER ?= true # ensure you run `make drone` and `make release-workflows` after changing this BUILD_IMAGE_VERSION ?= 0.33.4 +GO_VERSION := 1.22.5 # Docker image info IMAGE_PREFIX ?= grafana @@ -141,7 +142,7 @@ logcli: cmd/logcli/logcli ## build logcli executable logcli-debug: cmd/logcli/logcli-debug ## build debug logcli executable logcli-image: ## build logcli docker image - $(SUDO) docker build -t $(IMAGE_PREFIX)/logcli:$(IMAGE_TAG) -f cmd/logcli/Dockerfile . + $(SUDO) docker build --build-arg=GO_VERSION=$(GO_VERSION) -t $(IMAGE_PREFIX)/logcli:$(IMAGE_TAG) -f cmd/logcli/Dockerfile . cmd/logcli/logcli: CGO_ENABLED=0 go build $(GO_FLAGS) -o $@ ./cmd/logcli @@ -597,9 +598,9 @@ endef # promtail promtail-image: ## build the promtail docker image - $(SUDO) docker build -t $(IMAGE_PREFIX)/promtail:$(IMAGE_TAG) -f clients/cmd/promtail/Dockerfile . + $(SUDO) docker build --build-arg=GO_VERSION=$(GO_VERSION) -t $(IMAGE_PREFIX)/promtail:$(IMAGE_TAG) -f clients/cmd/promtail/Dockerfile . promtail-image-cross: - $(SUDO) $(BUILD_OCI) -t $(IMAGE_PREFIX)/promtail:$(IMAGE_TAG) -f clients/cmd/promtail/Dockerfile.cross . + $(SUDO) $(BUILD_OCI) --build-arg=GO_VERSION=$(GO_VERSION) -t $(IMAGE_PREFIX)/promtail:$(IMAGE_TAG) -f clients/cmd/promtail/Dockerfile.cross . promtail-debug-image: ## build the promtail debug docker image $(SUDO) $(BUILD_OCI) -t $(IMAGE_PREFIX)/promtail:$(IMAGE_TAG)-debug -f clients/cmd/promtail/Dockerfile.debug . @@ -609,47 +610,47 @@ promtail-push: promtail-image-cross # loki loki-image: ## build the loki docker image - $(SUDO) docker build -t $(IMAGE_PREFIX)/loki:$(IMAGE_TAG) -f cmd/loki/Dockerfile . + $(SUDO) docker build --build-arg=GO_VERSION=$(GO_VERSION) -t $(IMAGE_PREFIX)/loki:$(IMAGE_TAG) -f cmd/loki/Dockerfile . loki-image-cross: - $(SUDO) $(BUILD_OCI) -t $(IMAGE_PREFIX)/loki:$(IMAGE_TAG) -f cmd/loki/Dockerfile.cross . + $(SUDO) $(BUILD_OCI) --build-arg=GO_VERSION=$(GO_VERSION) -t $(IMAGE_PREFIX)/loki:$(IMAGE_TAG) -f cmd/loki/Dockerfile.cross . loki-debug-image: ## build the debug loki docker image - $(SUDO) $(BUILD_OCI) -t $(IMAGE_PREFIX)/loki:$(IMAGE_TAG)-debug -f cmd/loki/Dockerfile.debug . + $(SUDO) $(BUILD_OCI) --build-arg=GO_VERSION=$(GO_VERSION) -t $(IMAGE_PREFIX)/loki:$(IMAGE_TAG)-debug -f cmd/loki/Dockerfile.debug . loki-push: loki-image-cross $(call push-image,loki) # loki-canary loki-canary-image: ## build the loki canary docker image - $(SUDO) docker build -t $(IMAGE_PREFIX)/loki-canary:$(IMAGE_TAG) -f cmd/loki-canary/Dockerfile . + $(SUDO) docker build --build-arg=GO_VERSION=$(GO_VERSION) -t $(IMAGE_PREFIX)/loki-canary:$(IMAGE_TAG) -f cmd/loki-canary/Dockerfile . loki-canary-image-cross: - $(SUDO) $(BUILD_OCI) -t $(IMAGE_PREFIX)/loki-canary:$(IMAGE_TAG) -f cmd/loki-canary/Dockerfile.cross . + $(SUDO) $(BUILD_OCI) --build-arg=GO_VERSION=$(GO_VERSION) -t $(IMAGE_PREFIX)/loki-canary:$(IMAGE_TAG) -f cmd/loki-canary/Dockerfile.cross . 
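+# GO_VERSION above is a plain Make variable, so it can also be overridden per
+# invocation when testing another toolchain, for example (hypothetical version
+# shown): make loki-canary-image GO_VERSION=1.22.6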
loki-canary-image-cross-boringcrypto: - $(SUDO) $(BUILD_OCI) -t $(IMAGE_PREFIX)/loki-canary-boringcrypto:$(IMAGE_TAG) -f cmd/loki-canary-boringcrypto/Dockerfile . + $(SUDO) $(BUILD_OCI) --build-arg=GO_VERSION=$(GO_VERSION) -t $(IMAGE_PREFIX)/loki-canary-boringcrypto:$(IMAGE_TAG) -f cmd/loki-canary-boringcrypto/Dockerfile . loki-canary-push: loki-canary-image-cross $(SUDO) $(PUSH_OCI) $(IMAGE_PREFIX)/loki-canary:$(IMAGE_TAG) loki-canary-push-boringcrypto: loki-canary-image-cross-boringcrypto $(SUDO) $(PUSH_OCI) $(IMAGE_PREFIX)/loki-canary-boringcrypto:$(IMAGE_TAG) helm-test-image: ## build the helm test image - $(SUDO) docker build -t $(IMAGE_PREFIX)/loki-helm-test:$(IMAGE_TAG) -f production/helm/loki/src/helm-test/Dockerfile . + $(SUDO) docker build --build-arg=GO_VERSION=$(GO_VERSION) -t $(IMAGE_PREFIX)/loki-helm-test:$(IMAGE_TAG) -f production/helm/loki/src/helm-test/Dockerfile . helm-test-push: helm-test-image ## push the helm test image $(SUDO) $(PUSH_OCI) $(IMAGE_PREFIX)/loki-helm-test:$(IMAGE_TAG) # loki-querytee loki-querytee-image: - $(SUDO) docker build -t $(IMAGE_PREFIX)/loki-query-tee:$(IMAGE_TAG) -f cmd/querytee/Dockerfile . + $(SUDO) docker build --build-arg=GO_VERSION=$(GO_VERSION) -t $(IMAGE_PREFIX)/loki-query-tee:$(IMAGE_TAG) -f cmd/querytee/Dockerfile . loki-querytee-image-cross: - $(SUDO) $(BUILD_OCI) -t $(IMAGE_PREFIX)/loki-query-tee:$(IMAGE_TAG) -f cmd/querytee/Dockerfile.cross . + $(SUDO) $(BUILD_OCI) --build-arg=GO_VERSION=$(GO_VERSION) -t $(IMAGE_PREFIX)/loki-query-tee:$(IMAGE_TAG) -f cmd/querytee/Dockerfile.cross . loki-querytee-push: loki-querytee-image-cross $(SUDO) $(PUSH_OCI) $(IMAGE_PREFIX)/loki-query-tee:$(IMAGE_TAG) # migrate-image migrate-image: - $(SUDO) docker build -t $(IMAGE_PREFIX)/loki-migrate:$(IMAGE_TAG) -f cmd/migrate/Dockerfile . + $(SUDO) docker build --build-arg=GO_VERSION=$(GO_VERSION) -t $(IMAGE_PREFIX)/loki-migrate:$(IMAGE_TAG) -f cmd/migrate/Dockerfile . # LogQL Analyzer logql-analyzer-image: ## build the LogQL Analyzer image - $(SUDO) docker build -t $(IMAGE_PREFIX)/logql-analyzer:$(IMAGE_TAG) -f cmd/logql-analyzer/Dockerfile . + $(SUDO) docker build --build-arg=GO_VERSION=$(GO_VERSION) -t $(IMAGE_PREFIX)/logql-analyzer:$(IMAGE_TAG) -f cmd/logql-analyzer/Dockerfile . logql-analyzer-push: logql-analyzer-image ## push the LogQL Analyzer image $(call push-image,logql-analyzer) @@ -663,7 +664,7 @@ else endif build-image: ensure-buildx-builder - $(SUDO) $(BUILD_OCI) -t $(IMAGE_PREFIX)/loki-build-image:$(IMAGE_TAG) ./loki-build-image + $(SUDO) $(BUILD_OCI) --build-arg=GO_VERSION=$(GO_VERSION) -t $(IMAGE_PREFIX)/loki-build-image:$(IMAGE_TAG) ./loki-build-image build-image-push: build-image ## push the docker build image ifneq (,$(findstring WIP,$(IMAGE_TAG))) @echo "Cannot push a WIP image, commit changes first"; \ diff --git a/clients/cmd/promtail/Dockerfile b/clients/cmd/promtail/Dockerfile index 0ac16613ea51a..3c9088bb83ba9 100644 --- a/clients/cmd/promtail/Dockerfile +++ b/clients/cmd/promtail/Dockerfile @@ -1,4 +1,5 @@ -FROM golang:1.22.5-bookworm as build +ARG GO_VERSION=1.22 +FROM golang:${GO_VERSION}-bookworm as build COPY . 
/src/loki WORKDIR /src/loki diff --git a/clients/cmd/promtail/Dockerfile.cross b/clients/cmd/promtail/Dockerfile.cross index ff9f3ac565e83..572e6394f4597 100644 --- a/clients/cmd/promtail/Dockerfile.cross +++ b/clients/cmd/promtail/Dockerfile.cross @@ -1,8 +1,9 @@ ARG BUILD_IMAGE=grafana/loki-build-image:0.33.4 +ARG GO_VERSION=1.22 # Directories in this file are referenced from the root of the project not this folder # This file is intended to be called from the root like so: # docker build -t grafana/promtail -f clients/cmd/promtail/Dockerfile . -FROM 1.22.2-alpine as goenv +FROM golang:${GO_VERSION}-alpine as goenv RUN go env GOARCH > /goarch && \ go env GOARM > /goarm diff --git a/cmd/logcli/Dockerfile b/cmd/logcli/Dockerfile index ffcbef10ecfb5..999434d075a8b 100644 --- a/cmd/logcli/Dockerfile +++ b/cmd/logcli/Dockerfile @@ -1,13 +1,15 @@ -FROM golang:1.22.5 as build +ARG GO_VERSION=1.22 +FROM golang:${GO_VERSION} as build COPY . /src/loki WORKDIR /src/loki RUN make clean && make BUILD_IN_CONTAINER=false logcli -FROM alpine:3.18.5 -RUN apk add --no-cache ca-certificates +FROM gcr.io/distroless/base-nossl:debug COPY --from=build /src/loki/cmd/logcli/logcli /usr/bin/logcli +SHELL [ "/busybox/sh", "-c" ] +RUN ln -s /busybox/sh /bin/sh ENTRYPOINT [ "/usr/bin/logcli" ] diff --git a/cmd/logql-analyzer/Dockerfile b/cmd/logql-analyzer/Dockerfile index 4908bbb7da1ea..53ba7bee94b4e 100644 --- a/cmd/logql-analyzer/Dockerfile +++ b/cmd/logql-analyzer/Dockerfile @@ -1,13 +1,14 @@ -FROM golang:1.22.5 as build +ARG GO_VERSION=1.22 +FROM golang:${GO_VERSION} as build COPY . /src/loki WORKDIR /src/loki RUN make clean && CGO_ENABLED=0 go build ./cmd/logql-analyzer/ -FROM alpine:3.18.5 - -RUN apk add --no-cache ca-certificates +FROM gcr.io/distroless/base-nossl:debug COPY --from=build /src/loki/logql-analyzer /usr/bin/logql-analyzer +SHELL [ "/busybox/sh", "-c" ] +RUN ln -s /busybox/sh /bin/sh ENTRYPOINT [ "/usr/bin/logql-analyzer" ] diff --git a/cmd/loki-canary-boringcrypto/Dockerfile b/cmd/loki-canary-boringcrypto/Dockerfile index c1217cc3f68ec..e69be2c0aeb63 100644 --- a/cmd/loki-canary-boringcrypto/Dockerfile +++ b/cmd/loki-canary-boringcrypto/Dockerfile @@ -1,12 +1,13 @@ -FROM golang:1.22.5 as build +ARG GO_VERSION=1.22 +FROM golang:${GO_VERSION} as build COPY . /src/loki WORKDIR /src/loki RUN go env GOARCH > /goarch RUN make clean && make GOARCH=$(cat /goarch) BUILD_IN_CONTAINER=true GOEXPERIMENT=boringcrypto loki-canary-boringcrypto -FROM alpine:3.18.5 -RUN apk add --update --no-cache ca-certificates -RUN apk add --no-cache libc6-compat +FROM gcr.io/distroless/base-nossl:debug COPY --from=build /src/loki/cmd/loki-canary-boringcrypto/loki-canary-boringcrypto /usr/bin/loki-canary +SHELL [ "/busybox/sh", "-c" ] +RUN ln -s /busybox/sh /bin/sh ENTRYPOINT [ "/usr/bin/loki-canary" ] diff --git a/cmd/loki-canary/Dockerfile b/cmd/loki-canary/Dockerfile index cf0f60c695e3a..f0dcf02d5d815 100644 --- a/cmd/loki-canary/Dockerfile +++ b/cmd/loki-canary/Dockerfile @@ -1,10 +1,13 @@ -FROM golang:1.22.5 as build +ARG GO_VERSION=1.22 +FROM golang:${GO_VERSION} as build COPY . 
/src/loki WORKDIR /src/loki RUN make clean && make BUILD_IN_CONTAINER=false loki-canary -FROM alpine:3.18.5 -RUN apk add --update --no-cache ca-certificates +FROM gcr.io/distroless/base-nossl:debug + COPY --from=build /src/loki/cmd/loki-canary/loki-canary /usr/bin/loki-canary +SHELL [ "/busybox/sh", "-c" ] +RUN ln -s /busybox/sh /bin/sh ENTRYPOINT [ "/usr/bin/loki-canary" ] diff --git a/cmd/loki-canary/Dockerfile.cross b/cmd/loki-canary/Dockerfile.cross index b56f292e5561d..e1f7b133313a3 100644 --- a/cmd/loki-canary/Dockerfile.cross +++ b/cmd/loki-canary/Dockerfile.cross @@ -1,18 +1,20 @@ ARG BUILD_IMAGE=grafana/loki-build-image:0.33.4 +ARG GO_VERSION=1.22 # Directories in this file are referenced from the root of the project not this folder # This file is intended to be called from the root like so: # docker build -t grafana/promtail -f cmd/promtail/Dockerfile . -FROM golang:1.22.5-alpine as goenv +FROM golang:${GO_VERSION} as goenv RUN go env GOARCH > /goarch && \ go env GOARM > /goarm -FROM --platform=linux/amd64 $BUILD_IMAGE as build +FROM $BUILD_IMAGE as build COPY --from=goenv /goarch /goarm / COPY . /src/loki WORKDIR /src/loki RUN make clean && GOARCH=$(cat /goarch) GOARM=$(cat /goarm) make BUILD_IN_CONTAINER=false loki-canary -FROM alpine:3.18.5 -RUN apk add --update --no-cache ca-certificates +FROM gcr.io/distroless/base-nossl:debug COPY --from=build /src/loki/cmd/loki-canary/loki-canary /usr/bin/loki-canary +SHELL [ "/busybox/sh", "-c" ] +RUN ln -s /busybox/sh /bin/sh ENTRYPOINT [ "/usr/bin/loki-canary" ] diff --git a/cmd/loki/Dockerfile b/cmd/loki/Dockerfile index 6052f41ca54f1..521c591595736 100644 --- a/cmd/loki/Dockerfile +++ b/cmd/loki/Dockerfile @@ -1,22 +1,23 @@ -FROM golang:1.22.5 as build +ARG GO_VERSION=1.22 +FROM golang:${GO_VERSION} as build COPY . /src/loki WORKDIR /src/loki RUN make clean && make BUILD_IN_CONTAINER=false loki -FROM alpine:3.18.5 - -RUN apk add --no-cache ca-certificates libcap +FROM gcr.io/distroless/base-nossl:debug COPY --from=build /src/loki/cmd/loki/loki /usr/bin/loki COPY cmd/loki/loki-docker-config.yaml /etc/loki/local-config.yaml +SHELL [ "/busybox/sh", "-c" ] + RUN addgroup -g 10001 -S loki && \ adduser -u 10001 -S loki -G loki RUN mkdir -p /loki/rules && \ mkdir -p /loki/rules-temp && \ - chown -R loki:loki /etc/loki /loki - + chown -R loki:loki /etc/loki /loki && \ + ln -s /busybox/sh /bin/sh USER 10001 EXPOSE 3100 ENTRYPOINT [ "/usr/bin/loki" ] diff --git a/cmd/loki/Dockerfile.cross b/cmd/loki/Dockerfile.cross index 52b18a302bdbd..da9d358d28c5f 100644 --- a/cmd/loki/Dockerfile.cross +++ b/cmd/loki/Dockerfile.cross @@ -1,28 +1,27 @@ -ARG BUILD_IMAGE=grafana/loki-build-image:0.33.4 +ARG GO_VERSION=1.22 # Directories in this file are referenced from the root of the project not this folder # This file is intended to be called from the root like so: # docker build -t grafana/loki -f cmd/loki/Dockerfile . -FROM golang:1.22.5-alpine as goenv +FROM golang:${GO_VERSION} as goenv RUN go env GOARCH > /goarch && \ go env GOARM > /goarm -FROM --platform=linux/amd64 $BUILD_IMAGE as build -COPY --from=goenv /goarch /goarm / COPY . 
/src/loki WORKDIR /src/loki RUN make clean && GOARCH=$(cat /goarch) GOARM=$(cat /goarm) make BUILD_IN_CONTAINER=false loki -FROM alpine:3.18.5 +FROM gcr.io/distroless/base-nossl:debug -RUN apk add --no-cache ca-certificates - -COPY --from=build /src/loki/cmd/loki/loki /usr/bin/loki +COPY --from=goenv /src/loki/cmd/loki/loki /usr/bin/loki COPY cmd/loki/loki-local-config.yaml /etc/loki/local-config.yaml +SHELL [ "/busybox/sh", "-c" ] + RUN addgroup -g 10001 -S loki && \ adduser -u 10001 -S loki -G loki RUN mkdir -p /loki && \ - chown -R loki:loki /etc/loki /loki + chown -R loki:loki /etc/loki /loki && \ + ln -s /busybox/sh /bin/sh USER 10001 EXPOSE 3100 diff --git a/cmd/loki/Dockerfile.debug b/cmd/loki/Dockerfile.debug index a943cb57f0d16..bb23778d9af33 100644 --- a/cmd/loki/Dockerfile.debug +++ b/cmd/loki/Dockerfile.debug @@ -1,22 +1,22 @@ ARG BUILD_IMAGE=grafana/loki-build-image:0.33.4 +ARG GO_VERSION=1.22 # Directories in this file are referenced from the root of the project not this folder # This file is intended to be called from the root like so: # docker build -t grafana/loki -f cmd/loki/Dockerfile.debug . -FROM golang:1.22.5-alpine as goenv +FROM golang:${GO_VERSION} as goenv RUN go env GOARCH > /goarch && \ go env GOARM > /goarm && \ go install github.com/go-delve/delve/cmd/dlv@latest -FROM --platform=linux/amd64 $BUILD_IMAGE as build +FROM $BUILD_IMAGE as build COPY --from=goenv /goarch /goarm / COPY . /src/loki WORKDIR /src/loki RUN make clean && \ GOARCH=$(cat /goarch) GOARM=$(cat /goarm) make BUILD_IN_CONTAINER=false loki-debug -FROM alpine:3.18.5 -RUN apk add --update --no-cache ca-certificates +FROM gcr.io/distroless/base-nossl:debug COPY --from=build /src/loki/cmd/loki/loki-debug /usr/bin/loki-debug COPY --from=goenv /go/bin/dlv /usr/bin/dlv COPY cmd/loki/loki-docker-config.yaml /etc/loki/local-config.yaml @@ -25,8 +25,9 @@ EXPOSE 3100 # Expose 40000 for delve EXPOSE 40000 -# Allow delve to run on Alpine based containers. -RUN apk add --no-cache libc6-compat +SHELL [ "/busybox/sh", "-c" ] +RUN ln -s /busybox/sh /bin/sh + # Run delve, ending with -- because we pass params via kubernetes, per the docs: # Pass flags to the program you are debugging using --, for example:` diff --git a/cmd/migrate/Dockerfile b/cmd/migrate/Dockerfile index e50554d50d6be..a24697a719f2b 100644 --- a/cmd/migrate/Dockerfile +++ b/cmd/migrate/Dockerfile @@ -1,10 +1,12 @@ -FROM golang:1.22.5 as build +ARG GO_VERSION=1.22 +FROM golang:${GO_VERSION} as build COPY . /src/loki WORKDIR /src/loki RUN make clean && make BUILD_IN_CONTAINER=false migrate -FROM alpine:3.18.5 -RUN apk add --update --no-cache ca-certificates +FROM gcr.io/distroless/base-nossl:debug + COPY --from=build /src/loki/cmd/migrate/migrate /usr/bin/migrate -#ENTRYPOINT [ "/usr/bin/migrate" ] -CMD tail -f /dev/null +SHELL [ "/busybox/sh", "-c" ] +RUN ln -s /busybox/sh /bin/sh +ENTRYPOINT [ "/busybox/tail", "-f", "/dev/null" ] diff --git a/cmd/querytee/Dockerfile b/cmd/querytee/Dockerfile index f2bfcefa02660..ea86fe0249ee0 100644 --- a/cmd/querytee/Dockerfile +++ b/cmd/querytee/Dockerfile @@ -1,10 +1,14 @@ -FROM golang:1.22.5 as build +ARG GO_VERSION=1.22 +FROM golang:${GO_VERSION} as build COPY . 
/src/loki WORKDIR /src/loki RUN make clean && make BUILD_IN_CONTAINER=false loki-querytee -FROM alpine:3.18.5 -RUN apk add --update --no-cache ca-certificates +FROM gcr.io/distroless/base-nossl:debug COPY --from=build /src/loki/cmd/querytee/querytee /usr/bin/querytee + +SHELL [ "/busybox/sh", "-c" ] +RUN ln -s /busybox/sh /bin/sh + ENTRYPOINT [ "/usr/bin/querytee" ] diff --git a/cmd/querytee/Dockerfile.cross b/cmd/querytee/Dockerfile.cross index 005ebabbc2175..e0cec2a4cbddb 100644 --- a/cmd/querytee/Dockerfile.cross +++ b/cmd/querytee/Dockerfile.cross @@ -2,17 +2,19 @@ ARG BUILD_IMAGE=grafana/loki-build-image:0.33.4 # Directories in this file are referenced from the root of the project not this folder # This file is intended to be called from the root like so: # docker build -t grafana/promtail -f cmd/promtail/Dockerfile . -FROM golang:1.22.5-alpine as goenv +ARG GO_VERSION=1.22 +FROM golang:${GO_VERSION} as goenv RUN go env GOARCH > /goarch && \ go env GOARM > /goarm -FROM --platform=linux/amd64 $BUILD_IMAGE as build +FROM $BUILD_IMAGE as build COPY --from=goenv /goarch /goarm / COPY . /src/loki WORKDIR /src/loki RUN make clean && GOARCH=$(cat /goarch) GOARM=$(cat /goarm) make BUILD_IN_CONTAINER=false loki-querytee -FROM alpine:3.18.5 -RUN apk add --update --no-cache ca-certificates +FROM gcr.io/distroless/base-nossl:debug COPY --from=build /src/loki/cmd/querytee/querytee /usr/bin/querytee +SHELL [ "/busybox/sh", "-c" ] +RUN ln -s /busybox/sh /bin/sh ENTRYPOINT [ "/usr/bin/querytee" ] diff --git a/docs/sources/reference/loki-http-api.md b/docs/sources/reference/loki-http-api.md index 6acb11c0e529c..20125fbb6ca21 100644 --- a/docs/sources/reference/loki-http-api.md +++ b/docs/sources/reference/loki-http-api.md @@ -1346,7 +1346,7 @@ PUT /loki/api/v1/delete Create a new delete request for the authenticated tenant. The [log entry deletion]({{< relref "../operations/storage/logs-deletion" >}}) documentation has configuration details. -Log entry deletion is supported _only_ when the BoltDB Shipper is configured for the index store. +Log entry deletion is supported _only_ when TSDB or BoltDB Shipper is configured for the index store. Query parameters: @@ -1386,7 +1386,7 @@ GET /loki/api/v1/delete List the existing delete requests for the authenticated tenant. The [log entry deletion]({{< relref "../operations/storage/logs-deletion" >}}) documentation has configuration details. -Log entry deletion is supported _only_ when the BoltDB Shipper is configured for the index store. +Log entry deletion is supported _only_ when TSDB or BoltDB Shipper is configured for the index store. List the existing delete requests using the following API: @@ -1425,7 +1425,7 @@ The [log entry deletion]({{< relref "../operations/storage/logs-deletion" >}}) d Loki allows cancellation of delete requests until the requests are picked up for processing. It is controlled by the `delete_request_cancel_period` YAML configuration or the equivalent command line option when invoking Loki. To cancel a delete request that has been picked up for processing or is partially complete, pass the `force=true` query parameter to the API. -Log entry deletion is supported _only_ when the BoltDB Shipper is configured for the index store. +Log entry deletion is supported _only_ when TSDB or BoltDB Shipper is configured for the index store. 
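+
+A forced cancellation therefore combines the `request_id` and `force` query parameters, as in the following sketch (the host, tenant, and request ID are placeholders):
+
+```bash
+curl -X DELETE \
+  -H 'X-Scope-OrgID: <tenant>' \
+  'http://localhost:3100/loki/api/v1/delete?request_id=<request_id>&force=true'
+```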
Cancel a delete request using this compactor endpoint: diff --git a/docs/sources/send-data/fluentbit/_index.md b/docs/sources/send-data/fluentbit/_index.md index b981445a19ace..2c2845d766d3c 100644 --- a/docs/sources/send-data/fluentbit/_index.md +++ b/docs/sources/send-data/fluentbit/_index.md @@ -12,6 +12,8 @@ weight: 500 This plugin has more configuration options compared to the built-in Fluent Bit Loki plugin. You can define which log files you want to collect using the [`Tail`](https://docs.fluentbit.io/manual/pipeline/inputs/tail) or [`Stdin`](https://docs.fluentbit.io/manual/pipeline/inputs/standard-input) data pipeline input. Additionally, Fluent Bit supports multiple `Filter` and `Parser` plugins (`Kubernetes`, `JSON`, etc.) to structure and alter log lines. +{{< youtube id="s43IBSVyTpQ" >}} + ## Usage ### Docker diff --git a/docs/sources/send-data/fluentd/_index.md b/docs/sources/send-data/fluentd/_index.md index 6adcca920bd44..a42cf4d49b142 100644 --- a/docs/sources/send-data/fluentd/_index.md +++ b/docs/sources/send-data/fluentd/_index.md @@ -14,6 +14,8 @@ instance or [Grafana Cloud](/products/cloud/). The plugin source code is in the [fluentd directory of the repository](https://github.com/grafana/loki/tree/main/clients/cmd/fluentd). +{{< youtube id="s43IBSVyTpQ" >}} + ## Installation ### Local diff --git a/docs/sources/send-data/otel/native_otlp_vs_loki_exporter.md b/docs/sources/send-data/otel/native_otlp_vs_loki_exporter.md new file mode 100644 index 0000000000000..caad222ba27d9 --- /dev/null +++ b/docs/sources/send-data/otel/native_otlp_vs_loki_exporter.md @@ -0,0 +1,107 @@ +--- +title: How is the native OTLP endpoint different from the Loki Exporter +menuTitle: Native OTLP endpoint vs Loki Exporter +description: Native OTLP endpoint vs Loki Exporter +weight: 251 +--- + +# How is the native OTLP endpoint different from the Loki Exporter + +## Introduction + +OpenTelemetry (OTel) is quickly becoming an industry standard with increasing adoption. Prior to the Loki 3.0 release, there was no native support for ingesting OTel logs to Loki, which led to the creation of the [LokiExporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/lokiexporter/README.md). While the LokiExporter got the job done of ingesting OTel logs to Loki, it did not provide a native user experience, and the querying experience was not optimal. As part of our effort to improve the user experience with OTel, we added native OTel log ingestion support to Loki with the 3.0 release. + +## What has changed? + +### Formatting of logs + +[LokiExporter's README](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/lokiexporter/README.md) explains in detail the formatting of OTel logs ingested via LokiExporter, while Loki’s native OTel log ingestion endpoint is described in detail in its [documentation](https://grafana.com/docs/loki//send-data/otel/). Here is a summary of how the logs are formatted between the two: + +**LokiExporter** + +- Index Labels: + - It supports label control via hints set by the OTel client/collector. + - Default labels: + - job=service.namespace/service.name + - instance=service.instance.id + - exporter=OTLP + - level=severity +- Log Body: Encodes log records and attributes in JSON (default) or logfmt format. + +**Loki’s native OTel log ingestion endpoint** + +- Index Labels: + - It supports label control via per-tenant OTLP configuration in Loki. 
+ - By default, it picks some pre-configured resource attributes as index labels as explained [here](https://grafana.com/docs/loki//send-data/otel/#format-considerations). +- LogLine: Stringified LogRecord.Body. +- Structured Metadata: Anything not stored in Index labels and LogLine gets stored as [Structured Metadata](https://grafana.com/docs/loki//get-started/labels/structured-metadata/). + +### Sample Log + +Let us take the following sample OTel log and see how it would look after ingestion in both formats with the default configuration: +- Resource Attributes: + - service.name: "auth" + - service.namespace: "dev" + - service.kind: "app" +- Log Record: + - Timestamp: 1715247552000000000 + - Body: "user logged in" + - Severity Text: "INFO" + - Attributes: + - email: "foo@bar.com" + +**Ingested with LokiExporter:** + +- Index Labels: + - job="dev/auth" + - exporter="OTLP" +- Log Body: `{"body":"user logged in","severity":"INFO","attributes":{"email":"foo@bar.com"}, "resources":{"service.kind":"app","service.name":"auth","service.namespace":"dev"}}` + +**Ingested with Loki’s native OTel endpoint:** + +- Index Labels: + - service_name="auth" + - service_namespace="dev" +- Log Body: "user logged in" +- Structured Metadata: + - service_kind: "app" + - severity_text: "INFO" + - email: "foo@bar.com" + +### Querying experience + +As seen earlier, LokiExporter encodes data into a JSON or logfmt blob, which requires decoding it at query time to interact with OTel attributes. +However, the native OTel endpoint doesn't do any encoding of data before storing it. It leverages [Structured Metadata](https://grafana.com/docs/loki//get-started/labels/structured-metadata/), which makes it easier to interact with OTel attributes without having to use any parsers in the queries. +Taking the above-ingested log line, let us look at how the querying experience compares between the two across various scenarios: + +**Query all logs without any filters** + +- Ingested with LokiExporter: `{job="dev/auth"}` +- Ingested with Loki’s native OTel endpoint: `{service_name="auth", service_namespace="dev"}` + +**Query logs with severity INFO** + +- Ingested with LokiExporter: `{job="dev/auth"} | json | severity="INFO"` +- Ingested with Loki’s native OTel endpoint: `{service_name="auth", service_namespace="dev"} | severity_text="INFO"` + +**Display log message as log line in logs query results** + +- Ingested with LokiExporter: `{job="dev/auth"} | json | line_format "{{.body}}"` +- Ingested with Loki’s native OTel endpoint: `{service_name="auth", service_namespace="dev"}` + +**Count login events in the last hour by email** + +- Ingested with LokiExporter: `sum(count_over_time({job="dev/auth"} |= "user logged in" | json[1h])) by (email)` + +- Ingested with Loki’s native OTel endpoint: `sum(count_over_time({service_name="auth", service_namespace="dev"} |= "user logged in"[1h])) by (email)` + +## Benefits of switching from LokiExporter to the native OTel endpoint + +- **Future improvements:** There is an ongoing discussion about deprecating LokiExporter, which means it would stop receiving future enhancements. Loki’s native OTel endpoint represents the future of our product, and all future development and enhancements will be focused there. Upgrading to the native endpoint will ensure you benefit from the latest enhancements and the best possible user experience. 
+- **Simplified client config:** LokiExporter requires setting hints for managing stream labels, which defeats the purpose of choosing OTel in the first place since you can’t switch from one OTel-compatible storage to another without having to reconfigure your clients. With Loki’s native OTel endpoint, there is no added complexity in setting hints for your client to manage the labels. +- **Simplified querying:** Loki’s native OTel endpoint leverages Structured Metadata, which makes it easier to reference the attributes and other fields from the OTel LogRecord without having to use json or logfmt parsers for decoding the data at query time. +- **More modern, high-context data model:** Better aligned with modern observability practices than the older JSON-based model. + +## What do you need to do to switch from LokiExporter to the native OTel ingestion format? + +- Point your OpenTelemetry Collector to Loki's native OTel ingestion endpoint as explained [here](https://grafana.com/docs/loki//send-data/otel/#loki-configuration). +- Rewrite your LogQL queries in various places, including dashboards, alerts, and starred queries in Grafana Explore, to query OTel logs as per the new format. diff --git a/docs/sources/setup/install/helm/concepts.md b/docs/sources/setup/install/helm/concepts.md index 581498af89b23..ceaff0a027c59 100644 --- a/docs/sources/setup/install/helm/concepts.md +++ b/docs/sources/setup/install/helm/concepts.md @@ -15,17 +15,27 @@ keywords: This section describes the components installed by the Helm Chart. -## Loki read and write +## Three methods of deployment -By default Loki will be installed in the scalable mode. This consists of a read and write component. These can be scaled-out independently. +The Loki chart supports three methods of deployment: +- [Monolithic]({{< relref "./install-monolithic" >}}) +- [Simple Scalable]({{< relref "./install-scalable" >}}) +- [Microservices]({{< relref "./install-microservices" >}}) -## Dashboards +By default, the chart installs in [Simple Scalable]({{< relref "./install-scalable" >}}) mode. This is the recommended method for most users. To understand the differences between deployment methods, see the [Loki deployment modes]({{< relref "../../../get-started/deployment-modes" >}}) documentation. + +## Monitoring Loki + +The Loki Helm chart does not deploy self-monitoring by default. Loki clusters can be monitored using the meta-monitoring stack, which monitors the logs, metrics, and traces of the Loki cluster. There are two deployment options for this stack; see the installation instructions in [Monitoring]({{< relref "./monitor-and-alert" >}}). + +{{< admonition type="note" >}} +The meta-monitoring stack replaces the monitoring section of the Loki Helm chart, which is now **DEPRECATED**. See the [Monitoring]({{< relref "./monitor-and-alert" >}}) section for more information. +{{< /admonition >}} -This chart includes dashboards for monitoring Loki. These require the scrape configs defined in the `monitoring.serviceMonitor` and `monitoring.selfMonitoring` sections described below. The dashboards are deployed via a config map which can be mounted on a Grafana instance. The Dashboard requires an installation of the Grafana Agent and the Prometheus operator. The agent is installed with this chart. ## Canary -This chart installs the [canary]({{< relref "../../../operations/loki-canary" >}}) and its alerts by default. This is another tool to verify the Loki deployment is in a healthy state. 
It can be disabled with `monitoring.lokiCanary.enabled=false`. +This chart installs the [Loki Canary app]({{< relref "../../../operations/loki-canary" >}}) by default. This is another tool to verify that the Loki deployment is in a healthy state. It can be disabled by setting `lokiCanary.enabled=false`. ## Gateway @@ -38,4 +48,4 @@ If NetworkPolicies are enabled, they are more restrictive if the gateway is enabled. ## Caching -By default, this chart configures in-memory caching. If that caching does not work for your deployment, you should setup memcache. +By default, this chart configures in-memory caching. If that caching does not work for your deployment, you should set up [memcache]({{< relref "../../../operations/caching" >}}). diff --git a/docs/sources/setup/install/helm/reference.md b/docs/sources/setup/install/helm/reference.md index e319086bf8dd2..eeba5cd826e0b 100644 --- a/docs/sources/setup/install/helm/reference.md +++ b/docs/sources/setup/install/helm/reference.md @@ -6372,6 +6372,24 @@ false
 null
 
+ monitoring.rules
+ object
+ DEPRECATED Recording rules for monitoring Loki, required for some dashboards
+{
+  "additionalGroups": [],
+  "additionalRuleLabels": {},
+  "alerting": true,
+  "annotations": {},
+  "disabled": {},
+  "enabled": false,
+  "labels": {},
+  "namespace": null
+}
+
@@ -6444,6 +6462,60 @@ false
 null
 
+ monitoring.selfMonitoring
+ object
+ DEPRECATED Self monitoring determines whether Loki should scrape its own logs. This feature currently relies on the Grafana Agent Operator being installed, which is installed by default using the grafana-agent-operator sub-chart. It will create custom resources for GrafanaAgent, LogsInstance, and PodLogs to configure scrape configs to scrape its own logs with the labels expected by the included dashboards.
+{
+  "enabled": false,
+  "grafanaAgent": {
+    "annotations": {},
+    "enableConfigReadAPI": false,
+    "installOperator": false,
+    "labels": {},
+    "priorityClassName": null,
+    "resources": {},
+    "tolerations": []
+  },
+  "logsInstance": {
+    "annotations": {},
+    "clients": null,
+    "labels": {}
+  },
+  "podLogs": {
+    "additionalPipelineStages": [],
+    "annotations": {},
+    "apiVersion": "monitoring.grafana.com/v1alpha1",
+    "labels": {},
+    "relabelings": []
+  },
+  "tenant": {
+    "name": "self-monitoring",
+    "password": null,
+    "secretNamespace": "{{ .Release.Namespace }}"
+  }
+}
+
+ monitoring.selfMonitoring.grafanaAgent
+ object
+ DEPRECATED Grafana Agent configuration
+{
+  "annotations": {},
+  "enableConfigReadAPI": false,
+  "installOperator": false,
+  "labels": {},
+  "priorityClassName": null,
+  "resources": {},
+  "tolerations": []
+}
+
@@ -6467,7 +6539,7 @@ false
 monitoring.selfMonitoring.grafanaAgent.installOperator
 bool
- Controls whether to install the Grafana Agent Operator and its CRDs. Note that helm will not install CRDs if this flag is enabled during an upgrade. In that case install the CRDs manually from https://github.com/grafana/agent/tree/main/production/operator/crds
+ DEPRECATED Controls whether to install the Grafana Agent Operator and its CRDs. Note that helm will not install CRDs if this flag is enabled during an upgrade. In that case install the CRDs manually from https://github.com/grafana/agent/tree/main/production/operator/crds
 false
 
diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md index 0a35ce73518f0..c0e830fce7757 100644 --- a/docs/sources/shared/configuration.md +++ b/docs/sources/shared/configuration.md @@ -114,6 +114,31 @@ Pass the `-config.expand-env` flag at the command line to enable this way of set # querier. [querier: ] +querier_rf1: + # Enable the RF1 querier. If set, replaces the usual querier with an RF-1 + # querier when using the 'ALL' target. + # CLI flag: -querier-rf1.enabled + [enabled: | default = false] + + # Time to wait before sending more than the minimum successful query requests. + # CLI flag: -querier-rf1.extra-query-delay + [extra_query_delay: | default = 0s] + + engine: + # The maximum amount of time to look back for log lines. Used only for + # instant log queries. + # CLI flag: -querier-rf1.engine.max-lookback-period + [max_look_back_period: | default = 30s] + + # The maximum number of queries that can be simultaneously processed by the + # querier. + # CLI flag: -querier-rf1.max-concurrent + [max_concurrent: | default = 4] + + # When true, querier limits sent via a header are enforced. + # CLI flag: -querier-rf1.per-request-limits-enabled + [per_request_limits_enabled: | default = false] + # The query_scheduler block configures the Loki query scheduler. When configured # it separates the tenant query queues from the query-frontend. [query_scheduler: ] @@ -845,6 +870,33 @@ compactor_grpc_client: # a ring unless otherwise specified in the component's configuration section. [memberlist: ] +metastore: + # CLI flag: -metastore.data-dir + [data_dir: | default = "./data-metastore/data"] + + raft: + # CLI flag: -metastore.raft.dir + [dir: | default = "./data-metastore/raft"] + + # CLI flag: -metastore.raft.bootstrap-peers + [bootstrap_peers: | default = []] + + # CLI flag: -metastore.raft.server-id + [server_id: | default = "localhost:9099"] + + # CLI flag: -metastore.raft.bind-address + [bind_address: | default = "localhost:9099"] + + # CLI flag: -metastore.raft.advertise-address + [advertise_address: | default = "localhost:9099"] + +metastore_client: + # CLI flag: -metastore.address + [address: | default = "localhost:9095"] + + # Configures the gRPC client used to communicate with the metastore. + [grpc_client_config: ] + # Configuration for 'runtime config' module, responsible for reloading runtime # configuration file. [runtime_config: ] @@ -2598,6 +2650,8 @@ The `frontend_worker` configures the worker - running within the Loki querier - # Configures the querier gRPC client used to communicate with the # query-scheduler. This can't be used in conjunction with 'grpc_client_config'. 
+# The CLI flags prefix for this block configuration is: +# metastore.grpc-client-config [query_scheduler_grpc_client: ] ``` @@ -2656,6 +2710,7 @@ The `grpc_client` block configures the gRPC client used to communicate between a - `frontend.grpc-client-config` - `ingester-rf1.client` - `ingester.client` +- `metastore.grpc-client-config` - `pattern-ingester.client` - `querier.frontend-client` - `querier.frontend-grpc-client` diff --git a/go.mod b/go.mod index 6387a6ebab063..5f62193aebcc4 100644 --- a/go.mod +++ b/go.mod @@ -23,11 +23,11 @@ require ( github.com/bmatcuk/doublestar v1.3.4 github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b github.com/cespare/xxhash v1.1.0 - github.com/cespare/xxhash/v2 v2.2.0 + github.com/cespare/xxhash/v2 v2.3.0 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf github.com/cristalhq/hedgedhttp v0.9.1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc - github.com/docker/docker v25.0.3+incompatible + github.com/docker/docker v25.0.5+incompatible github.com/docker/go-plugins-helpers v0.0.0-20211224144127-6eecb7beb651 github.com/drone/envsubst v1.0.3 github.com/dustin/go-humanize v1.0.1 @@ -42,12 +42,12 @@ require ( github.com/gocql/gocql v0.0.0-20200526081602-cd04bd7f22a7 github.com/gogo/protobuf v1.3.2 // remember to update loki-build-image/Dockerfile too github.com/gogo/status v1.1.1 - github.com/golang/protobuf v1.5.3 + github.com/golang/protobuf v1.5.4 github.com/golang/snappy v0.0.4 github.com/google/go-cmp v0.6.0 github.com/google/renameio/v2 v2.0.0 github.com/google/uuid v1.6.0 - github.com/gorilla/mux v1.8.0 + github.com/gorilla/mux v1.8.1 github.com/gorilla/websocket v1.5.0 github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 github.com/grafana/dskit v0.0.0-20240626184720-35810fdf1c6d @@ -96,7 +96,7 @@ require ( github.com/tonistiigi/fifo v0.0.0-20190226154929-a9fb20d87448 github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/xdg-go/scram v1.1.2 - go.etcd.io/bbolt v1.3.8 + go.etcd.io/bbolt v1.3.10 go.uber.org/atomic v1.11.0 go.uber.org/goleak v1.3.0 golang.org/x/crypto v0.24.0 @@ -129,6 +129,8 @@ require ( github.com/grafana/jsonparser v0.0.0-20240209175146-098958973a2d github.com/grafana/loki/pkg/push v0.0.0-20231124142027-e52380921608 github.com/hashicorp/golang-lru/v2 v2.0.7 + github.com/hashicorp/raft v1.7.0 + github.com/hashicorp/raft-wal v0.4.1 github.com/heroku/x v0.0.61 github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db @@ -151,9 +153,14 @@ require ( ) require ( + github.com/benbjohnson/immutable v0.4.0 // indirect + github.com/containerd/containerd v1.7.20 // indirect + github.com/coreos/etcd v3.3.27+incompatible // indirect + github.com/coreos/pkg v0.0.0-20220810130054-c7d1c02cb6cf // indirect github.com/dlclark/regexp2 v1.4.0 // indirect github.com/go-kit/kit v0.12.0 // indirect github.com/go-ole/go-ole v1.2.6 // indirect + github.com/hashicorp/go-msgpack/v2 v2.1.1 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/pires/go-proxyproto v0.7.0 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect @@ -170,7 +177,6 @@ require ( cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/iam v1.1.6 // indirect cloud.google.com/go/longrunning v0.5.5 // indirect - github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // 
indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect @@ -187,7 +193,7 @@ require ( github.com/Code-Hex/go-generics-cache v1.3.1 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.2.0 // indirect - github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect github.com/alecthomas/chroma v0.10.0 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect @@ -210,7 +216,7 @@ require ( github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe // indirect github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa // indirect - github.com/containerd/fifo v1.0.0 // indirect + github.com/containerd/fifo v1.1.0 // indirect github.com/containerd/log v0.1.0 // indirect github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect @@ -219,7 +225,7 @@ require ( github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/digitalocean/godo v1.109.0 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect - github.com/distribution/reference v0.5.0 // indirect + github.com/distribution/reference v0.6.0 // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-metrics v0.0.1 // indirect github.com/docker/go-units v0.5.0 // indirect @@ -266,7 +272,7 @@ require ( github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-hclog v1.6.3 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect - github.com/hashicorp/go-msgpack v0.5.5 // indirect + github.com/hashicorp/go-msgpack v1.1.5 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-retryablehttp v0.7.7 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect @@ -302,7 +308,7 @@ require ( github.com/morikuni/aec v1.0.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.0.2 // indirect + github.com/opencontainers/image-spec v1.1.0 // indirect github.com/oschwald/maxminddb-golang v1.11.0 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect diff --git a/go.sum b/go.sum index ea4dc9a96737a..8071a7676d1d4 100644 --- a/go.sum +++ b/go.sum @@ -262,8 +262,8 @@ github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee/go.mod h1:jDA6v0T github.com/Microsoft/ApplicationInsights-Go v0.4.2/go.mod h1:CukZ/G66zxXtI+h/VcVn3eVVDGDHfXM2zVILF7bMmsg= github.com/Microsoft/go-winio v0.4.3/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.9/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler 
v1.0.1/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= @@ -388,6 +388,8 @@ github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiU github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/immutable v0.4.0 h1:CTqXbEerYso8YzVPxmWxh2gnoRQbbB9X1quUC8+vGZA= +github.com/benbjohnson/immutable v0.4.0/go.mod h1:iAr8OjJGLnLmVUr9MZ/rz4PWUy6Ouc2JLYuMArmvAJM= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -422,8 +424,8 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= @@ -455,14 +457,18 @@ github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE github.com/coder/quartz v0.1.0 h1:cLL+0g5l7xTf6ordRnUMMiZtRE8Sq5LxpghS63vEXrQ= github.com/coder/quartz v0.1.0/go.mod h1:vsiCc+AHViMKH2CQpGIpFgdHIEQsxwm8yCscqKmzbRA= github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/fifo v1.0.0 h1:6PirWBr9/L7GDamKr+XM0IeUFXu5mf3M/BPpH9gaLBU= -github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/containerd v1.7.20 h1:Sl6jQYk3TRavaU83h66QMbI2Nqg9Jm6qzwX57Vsn1SQ= +github.com/containerd/containerd v1.7.20/go.mod h1:52GsS5CwquuqPuLncsXwG0t2CiUce+KsNHJZQJvAgR0= +github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY= +github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/coredns/coredns v1.6.6/go.mod h1:Bdcnka9HmKGYj12ZIDF3lpQSfDHSsMc85Wj9xEyZUts= github.com/coredns/federation v0.0.0-20190818181423-e032b096babe/go.mod h1:MoqTEFX8GlnKkyq8eBCF94VzkNAOgjdlCJ+Pz/oCLPk= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.27+incompatible h1:QIudLb9KeBsE5zyYxd1mjzRSkzLg9Wf9QlRwFgd6oTA= 
+github.com/coreos/etcd v3.3.27+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -478,6 +484,8 @@ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20220810130054-c7d1c02cb6cf h1:GOPo6vn/vTN+3IwZBvXX0y5doJfSC7My0cdzelyOCsQ= +github.com/coreos/pkg v0.0.0-20220810130054-c7d1c02cb6cf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037/go.mod h1:TWI8EKQMs5u5jLKW/tsb9VwauIrMIxQG1r5fMsswK5U= github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c= github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs= @@ -518,8 +526,8 @@ github.com/digitalocean/godo v1.109.0/go.mod h1:R6EmmWI8CT1+fCtjWY9UCB+L5uufuZH1 github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= -github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= -github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dlclark/regexp2 v1.4.0 h1:F1rxgk7p4uKjwIQxBs9oAXe5CqrXlCduYEJvrF4u93E= github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= github.com/dnaeon/go-vcr v0.0.0-20180814043457-aafff18a5cc2/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= @@ -528,8 +536,8 @@ github.com/dnsimple/dnsimple-go v0.30.0/go.mod h1:O5TJ0/U6r7AfT8niYNlmohpLbCSG+c github.com/dnstap/golang-dnstap v0.0.0-20170829151710-2cf77a2b5e11/go.mod h1:s1PfVYYVmTMgCSPtho4LKBDecEHJWtiVDPNv78Z985U= github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v17.12.0-ce-rc1.0.20200916142827-bd33bbf0497b+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v25.0.3+incompatible h1:D5fy/lYmY7bvZa0XTZ5/UJPljor41F+vdyJG5luQLfQ= -github.com/docker/docker v25.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod 
h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= @@ -897,8 +905,8 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -1007,8 +1015,8 @@ github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51 github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= @@ -1096,9 +1104,12 @@ github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjh github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g= github.com/hashicorp/go-memdb v1.3.4/go.mod h1:uBTr1oQbtuMgd1SSGoR8YV27eT3sBHbYiNm53bMpgSg= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v1.1.5 h1:9byZdVjKTe5mce63pRVNP1L7UAmdHOTEMGehn6KvJWs= +github.com/hashicorp/go-msgpack v1.1.5/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4= github.com/hashicorp/go-msgpack/v2 v2.0.0/go.mod h1:JIxYkkFJRDDRSoWQBSh7s9QAVThq+82iWmUpmE4jKak= +github.com/hashicorp/go-msgpack/v2 v2.1.1 h1:xQEY9yB2wnHitoSzk/B9UjXWRQ67QKu5AOm8aFp8N3I= +github.com/hashicorp/go-msgpack/v2 v2.1.1/go.mod h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= @@ -1154,11 +1165,15 @@ github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7H github.com/hashicorp/raft v1.1.1/go.mod 
h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= github.com/hashicorp/raft v1.2.0/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= github.com/hashicorp/raft v1.3.11/go.mod h1:J8naEwc6XaaCfts7+28whSeRvCqTd6e20BlCU3LtEO4= +github.com/hashicorp/raft v1.7.0 h1:4u24Qn6lQ6uwziM++UgsyiT64Q8GyRn43CV41qPiz1o= +github.com/hashicorp/raft v1.7.0/go.mod h1:N1sKh6Vn47mrWvEArQgILTyng8GoDRNYlgKyK7PMjs0= github.com/hashicorp/raft-autopilot v0.1.6/go.mod h1:Af4jZBwaNOI+tXfIqIdbcAnh/UyyqIMj/pOISIfhArw= github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= github.com/hashicorp/raft-boltdb v0.0.0-20210409134258-03c10cc3d4ea/go.mod h1:qRd6nFJYYS6Iqnc/8HcUmko2/2Gw8qTFEmxDLii6W5I= github.com/hashicorp/raft-boltdb v0.0.0-20211202195631-7d34b9fb3f42/go.mod h1:wcXL8otVu5cpJVLjcmq7pmfdRCdaP+xnvu7WQcKJAhs= github.com/hashicorp/raft-boltdb/v2 v2.2.2/go.mod h1:N8YgaZgNJLpZC+h+by7vDu5rzsRgONThTEeUS3zWbfY= +github.com/hashicorp/raft-wal v0.4.1 h1:aU8XZ6x8R9BAIB/83Z1dTDtXvDVmv9YVYeXxd/1QBSA= +github.com/hashicorp/raft-wal v0.4.1/go.mod h1:A6vP5o8hGOs1LHfC1Okh9xPwWDcmb6Vvuz/QyqUXlOE= github.com/hashicorp/serf v0.8.1/go.mod h1:h/Ru6tmZazX7WO/GDmwdpS975F019L4t5ng5IgwbNrE= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= @@ -1493,8 +1508,8 @@ github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= -github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= @@ -1840,8 +1855,8 @@ go.einride.tech/aip v0.66.0/go.mod h1:qAhMsfT7plxBX+Oy7Huol6YUvZ0ZzdUz26yZsQwfl1 go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA= -go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= +go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0= +go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd v0.5.0-alpha.5.0.20190917205325-a14579fbfb1a/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd/api/v3 v3.5.4 h1:OHVyt3TopwtUQ2GKdd5wu3PmmipR4FTwCqoEjSyRdIc= @@ -2398,8 +2413,9 @@ 
golang.zx2c4.com/wireguard v0.0.20200121/go.mod h1:P2HsVp8SKwZEufsnezXZA4GRX/T49 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4/go.mod h1:UdS9frhv65KTfwxME1xE8+rHYoFpbm36gOud1GhBe9c= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.6.2 h1:4r+yNT0+8SWcOkXP+63H2zQbN+USnC73cjGUxnDF94Q= gonum.org/v1/gonum v0.6.2/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= +gonum.org/v1/gonum v0.12.0 h1:xKuo6hzt+gMav00meVPUlXwSdoEJP46BR+wdxQEFK2o= +gonum.org/v1/gonum v0.12.0/go.mod h1:73TDxJfAAHeA8Mk9mf8NlIppyhQNo5GLTcYeqgo2lvY= gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= diff --git a/loki-build-image/Dockerfile b/loki-build-image/Dockerfile index 25c95578e4534..6b49181606c48 100644 --- a/loki-build-image/Dockerfile +++ b/loki-build-image/Dockerfile @@ -4,9 +4,9 @@ # tag of the Docker image in `../.drone/drone.jsonnet` and run `make drone`. # See ../docs/sources/community/maintaining/release-loki-build-image.md for instructions # on how to publish a new build image. - +ARG GO_VERSION=1.22 # Install helm (https://helm.sh/) and helm-docs (https://github.com/norwoodj/helm-docs) for generating Helm Chart reference. -FROM golang:1.22.5-bookworm as helm +FROM golang:${GO_VERSION}-bookworm as helm ARG TARGETARCH ARG HELM_VER="v3.2.3" RUN curl -L "https://get.helm.sh/helm-${HELM_VER}-linux-$TARGETARCH.tar.gz" | tar zx && \ @@ -38,7 +38,7 @@ RUN apk add --no-cache curl && \ FROM alpine:3.18.6 as docker RUN apk add --no-cache docker-cli docker-cli-buildx -FROM golang:1.22.5-bookworm as drone +FROM golang:${GO_VERSION}-bookworm as drone ARG TARGETARCH RUN curl -L "https://github.com/drone/drone-cli/releases/download/v1.7.0/drone_linux_$TARGETARCH".tar.gz | tar zx && \ install -t /usr/local/bin drone @@ -48,35 +48,35 @@ RUN curl -L "https://github.com/drone/drone-cli/releases/download/v1.7.0/drone_l # Error: # github.com/fatih/faillint@v1.5.0 requires golang.org/x/tools@v0.0.0-20200207224406-61798d64f025 # (not golang.org/x/tools@v0.0.0-20190918214920-58d531046acd from golang.org/x/tools/cmd/goyacc@58d531046acdc757f177387bc1725bfa79895d69) -FROM golang:1.22.5-bookworm as faillint +FROM golang:${GO_VERSION}-bookworm as faillint RUN GO111MODULE=on go install github.com/fatih/faillint@v1.12.0 RUN GO111MODULE=on go install golang.org/x/tools/cmd/goimports@v0.7.0 -FROM golang:1.22.5-bookworm as delve +FROM golang:${GO_VERSION}-bookworm as delve RUN GO111MODULE=on go install github.com/go-delve/delve/cmd/dlv@latest # Install ghr used to push binaries and template the release # This collides with the version of go tools used in the base image, thus we install it in its own image and copy it over. -FROM golang:1.22.5-bookworm as ghr +FROM golang:${GO_VERSION}-bookworm as ghr RUN GO111MODULE=on go install github.com/tcnksm/ghr@9349474 # Install nfpm (https://nfpm.goreleaser.com) for creating .deb and .rpm packages. 
-FROM golang:1.22.5-bookworm as nfpm +FROM golang:${GO_VERSION}-bookworm as nfpm RUN GO111MODULE=on go install github.com/goreleaser/nfpm/v2/cmd/nfpm@v2.11.3 # Install gotestsum -FROM golang:1.22.5-bookworm as gotestsum +FROM golang:${GO_VERSION}-bookworm as gotestsum RUN GO111MODULE=on go install gotest.tools/gotestsum@v1.8.2 # Install tools used to compile jsonnet. -FROM golang:1.22.5-bookworm as jsonnet +FROM golang:${GO_VERSION}-bookworm as jsonnet RUN GO111MODULE=on go install github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@v0.5.1 RUN GO111MODULE=on go install github.com/monitoring-mixins/mixtool/cmd/mixtool@16dc166166d91e93475b86b9355a4faed2400c18 RUN GO111MODULE=on go install github.com/google/go-jsonnet/cmd/jsonnet@v0.20.0 FROM aquasec/trivy as trivy -FROM golang:1.22.5-bookworm +FROM golang:${GO_VERSION}-bookworm RUN apt-get update && \ apt-get install -qy \ musl gnupg ragel \ diff --git a/operator/internal/manifests/internal/config/build_test.go b/operator/internal/manifests/internal/config/build_test.go index 1d52d98692df9..5e119e6661d5e 100644 --- a/operator/internal/manifests/internal/config/build_test.go +++ b/operator/internal/manifests/internal/config/build_test.go @@ -1833,6 +1833,7 @@ compactor: retention_enabled: true retention_delete_delay: 4h retention_delete_worker_count: 50 + delete_request_store: s3 frontend: tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100 compress_responses: true diff --git a/operator/internal/manifests/internal/config/loki-config.yaml b/operator/internal/manifests/internal/config/loki-config.yaml index da13920b770e0..dfdf5c9003258 100644 --- a/operator/internal/manifests/internal/config/loki-config.yaml +++ b/operator/internal/manifests/internal/config/loki-config.yaml @@ -99,11 +99,14 @@ common: compactor: compaction_interval: 2h working_directory: {{ .StorageDirectory }}/compactor -{{- if .Retention.Enabled }}{{- with .Retention }} +{{- if .Retention.Enabled }} +{{- with .Retention }} retention_enabled: true retention_delete_delay: 4h retention_delete_worker_count: {{.DeleteWorkerCount}} -{{- end }}{{- end }} +{{- end }} + delete_request_store: {{.ObjectStorage.SharedStore}} +{{- end }} frontend: tail_proxy_url: {{ .Querier.Protocol }}://{{ .Querier.FQDN }}:{{ .Querier.Port }} {{- if .Gates.HTTPEncryption }} diff --git a/pkg/bloombuild/builder/builder.go b/pkg/bloombuild/builder/builder.go index 982d14388e28d..5f2c3b5ab5fd8 100644 --- a/pkg/bloombuild/builder/builder.go +++ b/pkg/bloombuild/builder/builder.go @@ -191,6 +191,7 @@ func (b *Builder) builderLoop(c protos.PlannerForBuilder_BuilderLoopClient) erro return fmt.Errorf("failed to receive task from planner: %w", err) } + b.metrics.processingTask.Set(1) b.metrics.taskStarted.Inc() start := time.Now() @@ -212,8 +213,11 @@ func (b *Builder) builderLoop(c protos.PlannerForBuilder_BuilderLoopClient) erro b.logTaskCompleted(task, newMetas, err, start) if err = b.notifyTaskCompletedToPlanner(c, task, newMetas, err); err != nil { + b.metrics.processingTask.Set(0) return fmt.Errorf("failed to notify task completion to planner: %w", err) } + + b.metrics.processingTask.Set(0) } level.Debug(b.logger).Log("msg", "builder loop stopped") diff --git a/pkg/bloombuild/builder/metrics.go b/pkg/bloombuild/builder/metrics.go index f94d92353ee1f..8d4545c00bf5e 100644 --- a/pkg/bloombuild/builder/metrics.go +++ b/pkg/bloombuild/builder/metrics.go @@ -14,7 +14,8 @@ const ( ) type Metrics struct { - running prometheus.Gauge + running prometheus.Gauge + processingTask 
prometheus.Gauge taskStarted prometheus.Counter taskCompleted *prometheus.CounterVec @@ -38,6 +39,12 @@ func NewMetrics(r prometheus.Registerer) *Metrics { Name: "running", Help: "Value will be 1 if the bloom builder is currently running on this instance", }), + processingTask: promauto.With(r).NewGauge(prometheus.GaugeOpts{ + Namespace: metricsNamespace, + Subsystem: metricsSubsystem, + Name: "processing_task", + Help: "Value will be 1 if the bloom builder is currently processing a task", + }), taskStarted: promauto.With(r).NewCounter(prometheus.CounterOpts{ Namespace: metricsNamespace, diff --git a/pkg/bloombuild/planner/planner.go b/pkg/bloombuild/planner/planner.go index 08f6bb1c40bb7..285795a8327d2 100644 --- a/pkg/bloombuild/planner/planner.go +++ b/pkg/bloombuild/planner/planner.go @@ -360,7 +360,7 @@ func (p *Planner) computeTasks( // In case the planner restarted before deleting outdated metas in the previous iteration, // we delete them during the planning phase to avoid reprocessing them. - metas, err = p.deleteOutdatedMetasAndBlocks(ctx, table, tenant, metas, phasePlanning) + metas, err = p.deleteOutdatedMetasAndBlocks(ctx, table, tenant, nil, metas, phasePlanning) if err != nil { return nil, nil, fmt.Errorf("failed to delete outdated metas during planning: %w", err) } @@ -446,8 +446,7 @@ func (p *Planner) processTenantTaskResults( return tasksSucceed, nil } - combined := append(originalMetas, newMetas...) - if _, err := p.deleteOutdatedMetasAndBlocks(ctx, table, tenant, combined, phaseBuilding); err != nil { + if _, err := p.deleteOutdatedMetasAndBlocks(ctx, table, tenant, newMetas, originalMetas, phaseBuilding); err != nil { return 0, fmt.Errorf("failed to delete outdated metas: %w", err) } @@ -460,12 +459,14 @@ func (p *Planner) deleteOutdatedMetasAndBlocks( ctx context.Context, table config.DayTable, tenant string, - metas []bloomshipper.Meta, + newMetas []bloomshipper.Meta, + originalMetas []bloomshipper.Meta, phase string, ) ([]bloomshipper.Meta, error) { logger := log.With(p.logger, "table", table.Addr(), "tenant", tenant, "phase", phase) - upToDate, outdated := outdatedMetas(metas) + combined := append(originalMetas, newMetas...) 
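+	// Editor's note: append may reuse originalMetas' backing array when it has
+	// spare capacity, so the caller's originalMetas slice should not be relied
+	// upon after this call. A defensive copy (for example, appending newMetas
+	// to a clone of originalMetas) would avoid the aliasing at the cost of one
+	// extra allocation.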
+ upToDate, outdated := outdatedMetas(combined) if len(outdated) == 0 { level.Debug(logger).Log( "msg", "no outdated metas found", @@ -497,17 +498,25 @@ func (p *Planner) deleteOutdatedMetasAndBlocks( for _, meta := range outdated { for _, block := range meta.Blocks { + logger := log.With(logger, "block", block.String()) + + // Prevent deleting blocks that are reused in new metas + if isBlockInMetas(block, upToDate) { + level.Debug(logger).Log("msg", "block is still in use in new meta, skipping delete") + continue + } + if err := client.DeleteBlocks(ctx, []bloomshipper.BlockRef{block}); err != nil { if client.IsObjectNotFoundErr(err) { - level.Debug(logger).Log("msg", "block not found while attempting delete, continuing", "block", block.String()) + level.Debug(logger).Log("msg", "block not found while attempting delete, continuing") } else { - level.Error(logger).Log("msg", "failed to delete block", "err", err, "block", block.String()) + level.Error(logger).Log("msg", "failed to delete block", "err", err) return nil, errors.Wrap(err, "failed to delete block") } } deletedBlocks++ - level.Debug(logger).Log("msg", "removed outdated block", "block", block.String()) + level.Debug(logger).Log("msg", "removed outdated block") } err = client.DeleteMetas(ctx, []bloomshipper.MetaRef{meta.MetaRef}) @@ -532,6 +541,22 @@ func (p *Planner) deleteOutdatedMetasAndBlocks( return upToDate, nil } +func isBlockInMetas(block bloomshipper.BlockRef, metas []bloomshipper.Meta) bool { + // Blocks are sorted within a meta, so we can find it with binary search + for _, meta := range metas { + // Search for the first block whose bound is >= than the target block min bound. + i := sort.Search(len(meta.Blocks), func(i int) bool { + return meta.Blocks[i].Cmp(uint64(block.Bounds.Max)) <= v1.Overlap + }) + + if i < len(meta.Blocks) && meta.Blocks[i] == block { + return true + } + } + + return false +} + func (p *Planner) tables(ts time.Time) *dayRangeIterator { // adjust the minimum by one to make it inclusive, which is more intuitive // for a configuration variable diff --git a/pkg/bloombuild/planner/planner_test.go b/pkg/bloombuild/planner/planner_test.go index ab34c82c6940d..ca5c1d0c15b09 100644 --- a/pkg/bloombuild/planner/planner_test.go +++ b/pkg/bloombuild/planner/planner_test.go @@ -1,6 +1,7 @@ package planner import ( + "bytes" "context" "fmt" "io" @@ -20,6 +21,8 @@ import ( "google.golang.org/grpc" "github.com/grafana/loki/v3/pkg/bloombuild/protos" + "github.com/grafana/loki/v3/pkg/chunkenc" + iter "github.com/grafana/loki/v3/pkg/iter/v2" "github.com/grafana/loki/v3/pkg/storage" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" "github.com/grafana/loki/v3/pkg/storage/chunk/cache" @@ -166,11 +169,36 @@ func genBlockRef(min, max model.Fingerprint) bloomshipper.BlockRef { } } -func genBlock(ref bloomshipper.BlockRef) bloomshipper.Block { +func genBlock(ref bloomshipper.BlockRef) (bloomshipper.Block, error) { + indexBuf := bytes.NewBuffer(nil) + bloomsBuf := bytes.NewBuffer(nil) + writer := v1.NewMemoryBlockWriter(indexBuf, bloomsBuf) + reader := v1.NewByteReader(indexBuf, bloomsBuf) + + blockOpts := v1.NewBlockOptions(chunkenc.EncNone, 4, 1, 0, 0) + + builder, err := v1.NewBlockBuilder(blockOpts, writer) + if err != nil { + return bloomshipper.Block{}, err + } + + if _, err = builder.BuildFrom(iter.NewEmptyIter[v1.SeriesWithBlooms]()); err != nil { + return bloomshipper.Block{}, err + } + + block := v1.NewBlock(reader, v1.NewMetrics(nil)) + + buf := bytes.NewBuffer(nil) + if err := v1.TarGz(buf, block.Reader()); 
err != nil { + return bloomshipper.Block{}, err + } + + tarReader := bytes.NewReader(buf.Bytes()) + return bloomshipper.Block{ BlockRef: ref, - Data: &DummyReadSeekCloser{}, - } + Data: bloomshipper.ClosableReadSeekerAdapter{ReadSeeker: tarReader}, + }, nil } func Test_blockPlansForGaps(t *testing.T) { @@ -612,7 +640,12 @@ func putMetas(bloomClient bloomshipper.Client, metas []bloomshipper.Meta) error } for _, block := range meta.Blocks { - err := bloomClient.PutBlock(context.Background(), genBlock(block)) + writtenBlock, err := genBlock(block) + if err != nil { + return err + } + + err = bloomClient.PutBlock(context.Background(), writtenBlock) if err != nil { return err } @@ -826,6 +859,7 @@ func Test_deleteOutdatedMetas(t *testing.T) { for _, tc := range []struct { name string originalMetas []bloomshipper.Meta + newMetas []bloomshipper.Meta expectedUpToDateMetas []bloomshipper.Meta }{ { @@ -835,6 +869,8 @@ func Test_deleteOutdatedMetas(t *testing.T) { name: "only up to date metas", originalMetas: []bloomshipper.Meta{ genMeta(0, 10, []int{0}, []bloomshipper.BlockRef{genBlockRef(0, 10)}), + }, + newMetas: []bloomshipper.Meta{ genMeta(10, 20, []int{0}, []bloomshipper.BlockRef{genBlockRef(10, 20)}), }, expectedUpToDateMetas: []bloomshipper.Meta{ @@ -846,13 +882,52 @@ func Test_deleteOutdatedMetas(t *testing.T) { name: "outdated metas", originalMetas: []bloomshipper.Meta{ genMeta(0, 5, []int{0}, []bloomshipper.BlockRef{genBlockRef(0, 5)}), - genMeta(6, 10, []int{0}, []bloomshipper.BlockRef{genBlockRef(6, 10)}), + }, + newMetas: []bloomshipper.Meta{ genMeta(0, 10, []int{1}, []bloomshipper.BlockRef{genBlockRef(0, 10)}), }, expectedUpToDateMetas: []bloomshipper.Meta{ genMeta(0, 10, []int{1}, []bloomshipper.BlockRef{genBlockRef(0, 10)}), }, }, + { + name: "new metas reuse blocks from outdated meta", + originalMetas: []bloomshipper.Meta{ + genMeta(0, 10, []int{0}, []bloomshipper.BlockRef{ // Outdated + genBlockRef(0, 5), // Reuse + genBlockRef(5, 10), // Delete + }), + genMeta(10, 20, []int{0}, []bloomshipper.BlockRef{ // Outdated + genBlockRef(10, 20), // Reuse + }), + genMeta(20, 30, []int{0}, []bloomshipper.BlockRef{ // Up to date + genBlockRef(20, 30), + }), + }, + newMetas: []bloomshipper.Meta{ + genMeta(0, 5, []int{1}, []bloomshipper.BlockRef{ + genBlockRef(0, 5), // Reused block + }), + genMeta(5, 20, []int{1}, []bloomshipper.BlockRef{ + genBlockRef(5, 7), // New block + genBlockRef(7, 10), // New block + genBlockRef(10, 20), // Reused block + }), + }, + expectedUpToDateMetas: []bloomshipper.Meta{ + genMeta(0, 5, []int{1}, []bloomshipper.BlockRef{ + genBlockRef(0, 5), + }), + genMeta(5, 20, []int{1}, []bloomshipper.BlockRef{ + genBlockRef(5, 7), + genBlockRef(7, 10), + genBlockRef(10, 20), + }), + genMeta(20, 30, []int{0}, []bloomshipper.BlockRef{ + genBlockRef(20, 30), + }), + }, + }, } { t.Run(tc.name, func(t *testing.T) { logger := log.NewNopLogger() @@ -867,9 +942,11 @@ func Test_deleteOutdatedMetas(t *testing.T) { bloomClient, err := planner.bloomStore.Client(testDay.ModelTime()) require.NoError(t, err) - // Create original metas and blocks + // Create original/new metas and blocks err = putMetas(bloomClient, tc.originalMetas) require.NoError(t, err) + err = putMetas(bloomClient, tc.newMetas) + require.NoError(t, err) // Get all metas metas, err := planner.bloomStore.FetchMetas( @@ -882,9 +959,9 @@ func Test_deleteOutdatedMetas(t *testing.T) { ) require.NoError(t, err) removeLocFromMetasSources(metas) - require.ElementsMatch(t, tc.originalMetas, metas) + require.ElementsMatch(t, 
append(tc.originalMetas, tc.newMetas...), metas) - upToDate, err := planner.deleteOutdatedMetasAndBlocks(context.Background(), testTable, "fakeTenant", tc.originalMetas, phasePlanning) + upToDate, err := planner.deleteOutdatedMetasAndBlocks(context.Background(), testTable, "fakeTenant", tc.newMetas, tc.originalMetas, phasePlanning) require.NoError(t, err) require.ElementsMatch(t, tc.expectedUpToDateMetas, upToDate) @@ -900,6 +977,13 @@ func Test_deleteOutdatedMetas(t *testing.T) { require.NoError(t, err) removeLocFromMetasSources(metas) require.ElementsMatch(t, tc.expectedUpToDateMetas, metas) + + // Fetch all blocks from the metas + for _, meta := range metas { + blocks, err := planner.bloomStore.FetchBlocks(context.Background(), meta.Blocks) + require.NoError(t, err) + require.Len(t, blocks, len(meta.Blocks)) + } }) } } diff --git a/pkg/bloomcompactor/controller.go b/pkg/bloomcompactor/controller.go index b852896bfd27c..d53ba80b0123b 100644 --- a/pkg/bloomcompactor/controller.go +++ b/pkg/bloomcompactor/controller.go @@ -431,6 +431,9 @@ func (s *SimpleBloomController) buildGaps( built, err := bloomshipper.BlockFrom(tenant, table.Addr(), blk) if err != nil { level.Error(logger).Log("msg", "failed to build block", "err", err) + if err = blk.Reader().Cleanup(); err != nil { + level.Error(logger).Log("msg", "failed to cleanup block directory", "err", err) + } return nil, errors.Wrap(err, "failed to build block") } @@ -439,10 +442,17 @@ func (s *SimpleBloomController) buildGaps( built, ); err != nil { level.Error(logger).Log("msg", "failed to write block", "err", err) + if err = blk.Reader().Cleanup(); err != nil { + level.Error(logger).Log("msg", "failed to cleanup block directory", "err", err) + } return nil, errors.Wrap(err, "failed to write block") } s.metrics.blocksCreated.Inc() + if err := blk.Reader().Cleanup(); err != nil { + level.Error(logger).Log("msg", "failed to cleanup block directory", "err", err) + } + totalGapKeyspace := (gap.bounds.Max - gap.bounds.Min) progress := (built.Bounds.Max - gap.bounds.Min) pct := float64(progress) / float64(totalGapKeyspace) * 100 diff --git a/pkg/bloomgateway/processor.go b/pkg/bloomgateway/processor.go index 9f0b2b8b8151d..edd882e1e4210 100644 --- a/pkg/bloomgateway/processor.go +++ b/pkg/bloomgateway/processor.go @@ -5,7 +5,6 @@ import ( "time" "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/pkg/errors" "github.com/grafana/dskit/concurrency" @@ -37,7 +36,6 @@ type processor struct { func (p *processor) processTasks(ctx context.Context, tasks []Task) error { tenant := tasks[0].tenant - level.Info(p.logger).Log("msg", "process tasks", "tenant", tenant, "tasks", len(tasks)) for ts, tasks := range group(tasks, func(t Task) config.DayTime { return t.table }) { err := p.processTasksForDay(ctx, tenant, ts, tasks) @@ -54,8 +52,7 @@ func (p *processor) processTasks(ctx context.Context, tasks []Task) error { return nil } -func (p *processor) processTasksForDay(ctx context.Context, tenant string, day config.DayTime, tasks []Task) error { - level.Info(p.logger).Log("msg", "process tasks for day", "tenant", tenant, "tasks", len(tasks), "day", day.String()) +func (p *processor) processTasksForDay(ctx context.Context, _ string, _ config.DayTime, tasks []Task) error { var duration time.Duration blocksRefs := make([]bloomshipper.BlockRef, 0, len(tasks[0].blocks)*len(tasks)) @@ -83,7 +80,6 @@ func (p *processor) processTasksForDay(ctx context.Context, tenant string, day c bloomshipper.WithPool(p.store.Allocator()), ) duration = 
time.Since(startBlocks) - level.Debug(p.logger).Log("msg", "fetched blocks", "count", len(refs), "duration", duration, "err", err) for _, t := range tasks { FromContext(t.ctx).AddBlocksFetchTime(duration) diff --git a/pkg/bloomgateway/worker.go b/pkg/bloomgateway/worker.go index 0fa154c3b413f..6aa1082b89334 100644 --- a/pkg/bloomgateway/worker.go +++ b/pkg/bloomgateway/worker.go @@ -86,7 +86,6 @@ func (w *worker) running(_ context.Context) error { continue } - level.Debug(w.logger).Log("msg", "dequeued tasks", "count", len(items)) w.metrics.tasksDequeued.WithLabelValues(w.id, labelSuccess).Add(float64(len(items))) tasks := make([]Task, 0, len(items)) diff --git a/pkg/ingester-rf1/flush.go b/pkg/ingester-rf1/flush.go index efd8f72df726f..2242569c2c25e 100644 --- a/pkg/ingester-rf1/flush.go +++ b/pkg/ingester-rf1/flush.go @@ -15,6 +15,7 @@ import ( "github.com/oklog/ulid" "golang.org/x/net/context" + "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore/metastorepb" "github.com/grafana/loki/v3/pkg/storage/wal" ) @@ -68,7 +69,7 @@ func (i *Ingester) flushWorker(j int) { continue } - err = i.flushItem(l, j, it) + err = i.flush(l, j, it) if err != nil { level.Error(l).Log("msg", "failed to flush", "err", err) } @@ -78,7 +79,7 @@ func (i *Ingester) flushWorker(j int) { } } -func (i *Ingester) flushItem(l log.Logger, j int, it *wal.PendingItem) error { +func (i *Ingester) flush(l log.Logger, j int, it *wal.PendingSegment) error { ctx, cancelFunc := context.WithCancel(context.Background()) defer cancelFunc() @@ -94,30 +95,33 @@ func (i *Ingester) flushItem(l log.Logger, j int, it *wal.PendingItem) error { return b.Err() } -// flushChunk flushes the given chunk to the store. -// -// If the flush is successful, metrics for this flush are to be reported. -// If the flush isn't successful, the operation for this userID is requeued allowing this and all other unflushed -// segments to have another opportunity to be flushed. 
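// Editor's note: flushSegment (below) serializes the pending WAL segment into
// a per-worker buffer, uploads it to object storage under a ULID-keyed
// "loki-v2/wal/anon/" path, and then registers the resulting block with the
// metastore via AddBlock; each of the three failure paths increments
// flushFailuresTotal before returning.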
func (i *Ingester) flushSegment(ctx context.Context, j int, w *wal.SegmentWriter) error { - id := ulid.MustNew(ulid.Timestamp(time.Now()), rand.Reader) - start := time.Now() defer func() { i.metrics.flushDuration.Observe(time.Since(start).Seconds()) - w.Observe() + w.ReportMetrics() }() + i.metrics.flushesTotal.Add(1) + buf := i.flushBuffers[j] defer buf.Reset() if _, err := w.WriteTo(buf); err != nil { + i.metrics.flushFailuresTotal.Inc() return err } - i.metrics.flushesTotal.Add(1) - if err := i.store.PutObject(ctx, fmt.Sprintf("loki-v2/wal/anon/"+id.String()), buf); err != nil { + id := ulid.MustNew(ulid.Timestamp(time.Now()), rand.Reader).String() + if err := i.store.PutObject(ctx, fmt.Sprintf("loki-v2/wal/anon/"+id), buf); err != nil { + i.metrics.flushFailuresTotal.Inc() + return fmt.Errorf("failed to put object: %w", err) + } + + if _, err := i.metastoreClient.AddBlock(ctx, &metastorepb.AddBlockRequest{ + Block: w.Meta(id), + }); err != nil { i.metrics.flushFailuresTotal.Inc() - return fmt.Errorf("store put chunk: %w", err) + return fmt.Errorf("metastore add block: %w", err) } return nil diff --git a/pkg/ingester-rf1/ingester.go b/pkg/ingester-rf1/ingester.go index 802934665af35..8182449c6d596 100644 --- a/pkg/ingester-rf1/ingester.go +++ b/pkg/ingester-rf1/ingester.go @@ -3,6 +3,7 @@ package ingesterrf1 import ( "bytes" "context" + "errors" "flag" "fmt" "io" @@ -17,6 +18,7 @@ import ( "github.com/opentracing/opentracing-go" "github.com/grafana/loki/v3/pkg/ingester-rf1/clientpool" + "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore/metastorepb" "github.com/grafana/loki/v3/pkg/ingester-rf1/objstore" "github.com/grafana/loki/v3/pkg/ingester/index" "github.com/grafana/loki/v3/pkg/loghttp/push" @@ -33,7 +35,6 @@ import ( "github.com/grafana/dskit/ring" "github.com/grafana/dskit/services" "github.com/grafana/dskit/tenant" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "google.golang.org/grpc/health/grpc_health_v1" @@ -181,6 +182,7 @@ type Ingester struct { lifecyclerWatcher *services.FailureWatcher store Storage + metastoreClient metastorepb.MetastoreServiceClient periodicConfigs []config.PeriodConfig loopQuit chan struct{} @@ -222,6 +224,7 @@ func New(cfg Config, clientConfig client.Config, storageConfig storage.Config, clientMetrics storage.ClientMetrics, limits Limits, configs *runtime.TenantConfigs, + metastoreClient metastorepb.MetastoreServiceClient, registerer prometheus.Registerer, writeFailuresCfg writefailures.Cfg, metricsNamespace string, @@ -247,18 +250,19 @@ func New(cfg Config, clientConfig client.Config, } i := &Ingester{ - cfg: cfg, - logger: logger, - clientConfig: clientConfig, - tenantConfigs: configs, - instances: map[string]*instance{}, - store: storage, - periodicConfigs: periodConfigs, - flushWorkersDone: sync.WaitGroup{}, - loopQuit: make(chan struct{}), - tailersQuit: make(chan struct{}), - metrics: metrics, - // flushOnShutdownSwitch: &OnceSwitch{}, + cfg: cfg, + logger: logger, + clientConfig: clientConfig, + tenantConfigs: configs, + instances: map[string]*instance{}, + store: storage, + periodicConfigs: periodConfigs, + flushBuffers: make([]*bytes.Buffer, cfg.ConcurrentFlushes), + flushWorkersDone: sync.WaitGroup{}, + loopQuit: make(chan struct{}), + tailersQuit: make(chan struct{}), + metrics: metrics, + metastoreClient: metastoreClient, terminateOnShutdown: false, streamRateCalculator: NewStreamRateCalculator(), writeLogManager: writefailures.NewManager(logger, registerer, writeFailuresCfg, configs, "ingester_rf1"), @@ 
-284,19 +288,7 @@ func New(cfg Config, clientConfig client.Config, i.setupAutoForget() - //if i.cfg.ChunkFilterer != nil { - // i.SetChunkFilterer(i.cfg.ChunkFilterer) - //} - // - //if i.cfg.PipelineWrapper != nil { - // i.SetPipelineWrapper(i.cfg.PipelineWrapper) - //} - // - //if i.cfg.SampleExtractorWrapper != nil { - // i.SetExtractorWrapper(i.cfg.SampleExtractorWrapper) - //} - // - //i.recalculateOwnedStreams = newRecalculateOwnedStreams(i.getInstances, i.lifecycler.ID, i.readRing, cfg.OwnedStreamsCheckInterval, util_log.Logger) + // i.recalculateOwnedStreams = newRecalculateOwnedStreams(i.getInstances, i.lifecycler.ID, i.readRing, cfg.OwnedStreamsCheckInterval, util_log.Logger) return i, nil } @@ -394,7 +386,7 @@ func (i *Ingester) starting(ctx context.Context) error { shutdownMarkerPath := path.Join(i.cfg.ShutdownMarkerPath, shutdownMarkerFilename) shutdownMarker, err := shutdownMarkerExists(shutdownMarkerPath) if err != nil { - return errors.Wrap(err, "failed to check ingester shutdown marker") + return fmt.Errorf("failed to check ingester shutdown marker: %w", err) } if shutdownMarker { @@ -705,11 +697,6 @@ func (i *Ingester) Push(ctx context.Context, req *logproto.PushRequest) (*logpro return nil, ErrReadOnly } - // Set profiling tags - defer pprof.SetGoroutineLabels(ctx) - ctx = pprof.WithLabels(ctx, pprof.Labels("path", "write", "tenant", instanceID)) - pprof.SetGoroutineLabels(ctx) - instance, err := i.GetOrCreateInstance(instanceID) if err != nil { return &logproto.PushResponse{}, err diff --git a/pkg/ingester-rf1/metastore/client/client.go b/pkg/ingester-rf1/metastore/client/client.go new file mode 100644 index 0000000000000..044ad613d7ed5 --- /dev/null +++ b/pkg/ingester-rf1/metastore/client/client.go @@ -0,0 +1,118 @@ +package metastoreclient + +import ( + "flag" + "fmt" + + "github.com/grafana/dskit/grpcclient" + "github.com/grafana/dskit/instrument" + "github.com/grafana/dskit/middleware" + "github.com/grafana/dskit/services" + "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" + "github.com/opentracing/opentracing-go" + "github.com/prometheus/client_golang/prometheus" + "google.golang.org/grpc" + + metastorepb "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore/metastorepb" + "github.com/grafana/loki/v3/pkg/util/constants" +) + +type Config struct { + MetastoreAddress string `yaml:"address"` + GRPCClientConfig grpcclient.Config `yaml:"grpc_client_config" doc:"description=Configures the gRPC client used to communicate with the metastore."` +} + +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + f.StringVar(&cfg.MetastoreAddress, "metastore.address", "localhost:9095", "") + cfg.GRPCClientConfig.RegisterFlagsWithPrefix("metastore.grpc-client-config", f) +} + +func (cfg *Config) Validate() error { + if cfg.MetastoreAddress == "" { + return fmt.Errorf("metastore.address is required") + } + return cfg.GRPCClientConfig.Validate() +} + +type Client struct { + metastorepb.MetastoreServiceClient + service services.Service + conn *grpc.ClientConn + config Config +} + +func New(config Config, r prometheus.Registerer) (c *Client, err error) { + c = &Client{config: config} + c.conn, err = dial(c.config, r) + if err != nil { + return nil, err + } + c.MetastoreServiceClient = metastorepb.NewMetastoreServiceClient(c.conn) + c.service = services.NewIdleService(nil, c.stopping) + return c, nil +} + +func (c *Client) stopping(error) error { return c.conn.Close() } + +func (c *Client) Service() services.Service { return c.service } + +func dial(cfg Config, r 
prometheus.Registerer) (*grpc.ClientConn, error) { + latency := prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: constants.Loki, + Name: "metastore_request_duration_seconds", + Help: "Time (in seconds) spent serving requests when using the metastore", + Buckets: instrument.DefBuckets, + }, []string{"operation", "status_code"}) + if r != nil { + err := r.Register(latency) + if err != nil { + alreadyErr, ok := err.(prometheus.AlreadyRegisteredError) + if !ok { + return nil, err + } + latency = alreadyErr.ExistingCollector.(*prometheus.HistogramVec) + } + } + if err := cfg.Validate(); err != nil { + return nil, err + } + options, err := cfg.GRPCClientConfig.DialOption(instrumentation(latency)) + if err != nil { + return nil, err + } + // TODO: https://github.com/grpc/grpc-proto/blob/master/grpc/service_config/service_config.proto + options = append(options, grpc.WithDefaultServiceConfig(grpcServiceConfig)) + return grpc.Dial(cfg.MetastoreAddress, options...) +} + +const grpcServiceConfig = `{ + "healthCheckConfig": { + "serviceName": "metastorepb.MetastoreService.RaftLeader" + }, + "loadBalancingPolicy":"round_robin", + "methodConfig": [{ + "name": [{"service": "metastorepb.MetastoreService"}], + "waitForReady": true, + "retryPolicy": { + "MaxAttempts": 4, + "InitialBackoff": ".01s", + "MaxBackoff": ".01s", + "BackoffMultiplier": 1.0, + "RetryableStatusCodes": [ "UNAVAILABLE" ] + } + }] +}` + +func instrumentation(latency *prometheus.HistogramVec) ([]grpc.UnaryClientInterceptor, []grpc.StreamClientInterceptor) { + var unaryInterceptors []grpc.UnaryClientInterceptor + unaryInterceptors = append(unaryInterceptors, otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer())) + unaryInterceptors = append(unaryInterceptors, middleware.ClientUserHeaderInterceptor) + unaryInterceptors = append(unaryInterceptors, middleware.UnaryClientInstrumentInterceptor(latency)) + + var streamInterceptors []grpc.StreamClientInterceptor + streamInterceptors = append(streamInterceptors, otgrpc.OpenTracingStreamClientInterceptor(opentracing.GlobalTracer())) + streamInterceptors = append(streamInterceptors, middleware.StreamClientUserHeaderInterceptor) + streamInterceptors = append(streamInterceptors, middleware.StreamClientInstrumentInterceptor(latency)) + + return unaryInterceptors, streamInterceptors +} diff --git a/pkg/ingester-rf1/metastore/health/health.go b/pkg/ingester-rf1/metastore/health/health.go new file mode 100644 index 0000000000000..675596e1bf2f1 --- /dev/null +++ b/pkg/ingester-rf1/metastore/health/health.go @@ -0,0 +1,33 @@ +package health + +import ( + "github.com/grafana/dskit/services" + "google.golang.org/grpc/health" + "google.golang.org/grpc/health/grpc_health_v1" +) + +type Service interface { + SetServingStatus(string, grpc_health_v1.HealthCheckResponse_ServingStatus) +} + +type noopService struct{} + +var NoOpService = noopService{} + +func (noopService) SetServingStatus(string, grpc_health_v1.HealthCheckResponse_ServingStatus) {} + +func NewGRPCHealthService() *GRPCHealthService { + s := health.NewServer() + return &GRPCHealthService{ + Server: s, + Service: services.NewIdleService(nil, func(error) error { + s.Shutdown() + return nil + }), + } +} + +type GRPCHealthService struct { + services.Service + *health.Server +} diff --git a/pkg/ingester-rf1/metastore/metastore.go b/pkg/ingester-rf1/metastore/metastore.go new file mode 100644 index 0000000000000..6d999b8edd518 --- /dev/null +++ b/pkg/ingester-rf1/metastore/metastore.go @@ -0,0 +1,264 @@ +package metastore + 
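// Editor's note: this file implements a small raft-replicated metadata store
// for RF-1 WAL segments. Writes are applied through the raft log (see
// metastore_fsm.go), the FSM state is persisted in a bbolt database, and the
// raft log and stable store are backed by hashicorp/raft-wal, matching the
// dependencies added to go.sum above (raft v1.7.0, raft-wal v0.4.1,
// bbolt v1.3.10).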
+import ( + "context" + "errors" + "flag" + "fmt" + "net" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/grafana/dskit/flagext" + "github.com/grafana/dskit/services" + "github.com/hashicorp/raft" + raftwal "github.com/hashicorp/raft-wal" + "github.com/prometheus/client_golang/prometheus" + + "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore/health" + "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore/metastorepb" + "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore/raftleader" +) + +const metastoreRaftLeaderHealthServiceName = "metastorepb.MetastoreService.RaftLeader" + +type Config struct { + DataDir string `yaml:"data_dir"` + Raft RaftConfig `yaml:"raft"` +} + +type RaftConfig struct { + Dir string `yaml:"dir"` + + BootstrapPeers []string `yaml:"bootstrap_peers"` + ServerID string `yaml:"server_id"` + BindAddress string `yaml:"bind_address"` + AdvertiseAddress string `yaml:"advertise_address"` + + ApplyTimeout time.Duration `yaml:"apply_timeout" doc:"hidden"` +} + +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + const prefix = "metastore." + f.StringVar(&cfg.DataDir, prefix+"data-dir", "./data-metastore/data", "") + cfg.Raft.RegisterFlags(f) +} + +func (cfg *RaftConfig) RegisterFlags(f *flag.FlagSet) { + const prefix = "metastore.raft." + f.StringVar(&cfg.Dir, prefix+"dir", "./data-metastore/raft", "") + f.Var((*flagext.StringSlice)(&cfg.BootstrapPeers), prefix+"bootstrap-peers", "") + f.StringVar(&cfg.BindAddress, prefix+"bind-address", "localhost:9099", "") + f.StringVar(&cfg.ServerID, prefix+"server-id", "localhost:9099", "") + f.StringVar(&cfg.AdvertiseAddress, prefix+"advertise-address", "localhost:9099", "") + f.DurationVar(&cfg.ApplyTimeout, prefix+"apply-timeout", 5*time.Second, "") +} + +type Metastore struct { + services.Service + metastorepb.MetastoreServiceServer + + config Config + logger log.Logger + reg prometheus.Registerer + + // In-memory state. + state *metastoreState + + // Persistent state. + db *boltdb + + // Raft module. 
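+	// Editor's note: raftwal.WAL serves as both the log store and the stable
+	// store (see openRaftStore below), FileSnapshotStore persists FSM
+	// snapshots under the same raft directory, and NetworkTransport carries
+	// raft RPCs over TCP between peers.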
+ wal *raftwal.WAL + snapshots *raft.FileSnapshotStore + transport *raft.NetworkTransport + raft *raft.Raft + leaderhealth *raftleader.HealthObserver + + logStore raft.LogStore + stableStore raft.StableStore + snapshotStore raft.SnapshotStore + + walDir string + + done chan struct{} + wg sync.WaitGroup +} + +func New(config Config, logger log.Logger, reg prometheus.Registerer, hs health.Service) (*Metastore, error) { + m := &Metastore{ + config: config, + logger: logger, + reg: reg, + db: newDB(config, logger), + done: make(chan struct{}), + } + m.leaderhealth = raftleader.NewRaftLeaderHealthObserver(hs, logger) + m.state = newMetastoreState(logger, m.db) + m.Service = services.NewBasicService(m.starting, m.running, m.stopping) + return m, nil +} + +func (m *Metastore) Shutdown() error { + m.shutdownRaft() + m.db.shutdown() + return nil +} + +func (m *Metastore) starting(_ context.Context) error { + if err := m.db.open(false); err != nil { + return fmt.Errorf("failed to initialize database: %w", err) + } + if err := m.initRaft(); err != nil { + return fmt.Errorf("failed to initialize raft: %w", err) + } + m.wg.Add(1) + go m.cleanupLoop() + return nil +} + +func (m *Metastore) stopping(_ error) error { + close(m.done) + m.wg.Wait() + return m.Shutdown() +} + +func (m *Metastore) running(ctx context.Context) error { + <-ctx.Done() + return nil +} + +const ( + snapshotsRetain = 3 + walCacheEntries = 512 + transportConnPoolSize = 10 + transportTimeout = 10 * time.Second + + raftTrailingLogs = 32 << 10 + raftSnapshotInterval = 300 * time.Second + raftSnapshotThreshold = 16 << 10 +) + +func (m *Metastore) initRaft() (err error) { + defer func() { + if err != nil { + // If the initialization fails, initialized components + // should be de-initialized gracefully. + m.shutdownRaft() + } + }() + + hasState, err := m.openRaftStore() + if err != nil { + return err + } + + addr, err := net.ResolveTCPAddr("tcp", m.config.Raft.AdvertiseAddress) + if err != nil { + return err + } + m.transport, err = raft.NewTCPTransport(m.config.Raft.BindAddress, addr, transportConnPoolSize, transportTimeout, os.Stderr) + if err != nil { + return err + } + + config := raft.DefaultConfig() + // TODO: Wrap gokit + // config.Logger + config.LogLevel = "debug" + config.TrailingLogs = raftTrailingLogs + config.SnapshotThreshold = raftSnapshotThreshold + config.SnapshotInterval = raftSnapshotInterval + // TODO: We don't need to restore the latest snapshot + // on start, because the FSM is already disk-based. 
+	// config.NoSnapshotRestoreOnStart = true
+	config.LocalID = raft.ServerID(m.config.Raft.ServerID)
+
+	fsm := newFSM(m.logger, m.db, m.state)
+	m.raft, err = raft.NewRaft(config, fsm, m.logStore, m.stableStore, m.snapshotStore, m.transport)
+	if err != nil {
+		return fmt.Errorf("starting raft node: %w", err)
+	}
+
+	if !hasState {
+		_ = level.Warn(m.logger).Log("msg", "no existing state found, bootstrapping cluster")
+		if err = m.bootstrap(); err != nil {
+			return fmt.Errorf("failed to bootstrap cluster: %w", err)
+		}
+	}
+
+	m.leaderhealth.Register(m.raft, metastoreRaftLeaderHealthServiceName)
+	return nil
+}
+
+func (m *Metastore) openRaftStore() (hasState bool, err error) {
+	if err = m.createRaftDirs(); err != nil {
+		return false, err
+	}
+	m.wal, err = raftwal.Open(m.walDir)
+	if err != nil {
+		return false, fmt.Errorf("failed to open WAL: %w", err)
+	}
+	m.snapshots, err = raft.NewFileSnapshotStore(m.config.Raft.Dir, snapshotsRetain, os.Stderr)
+	if err != nil {
+		return false, fmt.Errorf("failed to open snapshot store: %w", err)
+	}
+	m.logStore = m.wal
+	m.logStore, _ = raft.NewLogCache(walCacheEntries, m.logStore)
+	m.stableStore = m.wal
+	m.snapshotStore = m.snapshots
+	if hasState, err = raft.HasExistingState(m.logStore, m.stableStore, m.snapshotStore); err != nil {
+		return hasState, fmt.Errorf("failed to check for existing state: %w", err)
+	}
+	return hasState, nil
+}
+
+func (m *Metastore) createRaftDirs() (err error) {
+	m.walDir = filepath.Join(m.config.Raft.Dir, "wal")
+	if err = os.MkdirAll(m.walDir, 0o755); err != nil {
+		return fmt.Errorf("WAL dir: %w", err)
+	}
+	if err = os.MkdirAll(m.config.Raft.Dir, 0o755); err != nil {
+		return fmt.Errorf("snapshot directory: %w", err)
+	}
+	return nil
+}
+
+func (m *Metastore) shutdownRaft() {
+	if m.raft != nil {
+		// If raft has been initialized, try to transfer leadership.
+		// Only after that do we remove the leader health observer and
+		// shut down raft.
+		// There is a chance that a client will still be trying to connect
+		// to this instance, therefore retrying is still required.
+		if err := m.raft.LeadershipTransfer().Error(); err != nil {
+			switch {
+			case errors.Is(err, raft.ErrNotLeader):
+				// Not a leader, nothing to do.
+			case strings.Contains(err.Error(), "cannot find peer"):
+				// It's likely that there's just one node in the cluster.
+			default:
+				_ = level.Error(m.logger).Log("msg", "failed to transfer leadership", "err", err)
+			}
+		}
+		m.leaderhealth.Deregister(m.raft, metastoreRaftLeaderHealthServiceName)
+		if err := m.raft.Shutdown().Error(); err != nil {
+			_ = level.Error(m.logger).Log("msg", "failed to shutdown raft", "err", err)
+		}
+	}
+	if m.transport != nil {
+		if err := m.transport.Close(); err != nil {
+			_ = level.Error(m.logger).Log("msg", "failed to close transport", "err", err)
+		}
+	}
+	if m.wal != nil {
+		if err := m.wal.Close(); err != nil {
+			_ = level.Error(m.logger).Log("msg", "failed to close WAL", "err", err)
+		}
+	}
+}
diff --git a/pkg/ingester-rf1/metastore/metastore_boltdb.go b/pkg/ingester-rf1/metastore/metastore_boltdb.go
new file mode 100644
index 0000000000000..9762d5568d007
--- /dev/null
+++ b/pkg/ingester-rf1/metastore/metastore_boltdb.go
@@ -0,0 +1,208 @@
+package metastore
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/go-kit/log"
+	"github.com/go-kit/log/level"
+	"github.com/hashicorp/raft"
+	"go.etcd.io/bbolt"
+)
+
+const (
+	boltDBFileName     = "metastore.boltdb"
+	boltDBSnapshotName = "metastore_snapshot.boltdb"
+)
+
+type boltdb struct {
+	logger log.Logger
+	boltdb *bbolt.DB
+	config Config
+	path   string
+}
+
+type snapshot struct {
+	logger log.Logger
+	tx     *bbolt.Tx
+}
+
+func newDB(config Config, logger log.Logger) *boltdb {
+	return &boltdb{
+		logger: logger,
+		config: config,
+	}
+}
+
+func (db *boltdb) open(readOnly bool) (err error) {
+	defer func() {
+		if err != nil {
+			// If the initialization fails, initialized components
+			// should be de-initialized gracefully.
+			db.shutdown()
+		}
+	}()
+
+	if err = os.MkdirAll(db.config.DataDir, 0o755); err != nil {
+		return fmt.Errorf("db dir: %w", err)
+	}
+
+	if db.path == "" {
+		db.path = filepath.Join(db.config.DataDir, boltDBFileName)
+	}
+
+	opts := *bbolt.DefaultOptions
+	opts.ReadOnly = readOnly
+	if db.boltdb, err = bbolt.Open(db.path, 0o644, &opts); err != nil {
+		return fmt.Errorf("failed to open db: %w", err)
+	}
+
+	if !readOnly {
+		err = db.boltdb.Update(func(tx *bbolt.Tx) error {
+			_, err := tx.CreateBucketIfNotExists(blockMetadataBucketNameBytes)
+			return err
+		})
+		if err != nil {
+			return fmt.Errorf("failed to create bucket: %w", err)
+		}
+	}
+
+	return nil
+}
+
+func (db *boltdb) shutdown() {
+	if db.boltdb != nil {
+		if err := db.boltdb.Close(); err != nil {
+			_ = level.Error(db.logger).Log("msg", "failed to close database", "err", err)
+		}
+	}
+}
+
+func (db *boltdb) restore(snapshot io.Reader) error {
+	// Snapshot is a full copy of the database, therefore we copy
+	// it to disk and use it instead of the current database.
+	path, err := db.copySnapshot(snapshot)
+	if err == nil {
+		// First check the snapshot.
+		restored := *db
+		restored.path = path
+		err = restored.open(true)
+		// Also check applied index.
+		restored.shutdown()
+	}
+	if err != nil {
+		return fmt.Errorf("failed to restore snapshot: %w", err)
+	}
+	// Note that we do not keep the previous database: in case the
+	// snapshot is corrupted, we should try another one.
+ return db.openSnapshot(path) +} + +func (db *boltdb) copySnapshot(snapshot io.Reader) (path string, err error) { + path = filepath.Join(db.config.DataDir, boltDBSnapshotName) + snapFile, err := os.Create(path) + if err != nil { + return "", err + } + _, err = io.Copy(snapFile, snapshot) + if syncErr := syncFD(snapFile); err == nil { + err = syncErr + } + return path, err +} + +func (db *boltdb) openSnapshot(path string) (err error) { + db.shutdown() + if err = os.Rename(path, db.path); err != nil { + return err + } + if err = syncPath(db.path); err != nil { + return err + } + return db.open(false) +} + +func syncPath(path string) (err error) { + d, err := os.Open(path) + if err != nil { + return err + } + return syncFD(d) +} + +func syncFD(f *os.File) (err error) { + err = f.Sync() + if closeErr := f.Close(); err == nil { + return closeErr + } + return err +} + +func (db *boltdb) createSnapshot() (*snapshot, error) { + s := snapshot{logger: db.logger} + tx, err := db.boltdb.Begin(false) + if err != nil { + return nil, fmt.Errorf("failed to open a transaction for snapshot: %w", err) + } + s.tx = tx + return &s, nil +} + +func (s *snapshot) Persist(sink raft.SnapshotSink) (err error) { + _ = s.logger.Log("msg", "persisting snapshot", "sink_id", sink.ID()) + defer func() { + if err != nil { + _ = s.logger.Log("msg", "failed to persist snapshot", "err", err) + if err = sink.Cancel(); err != nil { + _ = s.logger.Log("msg", "failed to cancel snapshot sink", "err", err) + return + } + } + if err = sink.Close(); err != nil { + _ = s.logger.Log("msg", "failed to close sink", "err", err) + } + }() + _ = level.Info(s.logger).Log("msg", "persisting snapshot") + if _, err = s.tx.WriteTo(sink); err != nil { + _ = level.Error(s.logger).Log("msg", "failed to write snapshot", "err", err) + return err + } + return nil +} + +func (s *snapshot) Release() { + if s.tx != nil { + // This is an in-memory rollback, no error expected. 
+		_ = s.tx.Rollback()
+	}
+}
+
+func getOrCreateSubBucket(parent *bbolt.Bucket, name []byte) (*bbolt.Bucket, error) {
+	bucket := parent.Bucket(name)
+	if bucket == nil {
+		return parent.CreateBucket(name)
+	}
+	return bucket, nil
+}
+
+const blockMetadataBucketName = "block_metadata"
+
+var blockMetadataBucketNameBytes = []byte(blockMetadataBucketName)
+
+func getBlockMetadataBucket(tx *bbolt.Tx) (*bbolt.Bucket, error) {
+	mdb := tx.Bucket(blockMetadataBucketNameBytes)
+	if mdb == nil {
+		return nil, bbolt.ErrBucketNotFound
+	}
+	return mdb, nil
+}
+
+func updateBlockMetadataBucket(tx *bbolt.Tx, fn func(*bbolt.Bucket) error) error {
+	mdb, err := getBlockMetadataBucket(tx)
+	if err != nil {
+		return err
+	}
+	return fn(mdb)
+}
diff --git a/pkg/ingester-rf1/metastore/metastore_bootstrap.go b/pkg/ingester-rf1/metastore/metastore_bootstrap.go
new file mode 100644
index 0000000000000..b1e05d1b5a6ba
--- /dev/null
+++ b/pkg/ingester-rf1/metastore/metastore_bootstrap.go
@@ -0,0 +1,124 @@
+package metastore
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net"
+	"slices"
+	"strings"
+	"time"
+
+	"github.com/go-kit/log"
+	"github.com/go-kit/log/level"
+	"github.com/grafana/dskit/dns"
+	"github.com/hashicorp/raft"
+)
+
+func (m *Metastore) bootstrap() error {
+	peers, err := m.bootstrapPeers()
+	if err != nil {
+		return fmt.Errorf("failed to resolve peers: %w", err)
+	}
+	logger := log.With(m.logger,
+		"server_id", m.config.Raft.ServerID,
+		"advertise_address", m.config.Raft.AdvertiseAddress,
+		"peers", fmt.Sprint(peers))
+	if raft.ServerAddress(m.config.Raft.AdvertiseAddress) != peers[0].Address {
+		_ = level.Info(logger).Log("msg", "not the bootstrap node, skipping")
+		return nil
+	}
+	_ = level.Info(logger).Log("msg", "bootstrapping raft")
+	bootstrap := m.raft.BootstrapCluster(raft.Configuration{Servers: peers})
+	if bootstrapErr := bootstrap.Error(); bootstrapErr != nil {
+		if !errors.Is(bootstrapErr, raft.ErrCantBootstrap) {
+			return fmt.Errorf("failed to bootstrap raft: %w", bootstrapErr)
+		}
+	}
+	return nil
+}
+
+func (m *Metastore) bootstrapPeers() ([]raft.Server, error) {
+	// The peer list always includes the local node.
+	peers := make([]raft.Server, 0, len(m.config.Raft.BootstrapPeers)+1)
+	peers = append(peers, raft.Server{
+		Suffrage: raft.Voter,
+		ID:       raft.ServerID(m.config.Raft.ServerID),
+		Address:  raft.ServerAddress(m.config.Raft.AdvertiseAddress),
+	})
+	// Note that raft requires stable node IDs, therefore we're using
+	// the node FQDN:port for both purposes: as the identifier and as the
+	// address. This requires a DNS SRV record lookup without further
+	// resolution of A records (dnssrvnoa+).
+	//
+	// Alternatively, peers may be specified explicitly in the
+	// "{addr}/{node_id}" format, where the node identifier is optional
+	// (see parsePeer).
+	var resolve []string
+	for _, peer := range m.config.Raft.BootstrapPeers {
+		if strings.Contains(peer, "+") {
+			resolve = append(resolve, peer)
+		} else {
+			peers = append(peers, parsePeer(peer))
+		}
+	}
+	if len(resolve) > 0 {
+		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+		defer cancel()
+		prov := dns.NewProvider(m.logger, m.reg, dns.MiekgdnsResolverType)
+		if err := prov.Resolve(ctx, resolve); err != nil {
+			return nil, fmt.Errorf("failed to resolve bootstrap peers: %w", err)
+		}
+		resolvedPeers := prov.Addresses()
+		if len(resolvedPeers) == 0 {
+			// The local node is the only one in the cluster, but peers
+			// were supposed to be present. Stop here to avoid bootstrapping
+			// a single-node cluster.
+			return nil, fmt.Errorf("bootstrap peers can't be resolved")
+		}
+		for _, peer := range resolvedPeers {
+			peers = append(peers, raft.Server{
+				Suffrage: raft.Voter,
+				ID:       raft.ServerID(peer),
+				Address:  raft.ServerAddress(peer),
+			})
+		}
+	}
+	// Finally, we sort and deduplicate the peers: the first one
+	// bootstraps the cluster. If there are nodes with distinct
+	// IDs but the same address, bootstrapping will fail.
+	slices.SortFunc(peers, func(a, b raft.Server) int {
+		return strings.Compare(string(a.ID), string(b.ID))
+	})
+	peers = slices.CompactFunc(peers, func(a, b raft.Server) bool {
+		return a.ID == b.ID
+	})
+	return peers, nil
+}
+
+func parsePeer(raw string) raft.Server {
+	// The string may be "{addr}" or "{addr}/{node_id}".
+	parts := strings.SplitN(raw, "/", 2)
+	var addr string
+	var node string
+	if len(parts) == 2 {
+		addr = parts[0]
+		node = parts[1]
+	} else {
+		addr = raw
+	}
+	host, _, err := net.SplitHostPort(addr)
+	if err != nil {
+		// No port specified.
+		host = addr
+	}
+	if node == "" {
+		// No node_id specified.
+		node = host
+	}
+	return raft.Server{
+		Suffrage: raft.Voter,
+		ID:       raft.ServerID(node),
+		Address:  raft.ServerAddress(addr),
+	}
+}
diff --git a/pkg/ingester-rf1/metastore/metastore_fsm.go b/pkg/ingester-rf1/metastore/metastore_fsm.go
new file mode 100644
index 0000000000000..656d8d8c34ed7
--- /dev/null
+++ b/pkg/ingester-rf1/metastore/metastore_fsm.go
@@ -0,0 +1,206 @@
+package metastore
+
+import (
+	"fmt"
+	"io"
+	"reflect"
+	"time"
+
+	"github.com/go-kit/log"
+	"github.com/go-kit/log/level"
+	"github.com/gogo/protobuf/proto"
+	"github.com/hashicorp/raft"
+
+	metastorepb "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore/metastorepb"
+	"github.com/grafana/loki/v3/pkg/ingester-rf1/metastore/raftlogpb"
+)
+
+// The map is used to determine the type of the given command,
+// when the request is converted to a Raft log entry.
+var commandTypeMap = map[reflect.Type]raftlogpb.CommandType{
+	reflect.TypeOf(new(metastorepb.AddBlockRequest)): raftlogpb.COMMAND_TYPE_ADD_BLOCK,
+	reflect.TypeOf(new(raftlogpb.TruncateCommand)):   raftlogpb.COMMAND_TYPE_TRUNCATE,
+}
+
+// The map is used to determine the handler for the given command,
+// read from the Raft log entry.
+var commandHandlers = map[raftlogpb.CommandType]commandHandler{
+	raftlogpb.COMMAND_TYPE_ADD_BLOCK: func(fsm *FSM, raw []byte) fsmResponse {
+		return handleCommand(raw, fsm.state.applyAddBlock)
+	},
+	raftlogpb.COMMAND_TYPE_TRUNCATE: func(fsm *FSM, raw []byte) fsmResponse {
+		return handleCommand(raw, fsm.state.applyTruncate)
+	},
+}
+
+// TODO: Add registration functions.
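// Editor's note: the two maps above implement the FSM's command dispatch:
// a request is wrapped into a raftlogpb.RaftLogEntry tagged with its
// CommandType on the way into the raft log, and routed to the matching typed
// handler in FSM.Apply on the way out. A hypothetical sketch of the
// registration the TODO refers to (names invented for illustration):
//
//	commandTypeMap[reflect.TypeOf(new(raftlogpb.MyCommand))] = raftlogpb.COMMAND_TYPE_MY
//	commandHandlers[raftlogpb.COMMAND_TYPE_MY] = func(fsm *FSM, raw []byte) fsmResponse {
//		return handleCommand(raw, fsm.state.applyMyCommand)
//	}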
+
+type FSM struct {
+	logger log.Logger
+	state  *metastoreState
+	db     *boltdb
+}
+
+type fsmResponse struct {
+	msg proto.Message
+	err error
+}
+
+type fsmError struct {
+	log *raft.Log
+	err error
+}
+
+func errResponse(l *raft.Log, err error) fsmResponse {
+	return fsmResponse{err: &fsmError{log: l, err: err}}
+}
+
+func (e *fsmError) Error() string {
+	if e.err == nil {
+		return ""
+	}
+	if e.log == nil {
+		return e.err.Error()
+	}
+	return fmt.Sprintf("term: %d; index: %d; appended_at: %v; error: %v",
+		e.log.Term, e.log.Index, e.log.AppendedAt, e.err)
+}
+
+type commandHandler func(*FSM, []byte) fsmResponse
+
+// TODO(kolesnikovae): replace commandCall with interface:
+// type command[Req, Resp proto.Message] interface {
+//	apply(Req) (Resp, error)
+// }
+
+type commandCall[Req, Resp proto.Message] func(Req) (Resp, error)
+
+func newFSM(logger log.Logger, db *boltdb, state *metastoreState) *FSM {
+	return &FSM{
+		logger: logger,
+		db:     db,
+		state:  state,
+	}
+}
+
+// TODO(kolesnikovae): Implement BatchingFSM.
+
+func (fsm *FSM) Apply(l *raft.Log) interface{} {
+	switch l.Type {
+	case raft.LogNoop:
+	case raft.LogBarrier:
+	case raft.LogConfiguration:
+	case raft.LogCommand:
+		return fsm.applyCommand(l)
+	default:
+		_ = level.Warn(fsm.logger).Log("msg", "unexpected log entry, ignoring", "type", l.Type.String())
+	}
+	return nil
+}
+
+// applyCommand receives a raw command from the raft log (FSM.Apply),
+// and calls the corresponding handler on the _local_ FSM, based on
+// the command type.
+func (fsm *FSM) applyCommand(l *raft.Log) interface{} {
+	var e raftlogpb.RaftLogEntry
+	if err := proto.Unmarshal(l.Data, &e); err != nil {
+		return errResponse(l, err)
+	}
+	if handler, ok := commandHandlers[e.Type]; ok {
+		return handler(fsm, e.Payload)
+	}
+	return errResponse(l, fmt.Errorf("unknown command type: %v", e.Type.String()))
+}
+
+// handleCommand receives the payload of the command from the raft log
+// (FSM.Apply), and the function that processes the command. The returned
+// response is wrapped in fsmResponse and is available to the FSM.Apply caller.
+func handleCommand[Req, Resp proto.Message](raw []byte, call commandCall[Req, Resp]) fsmResponse {
+	var resp fsmResponse
+	defer func() {
+		if r := recover(); r != nil {
+			resp.err = panicError(r)
+		}
+	}()
+	req := newProto[Req]()
+	if resp.err = proto.Unmarshal(raw, req); resp.err != nil {
+		return resp
+	}
+	resp.msg, resp.err = call(req)
+	return resp
+}
+
+func newProto[T proto.Message]() T {
+	var msg T
+	msgType := reflect.TypeOf(msg).Elem()
+	return reflect.New(msgType).Interface().(T)
+}
+
+func (fsm *FSM) Snapshot() (raft.FSMSnapshot, error) {
+	// Snapshot should only capture a pointer to the state, and any
+	// expensive IO should happen as part of FSMSnapshot.Persist.
+	return fsm.db.createSnapshot()
+}
+
+func (fsm *FSM) Restore(snapshot io.ReadCloser) error {
+	_ = level.Info(fsm.logger).Log("msg", "restoring snapshot")
+	defer func() {
+		_ = snapshot.Close()
+	}()
+	if err := fsm.db.restore(snapshot); err != nil {
+		return fmt.Errorf("failed to restore from snapshot: %w", err)
+	}
+	if err := fsm.state.restore(fsm.db); err != nil {
+		return fmt.Errorf("failed to restore state: %w", err)
+	}
+	return nil
+}
+
+// applyCommand issues the command to the raft log based on the request type,
+// and returns the response of FSM.Apply.
+func applyCommand[Req, Resp proto.Message]( + log *raft.Raft, + req Req, + timeout time.Duration, +) ( + future raft.ApplyFuture, + resp Resp, + err error, +) { + defer func() { + if r := recover(); r != nil { + err = panicError(r) + } + }() + raw, err := marshallRequest(req) + if err != nil { + return nil, resp, err + } + future = log.Apply(raw, timeout) + if err = future.Error(); err != nil { + return nil, resp, err + } + fsmResp := future.Response().(fsmResponse) + if fsmResp.msg != nil { + resp, _ = fsmResp.msg.(Resp) + } + return future, resp, fsmResp.err +} + +func marshallRequest[Req proto.Message](req Req) ([]byte, error) { + cmdType, ok := commandTypeMap[reflect.TypeOf(req)] + if !ok { + return nil, fmt.Errorf("unknown command type: %T", req) + } + var err error + entry := raftlogpb.RaftLogEntry{Type: cmdType} + entry.Payload, err = proto.Marshal(req) + if err != nil { + return nil, err + } + raw, err := proto.Marshal(&entry) + if err != nil { + return nil, err + } + return raw, nil +} diff --git a/pkg/ingester-rf1/metastore/metastore_hack.go b/pkg/ingester-rf1/metastore/metastore_hack.go new file mode 100644 index 0000000000000..faef242d0217f --- /dev/null +++ b/pkg/ingester-rf1/metastore/metastore_hack.go @@ -0,0 +1,74 @@ +package metastore + +import ( + "time" + + "github.com/go-kit/log/level" + "github.com/hashicorp/raft" + "github.com/oklog/ulid" + "google.golang.org/protobuf/types/known/anypb" + + "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore/raftlogpb" +) + +// FIXME(kolesnikovae): +// Remove once compaction is implemented. +// Or use index instead of the timestamp. + +func (m *Metastore) cleanupLoop() { + t := time.NewTicker(10 * time.Minute) + defer func() { + t.Stop() + m.wg.Done() + }() + for { + select { + case <-m.done: + return + case <-t.C: + if m.raft.State() != raft.Leader { + continue + } + timestamp := uint64(time.Now().Add(-1 * time.Hour).UnixMilli()) + req := &raftlogpb.TruncateCommand{Timestamp: timestamp} + _, _, err := applyCommand[*raftlogpb.TruncateCommand, *anypb.Any](m.raft, req, m.config.Raft.ApplyTimeout) + if err != nil { + _ = level.Error(m.logger).Log("msg", "failed to apply truncate command", "err", err) + } + } + } +} + +func (m *metastoreState) applyTruncate(request *raftlogpb.TruncateCommand) (*anypb.Any, error) { + m.segmentsMutex.Lock() + defer m.segmentsMutex.Unlock() + tx, err := m.db.boltdb.Begin(true) + if err != nil { + _ = level.Error(m.logger).Log("msg", "failed to start transaction", "err", err) + return nil, err + } + var truncated int + defer func() { + if err = tx.Commit(); err != nil { + _ = level.Error(m.logger).Log("msg", "failed to commit transaction", "err", err) + return + } + _ = level.Info(m.logger).Log("msg", "stale segments truncated", "segments", truncated) + }() + bucket, err := getBlockMetadataBucket(tx) + if err != nil { + _ = level.Error(m.logger).Log("msg", "failed to get metadata bucket", "err", err) + return nil, err + } + for k, segment := range m.segments { + if ulid.MustParse(segment.Id).Time() < request.Timestamp { + if err = bucket.Delete([]byte(segment.Id)); err != nil { + _ = level.Error(m.logger).Log("msg", "failed to delete stale segments", "err", err) + continue + } + delete(m.segments, k) + truncated++ + } + } + return &anypb.Any{}, nil +} diff --git a/pkg/ingester-rf1/metastore/metastore_state.go b/pkg/ingester-rf1/metastore/metastore_state.go new file mode 100644 index 0000000000000..b450067fb9f68 --- /dev/null +++ b/pkg/ingester-rf1/metastore/metastore_state.go @@ -0,0 +1,78 @@ +package 
metastore
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+
+	"github.com/go-kit/log"
+	"github.com/gogo/protobuf/proto"
+	"go.etcd.io/bbolt"
+
+	metastorepb "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore/metastorepb"
+)
+
+type metastoreState struct {
+	logger log.Logger
+
+	segmentsMutex sync.Mutex
+	segments      map[string]*metastorepb.BlockMeta
+
+	db *boltdb
+}
+
+func newMetastoreState(logger log.Logger, db *boltdb) *metastoreState {
+	return &metastoreState{
+		logger:   logger,
+		segments: make(map[string]*metastorepb.BlockMeta),
+		db:       db,
+	}
+}
+
+func (m *metastoreState) reset() {
+	m.segmentsMutex.Lock()
+	clear(m.segments)
+	m.segmentsMutex.Unlock()
+}
+
+func (m *metastoreState) restore(db *boltdb) error {
+	m.reset()
+	return db.boltdb.View(func(tx *bbolt.Tx) error {
+		if err := m.restoreMetadata(tx); err != nil {
+			return fmt.Errorf("failed to restore metadata entries: %w", err)
+		}
+		return nil
+	})
+}
+
+func (m *metastoreState) restoreMetadata(tx *bbolt.Tx) error {
+	mdb, err := getBlockMetadataBucket(tx)
+	switch {
+	case err == nil:
+	case errors.Is(err, bbolt.ErrBucketNotFound):
+		return nil
+	default:
+		return err
+	}
+	return m.loadSegments(mdb)
+}
+
+func (m *metastoreState) putSegment(segment *metastorepb.BlockMeta) {
+	m.segmentsMutex.Lock()
+	m.segments[segment.Id] = segment
+	m.segmentsMutex.Unlock()
+}
+
+func (m *metastoreState) loadSegments(b *bbolt.Bucket) error {
+	m.segmentsMutex.Lock()
+	defer m.segmentsMutex.Unlock()
+	c := b.Cursor()
+	for k, v := c.First(); k != nil; k, v = c.Next() {
+		var md metastorepb.BlockMeta
+		if err := proto.Unmarshal(v, &md); err != nil {
+			return fmt.Errorf("failed to unmarshal block %q: %w", string(k), err)
+		}
+		m.segments[md.Id] = &md
+	}
+	return nil
+}
diff --git a/pkg/ingester-rf1/metastore/metastore_state_add_block.go b/pkg/ingester-rf1/metastore/metastore_state_add_block.go
new file mode 100644
index 0000000000000..ddf8eb2732763
--- /dev/null
+++ b/pkg/ingester-rf1/metastore/metastore_state_add_block.go
@@ -0,0 +1,47 @@
+package metastore
+
+import (
+	"context"
+
+	"github.com/go-kit/log/level"
+	"github.com/gogo/protobuf/proto"
+	"go.etcd.io/bbolt"
+
+	metastorepb "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore/metastorepb"
+)
+
+func (m *Metastore) AddBlock(_ context.Context, req *metastorepb.AddBlockRequest) (*metastorepb.AddBlockResponse, error) {
+	_, resp, err := applyCommand[*metastorepb.AddBlockRequest, *metastorepb.AddBlockResponse](m.raft, req, m.config.Raft.ApplyTimeout)
+	return resp, err
+}
+
+func (m *metastoreState) applyAddBlock(request *metastorepb.AddBlockRequest) (*metastorepb.AddBlockResponse, error) {
+	_ = level.Info(m.logger).Log("msg", "adding block", "block_id", request.Block.Id)
+	if request.Block.CompactionLevel != 0 {
+		_ = level.Error(m.logger).Log(
+			"msg", "compaction not implemented, ignoring block with non-zero compaction level",
+			"compaction_level", request.Block.CompactionLevel,
+			"block", request.Block.Id,
+		)
+		return &metastorepb.AddBlockResponse{}, nil
+	}
+	value, err := proto.Marshal(request.Block)
+	if err != nil {
+		return nil, err
+	}
+	err = m.db.boltdb.Update(func(tx *bbolt.Tx) error {
+		return updateBlockMetadataBucket(tx, func(bucket *bbolt.Bucket) error {
+			return bucket.Put([]byte(request.Block.Id), value)
+		})
+	})
+	if err != nil {
+		_ = level.Error(m.logger).Log(
+			"msg", "failed to add block",
+			"block", request.Block.Id,
+			"err", err,
+		)
+		return nil, err
+	}
+	m.putSegment(request.Block)
+	return &metastorepb.AddBlockResponse{}, nil
+}
diff --git 
a/pkg/ingester-rf1/metastore/metastore_state_list_blocks_for_query.go b/pkg/ingester-rf1/metastore/metastore_state_list_blocks_for_query.go new file mode 100644 index 0000000000000..cae86229cad63 --- /dev/null +++ b/pkg/ingester-rf1/metastore/metastore_state_list_blocks_for_query.go @@ -0,0 +1,79 @@ +package metastore + +import ( + "context" + "slices" + "strings" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore/metastorepb" +) + +func (m *Metastore) ListBlocksForQuery( + ctx context.Context, + request *metastorepb.ListBlocksForQueryRequest, +) ( + *metastorepb.ListBlocksForQueryResponse, error, +) { + return m.state.listBlocksForQuery(ctx, request) +} + +func (m *metastoreState) listBlocksForQuery( + _ context.Context, + request *metastorepb.ListBlocksForQueryRequest, +) ( + *metastorepb.ListBlocksForQueryResponse, error, +) { + if len(request.TenantId) == 0 { + return nil, status.Error(codes.InvalidArgument, "tenant_id is required") + } + + if request.StartTime > request.EndTime { + return nil, status.Error(codes.InvalidArgument, "start_time must be less than or equal to end_time") + } + var resp metastorepb.ListBlocksForQueryResponse + m.segmentsMutex.Lock() + defer m.segmentsMutex.Unlock() + + for _, segment := range m.segments { + for _, tenants := range segment.TenantStreams { + if tenants.TenantId == request.TenantId && inRange(segment.MinTime, segment.MaxTime, request.StartTime, request.EndTime) { + resp.Blocks = append(resp.Blocks, cloneBlockForQuery(segment)) + break + } + } + } + slices.SortFunc(resp.Blocks, func(a, b *metastorepb.BlockMeta) int { + return strings.Compare(a.Id, b.Id) + }) + return &resp, nil +} + +func inRange(blockStart, blockEnd, queryStart, queryEnd int64) bool { + return blockStart <= queryEnd && blockEnd >= queryStart +} + +func cloneBlockForQuery(b *metastorepb.BlockMeta) *metastorepb.BlockMeta { + res := &metastorepb.BlockMeta{ + Id: b.Id, + MinTime: b.MinTime, + MaxTime: b.MaxTime, + CompactionLevel: b.CompactionLevel, + FormatVersion: b.FormatVersion, + IndexRef: metastorepb.DataRef{ + Offset: b.IndexRef.Offset, + Length: b.IndexRef.Length, + }, + TenantStreams: make([]*metastorepb.TenantStreams, 0, len(b.TenantStreams)), + } + for _, svc := range b.TenantStreams { + res.TenantStreams = append(res.TenantStreams, &metastorepb.TenantStreams{ + TenantId: svc.TenantId, + MinTime: svc.MinTime, + MaxTime: svc.MaxTime, + }) + } + return res +} diff --git a/pkg/ingester-rf1/metastore/metastore_state_list_blocks_for_query_test.go b/pkg/ingester-rf1/metastore/metastore_state_list_blocks_for_query_test.go new file mode 100644 index 0000000000000..3bd8aeb7e4f3a --- /dev/null +++ b/pkg/ingester-rf1/metastore/metastore_state_list_blocks_for_query_test.go @@ -0,0 +1,128 @@ +package metastore + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + metastorepb "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore/metastorepb" +) + +func TestMetastore_ListBlocksForQuery(t *testing.T) { + block1, block2, block3 := &metastorepb.BlockMeta{ + Id: "block1", + MinTime: 0, + MaxTime: 100, + TenantStreams: []*metastorepb.TenantStreams{ + { + TenantId: "tenant1", + MinTime: 0, + MaxTime: 50, + }, + }, + }, &metastorepb.BlockMeta{ + Id: "block2", + MinTime: 100, + MaxTime: 200, + TenantStreams: []*metastorepb.TenantStreams{ + { + TenantId: "tenant1", + MinTime: 100, + MaxTime: 150, + }, + }, + }, &metastorepb.BlockMeta{ + Id: "block3", + MinTime: 200, + MaxTime: 300, + 
TenantStreams: []*metastorepb.TenantStreams{ + { + TenantId: "tenant2", + MinTime: 200, + MaxTime: 250, + }, + { + TenantId: "tenant1", + MinTime: 200, + MaxTime: 250, + }, + }, + } + m := &Metastore{ + state: &metastoreState{ + segments: map[string]*metastorepb.BlockMeta{ + "block1": block1, + "block2": block2, + "block3": block3, + }, + }, + } + + tests := []struct { + name string + request *metastorepb.ListBlocksForQueryRequest + expectedResponse *metastorepb.ListBlocksForQueryResponse + }{ + { + name: "Matching tenant and time range", + request: &metastorepb.ListBlocksForQueryRequest{ + TenantId: "tenant1", + StartTime: 0, + EndTime: 100, + }, + expectedResponse: &metastorepb.ListBlocksForQueryResponse{ + Blocks: []*metastorepb.BlockMeta{ + block1, + block2, + }, + }, + }, + { + name: "Matching tenant but partial time range", + request: &metastorepb.ListBlocksForQueryRequest{ + TenantId: "tenant1", + StartTime: 50, + EndTime: 150, + }, + expectedResponse: &metastorepb.ListBlocksForQueryResponse{ + Blocks: []*metastorepb.BlockMeta{ + block1, + block2, + }, + }, + }, + { + name: "Non-matching tenant", + request: &metastorepb.ListBlocksForQueryRequest{ + TenantId: "tenant3", + StartTime: 0, + EndTime: 100, + }, + expectedResponse: &metastorepb.ListBlocksForQueryResponse{}, + }, + { + name: "Matching one tenant but not the other", + request: &metastorepb.ListBlocksForQueryRequest{ + TenantId: "tenant1", + StartTime: 100, + EndTime: 550, + }, + expectedResponse: &metastorepb.ListBlocksForQueryResponse{ + Blocks: []*metastorepb.BlockMeta{ + block1, + block2, + block3, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + resp, err := m.ListBlocksForQuery(context.Background(), test.request) + require.NoError(t, err) + require.Equal(t, test.expectedResponse, resp) + }) + } +} diff --git a/pkg/ingester-rf1/metastore/metastorepb/metastore.pb.go b/pkg/ingester-rf1/metastore/metastorepb/metastore.pb.go new file mode 100644 index 0000000000000..8e19fcaa38628 --- /dev/null +++ b/pkg/ingester-rf1/metastore/metastorepb/metastore.pb.go @@ -0,0 +1,2310 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: pkg/ingester-rf1/metastore/metastorepb/metastore.proto + +package metastorepb + +import ( + context "context" + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type AddBlockRequest struct { + Block *BlockMeta `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` +} + +func (m *AddBlockRequest) Reset() { *m = AddBlockRequest{} } +func (*AddBlockRequest) ProtoMessage() {} +func (*AddBlockRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_43ce85359599db4e, []int{0} +} +func (m *AddBlockRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AddBlockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AddBlockRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AddBlockRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddBlockRequest.Merge(m, src) +} +func (m *AddBlockRequest) XXX_Size() int { + return m.Size() +} +func (m *AddBlockRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AddBlockRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AddBlockRequest proto.InternalMessageInfo + +func (m *AddBlockRequest) GetBlock() *BlockMeta { + if m != nil { + return m.Block + } + return nil +} + +type AddBlockResponse struct { +} + +func (m *AddBlockResponse) Reset() { *m = AddBlockResponse{} } +func (*AddBlockResponse) ProtoMessage() {} +func (*AddBlockResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_43ce85359599db4e, []int{1} +} +func (m *AddBlockResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AddBlockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AddBlockResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AddBlockResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddBlockResponse.Merge(m, src) +} +func (m *AddBlockResponse) XXX_Size() int { + return m.Size() +} +func (m *AddBlockResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AddBlockResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AddBlockResponse proto.InternalMessageInfo + +type BlockMeta struct { + FormatVersion uint64 `protobuf:"varint,1,opt,name=format_version,json=formatVersion,proto3" json:"format_version,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + MinTime int64 `protobuf:"varint,3,opt,name=min_time,json=minTime,proto3" json:"min_time,omitempty"` + MaxTime int64 `protobuf:"varint,4,opt,name=max_time,json=maxTime,proto3" json:"max_time,omitempty"` + CompactionLevel uint32 `protobuf:"varint,6,opt,name=compaction_level,json=compactionLevel,proto3" json:"compaction_level,omitempty"` + IndexRef DataRef `protobuf:"bytes,7,opt,name=indexRef,proto3" json:"indexRef"` + TenantStreams []*TenantStreams `protobuf:"bytes,8,rep,name=tenant_streams,json=tenantStreams,proto3" json:"tenant_streams,omitempty"` +} + +func (m *BlockMeta) Reset() { *m = BlockMeta{} } +func (*BlockMeta) ProtoMessage() {} +func (*BlockMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_43ce85359599db4e, []int{2} +} +func (m *BlockMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlockMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlockMeta.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BlockMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockMeta.Merge(m, src) +} +func (m *BlockMeta) XXX_Size() int { + return m.Size() +} +func (m *BlockMeta) XXX_DiscardUnknown() { + xxx_messageInfo_BlockMeta.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockMeta proto.InternalMessageInfo + +func (m *BlockMeta) GetFormatVersion() uint64 { + if m != nil { + return m.FormatVersion + } + return 0 +} + +func (m *BlockMeta) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *BlockMeta) GetMinTime() int64 { + if m != nil { + return m.MinTime + } + return 0 +} + +func (m *BlockMeta) GetMaxTime() int64 { + if m != nil { + return m.MaxTime + } + return 0 +} + +func (m *BlockMeta) GetCompactionLevel() uint32 { + if m != nil { + return m.CompactionLevel + } + return 0 +} + +func (m *BlockMeta) GetIndexRef() DataRef { + if m != nil { + return m.IndexRef + } + return DataRef{} +} + +func (m *BlockMeta) GetTenantStreams() []*TenantStreams { + if m != nil { + return m.TenantStreams + } + return nil +} + +type DataRef struct { + Offset int64 `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"` + Length int64 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"` +} + +func (m *DataRef) Reset() { *m = DataRef{} } +func (*DataRef) ProtoMessage() {} +func (*DataRef) Descriptor() ([]byte, []int) { + return fileDescriptor_43ce85359599db4e, []int{3} +} +func (m *DataRef) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DataRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DataRef.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DataRef) XXX_Merge(src proto.Message) { + xxx_messageInfo_DataRef.Merge(m, src) +} +func (m *DataRef) XXX_Size() int { + return m.Size() +} +func (m *DataRef) XXX_DiscardUnknown() { + xxx_messageInfo_DataRef.DiscardUnknown(m) +} + +var xxx_messageInfo_DataRef proto.InternalMessageInfo + +func (m *DataRef) GetOffset() int64 { + if m != nil { + return m.Offset + } + return 0 +} + +func (m *DataRef) GetLength() int64 { + if m != nil { + return m.Length + } + return 0 +} + +// TenantStreams object points to the offset in the block at which +// the tenant streams data is located. 
+type TenantStreams struct { + TenantId string `protobuf:"bytes,1,opt,name=tenant_id,json=tenantId,proto3" json:"tenant_id,omitempty"` + MinTime int64 `protobuf:"varint,3,opt,name=min_time,json=minTime,proto3" json:"min_time,omitempty"` + MaxTime int64 `protobuf:"varint,4,opt,name=max_time,json=maxTime,proto3" json:"max_time,omitempty"` +} + +func (m *TenantStreams) Reset() { *m = TenantStreams{} } +func (*TenantStreams) ProtoMessage() {} +func (*TenantStreams) Descriptor() ([]byte, []int) { + return fileDescriptor_43ce85359599db4e, []int{4} +} +func (m *TenantStreams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TenantStreams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TenantStreams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TenantStreams) XXX_Merge(src proto.Message) { + xxx_messageInfo_TenantStreams.Merge(m, src) +} +func (m *TenantStreams) XXX_Size() int { + return m.Size() +} +func (m *TenantStreams) XXX_DiscardUnknown() { + xxx_messageInfo_TenantStreams.DiscardUnknown(m) +} + +var xxx_messageInfo_TenantStreams proto.InternalMessageInfo + +func (m *TenantStreams) GetTenantId() string { + if m != nil { + return m.TenantId + } + return "" +} + +func (m *TenantStreams) GetMinTime() int64 { + if m != nil { + return m.MinTime + } + return 0 +} + +func (m *TenantStreams) GetMaxTime() int64 { + if m != nil { + return m.MaxTime + } + return 0 +} + +type ListBlocksForQueryRequest struct { + TenantId string `protobuf:"bytes,1,opt,name=tenant_id,json=tenantId,proto3" json:"tenant_id,omitempty"` + StartTime int64 `protobuf:"varint,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + EndTime int64 `protobuf:"varint,3,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` +} + +func (m *ListBlocksForQueryRequest) Reset() { *m = ListBlocksForQueryRequest{} } +func (*ListBlocksForQueryRequest) ProtoMessage() {} +func (*ListBlocksForQueryRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_43ce85359599db4e, []int{5} +} +func (m *ListBlocksForQueryRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListBlocksForQueryRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListBlocksForQueryRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListBlocksForQueryRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListBlocksForQueryRequest.Merge(m, src) +} +func (m *ListBlocksForQueryRequest) XXX_Size() int { + return m.Size() +} +func (m *ListBlocksForQueryRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListBlocksForQueryRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListBlocksForQueryRequest proto.InternalMessageInfo + +func (m *ListBlocksForQueryRequest) GetTenantId() string { + if m != nil { + return m.TenantId + } + return "" +} + +func (m *ListBlocksForQueryRequest) GetStartTime() int64 { + if m != nil { + return m.StartTime + } + return 0 +} + +func (m *ListBlocksForQueryRequest) GetEndTime() int64 { + if m != nil { + return m.EndTime + } + return 0 +} + +type ListBlocksForQueryResponse struct { + Blocks []*BlockMeta `protobuf:"bytes,1,rep,name=blocks,proto3" json:"blocks,omitempty"` +} + +func (m 
*ListBlocksForQueryResponse) Reset() { *m = ListBlocksForQueryResponse{} } +func (*ListBlocksForQueryResponse) ProtoMessage() {} +func (*ListBlocksForQueryResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_43ce85359599db4e, []int{6} +} +func (m *ListBlocksForQueryResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListBlocksForQueryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListBlocksForQueryResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListBlocksForQueryResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListBlocksForQueryResponse.Merge(m, src) +} +func (m *ListBlocksForQueryResponse) XXX_Size() int { + return m.Size() +} +func (m *ListBlocksForQueryResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListBlocksForQueryResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListBlocksForQueryResponse proto.InternalMessageInfo + +func (m *ListBlocksForQueryResponse) GetBlocks() []*BlockMeta { + if m != nil { + return m.Blocks + } + return nil +} + +func init() { + proto.RegisterType((*AddBlockRequest)(nil), "metastorepb.AddBlockRequest") + proto.RegisterType((*AddBlockResponse)(nil), "metastorepb.AddBlockResponse") + proto.RegisterType((*BlockMeta)(nil), "metastorepb.BlockMeta") + proto.RegisterType((*DataRef)(nil), "metastorepb.DataRef") + proto.RegisterType((*TenantStreams)(nil), "metastorepb.TenantStreams") + proto.RegisterType((*ListBlocksForQueryRequest)(nil), "metastorepb.ListBlocksForQueryRequest") + proto.RegisterType((*ListBlocksForQueryResponse)(nil), "metastorepb.ListBlocksForQueryResponse") +} + +func init() { + proto.RegisterFile("pkg/ingester-rf1/metastore/metastorepb/metastore.proto", fileDescriptor_43ce85359599db4e) +} + +var fileDescriptor_43ce85359599db4e = []byte{ + // 558 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0xc1, 0x6e, 0x13, 0x3b, + 0x14, 0x1d, 0x27, 0x7d, 0x69, 0xe2, 0x2a, 0x6d, 0x65, 0x55, 0xd5, 0x34, 0x8f, 0x9a, 0x68, 0x24, + 0x20, 0x48, 0x90, 0x8a, 0x82, 0x2a, 0xb1, 0x42, 0x8d, 0x10, 0x52, 0xa5, 0x74, 0x81, 0x5b, 0xb1, + 0x8d, 0x26, 0x99, 0x3b, 0x83, 0xd5, 0x8c, 0x1d, 0x6c, 0x37, 0x0a, 0x3b, 0x3e, 0x81, 0x2f, 0x60, + 0xcd, 0x97, 0xa0, 0x2e, 0xb3, 0xec, 0x0a, 0x91, 0xc9, 0x86, 0x65, 0x3f, 0x01, 0xc5, 0x33, 0x69, + 0x32, 0x40, 0x40, 0x62, 0xe7, 0x7b, 0xce, 0x99, 0x73, 0xaf, 0xef, 0x1c, 0xe3, 0xa3, 0xc1, 0x45, + 0x74, 0xc0, 0x45, 0x04, 0xda, 0x80, 0x7a, 0xac, 0xc2, 0x27, 0x07, 0x31, 0x18, 0x5f, 0x1b, 0xa9, + 0x60, 0x71, 0x1a, 0x74, 0x17, 0xe7, 0xe6, 0x40, 0x49, 0x23, 0xc9, 0xc6, 0x12, 0x59, 0xdb, 0x89, + 0x64, 0x24, 0x2d, 0x7e, 0x30, 0x3b, 0xa5, 0x12, 0xef, 0x05, 0xde, 0x3a, 0x0e, 0x82, 0x56, 0x5f, + 0xf6, 0x2e, 0x18, 0xbc, 0xbb, 0x04, 0x6d, 0xc8, 0x23, 0xfc, 0x5f, 0x77, 0x56, 0xbb, 0xa8, 0x8e, + 0x1a, 0x1b, 0x87, 0xbb, 0xcd, 0x25, 0x97, 0xa6, 0x55, 0x9e, 0x82, 0xf1, 0x59, 0x2a, 0xf2, 0x08, + 0xde, 0x5e, 0x18, 0xe8, 0x81, 0x14, 0x1a, 0xbc, 0x4f, 0x05, 0x5c, 0xb9, 0x15, 0x92, 0x7b, 0x78, + 0x33, 0x94, 0x2a, 0xf6, 0x4d, 0x67, 0x08, 0x4a, 0x73, 0x29, 0xac, 0xf1, 0x1a, 0xab, 0xa6, 0xe8, + 0x9b, 0x14, 0x24, 0x9b, 0xb8, 0xc0, 0x03, 0xb7, 0x50, 0x47, 0x8d, 0x0a, 0x2b, 0xf0, 0x80, 0xec, + 0xe1, 0x72, 0xcc, 0x45, 0xc7, 0xf0, 0x18, 0xdc, 0x62, 0x1d, 0x35, 0x8a, 0x6c, 0x3d, 0xe6, 0xe2, + 0x9c, 0xc7, 0x60, 0x29, 0x7f, 0x94, 0x52, 0x6b, 0x19, 0xe5, 0x8f, 0x2c, 
0xf5, 0x10, 0x6f, 0xf7, + 0x64, 0x3c, 0xf0, 0x7b, 0x86, 0x4b, 0xd1, 0xe9, 0xc3, 0x10, 0xfa, 0x6e, 0xa9, 0x8e, 0x1a, 0x55, + 0xb6, 0xb5, 0xc0, 0xdb, 0x33, 0x98, 0x1c, 0xe1, 0x32, 0x17, 0x01, 0x8c, 0x18, 0x84, 0xee, 0xba, + 0xbd, 0xea, 0x4e, 0xee, 0xaa, 0x2f, 0x7d, 0xe3, 0x33, 0x08, 0x5b, 0x6b, 0x57, 0x5f, 0xef, 0x3a, + 0xec, 0x56, 0x4b, 0x8e, 0xf1, 0xa6, 0x01, 0xe1, 0x0b, 0xd3, 0xd1, 0x46, 0x81, 0x1f, 0x6b, 0xb7, + 0x5c, 0x2f, 0x36, 0x36, 0x0e, 0x6b, 0xb9, 0xaf, 0xcf, 0xad, 0xe4, 0x2c, 0x55, 0xb0, 0xaa, 0x59, + 0x2e, 0xbd, 0xe7, 0x78, 0x3d, 0x73, 0x27, 0xbb, 0xb8, 0x24, 0xc3, 0x50, 0x83, 0xb1, 0x5b, 0x29, + 0xb2, 0xac, 0x9a, 0xe1, 0x7d, 0x10, 0x91, 0x79, 0x6b, 0x57, 0x52, 0x64, 0x59, 0xe5, 0x75, 0x71, + 0x35, 0x67, 0x4d, 0xfe, 0xc7, 0x95, 0x6c, 0x1c, 0x1e, 0x58, 0x8f, 0x0a, 0x2b, 0xa7, 0xc0, 0xc9, + 0x3f, 0x2e, 0xd1, 0x53, 0x78, 0xaf, 0xcd, 0xb5, 0xb1, 0xbf, 0x50, 0xbf, 0x92, 0xea, 0xf5, 0x25, + 0xa8, 0xf7, 0xf3, 0x78, 0xfc, 0xb1, 0xdf, 0x3e, 0xc6, 0xda, 0xf8, 0xca, 0xa4, 0xb6, 0xe9, 0xe4, + 0x15, 0x8b, 0xcc, 0x7b, 0x82, 0x08, 0x72, 0xe3, 0x80, 0x08, 0x6c, 0xcf, 0x36, 0xae, 0xfd, 0xae, + 0x67, 0x9a, 0x28, 0xd2, 0xc4, 0x25, 0x1b, 0x37, 0xed, 0x22, 0xbb, 0xeb, 0x55, 0xa1, 0xcc, 0x54, + 0x87, 0x5f, 0x10, 0xde, 0x3e, 0x9d, 0x2b, 0xce, 0x40, 0x0d, 0x79, 0x0f, 0xc8, 0x09, 0x2e, 0xcf, + 0xa3, 0x4a, 0xee, 0xe4, 0x0c, 0x7e, 0x7a, 0x02, 0xb5, 0xfd, 0x15, 0x6c, 0x96, 0x6f, 0x87, 0x44, + 0x98, 0xfc, 0x3a, 0x2d, 0xb9, 0x9f, 0xfb, 0x6c, 0xe5, 0x0a, 0x6b, 0x0f, 0xfe, 0xaa, 0x9b, 0x37, + 0x6a, 0x3d, 0x1b, 0x4f, 0xa8, 0x73, 0x3d, 0xa1, 0xce, 0xcd, 0x84, 0xa2, 0x0f, 0x09, 0x45, 0x9f, + 0x13, 0x8a, 0xae, 0x12, 0x8a, 0xc6, 0x09, 0x45, 0xdf, 0x12, 0x8a, 0xbe, 0x27, 0xd4, 0xb9, 0x49, + 0x28, 0xfa, 0x38, 0xa5, 0xce, 0x78, 0x4a, 0x9d, 0xeb, 0x29, 0x75, 0xba, 0x25, 0xfb, 0xb8, 0x9f, + 0xfe, 0x08, 0x00, 0x00, 0xff, 0xff, 0xfa, 0xbc, 0xd9, 0xf7, 0x39, 0x04, 0x00, 0x00, +} + +func (this *AddBlockRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*AddBlockRequest) + if !ok { + that2, ok := that.(AddBlockRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Block.Equal(that1.Block) { + return false + } + return true +} +func (this *AddBlockResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*AddBlockResponse) + if !ok { + that2, ok := that.(AddBlockResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true +} +func (this *BlockMeta) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*BlockMeta) + if !ok { + that2, ok := that.(BlockMeta) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.FormatVersion != that1.FormatVersion { + return false + } + if this.Id != that1.Id { + return false + } + if this.MinTime != that1.MinTime { + return false + } + if this.MaxTime != that1.MaxTime { + return false + } + if this.CompactionLevel != that1.CompactionLevel { + return false + } + if !this.IndexRef.Equal(&that1.IndexRef) { + return false + } + if len(this.TenantStreams) != len(that1.TenantStreams) { + return false + } + for i := range this.TenantStreams { + if !this.TenantStreams[i].Equal(that1.TenantStreams[i]) { + return false + } + } + return 
true +} +func (this *DataRef) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DataRef) + if !ok { + that2, ok := that.(DataRef) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Offset != that1.Offset { + return false + } + if this.Length != that1.Length { + return false + } + return true +} +func (this *TenantStreams) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TenantStreams) + if !ok { + that2, ok := that.(TenantStreams) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.TenantId != that1.TenantId { + return false + } + if this.MinTime != that1.MinTime { + return false + } + if this.MaxTime != that1.MaxTime { + return false + } + return true +} +func (this *ListBlocksForQueryRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ListBlocksForQueryRequest) + if !ok { + that2, ok := that.(ListBlocksForQueryRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.TenantId != that1.TenantId { + return false + } + if this.StartTime != that1.StartTime { + return false + } + if this.EndTime != that1.EndTime { + return false + } + return true +} +func (this *ListBlocksForQueryResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ListBlocksForQueryResponse) + if !ok { + that2, ok := that.(ListBlocksForQueryResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Blocks) != len(that1.Blocks) { + return false + } + for i := range this.Blocks { + if !this.Blocks[i].Equal(that1.Blocks[i]) { + return false + } + } + return true +} +func (this *AddBlockRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&metastorepb.AddBlockRequest{") + if this.Block != nil { + s = append(s, "Block: "+fmt.Sprintf("%#v", this.Block)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *AddBlockResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&metastorepb.AddBlockResponse{") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *BlockMeta) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&metastorepb.BlockMeta{") + s = append(s, "FormatVersion: "+fmt.Sprintf("%#v", this.FormatVersion)+",\n") + s = append(s, "Id: "+fmt.Sprintf("%#v", this.Id)+",\n") + s = append(s, "MinTime: "+fmt.Sprintf("%#v", this.MinTime)+",\n") + s = append(s, "MaxTime: "+fmt.Sprintf("%#v", this.MaxTime)+",\n") + s = append(s, "CompactionLevel: "+fmt.Sprintf("%#v", this.CompactionLevel)+",\n") + s = append(s, "IndexRef: "+strings.Replace(this.IndexRef.GoString(), `&`, ``, 1)+",\n") + if this.TenantStreams != nil { + s = append(s, "TenantStreams: "+fmt.Sprintf("%#v", this.TenantStreams)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DataRef) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&metastorepb.DataRef{") + s = 
append(s, "Offset: "+fmt.Sprintf("%#v", this.Offset)+",\n") + s = append(s, "Length: "+fmt.Sprintf("%#v", this.Length)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TenantStreams) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&metastorepb.TenantStreams{") + s = append(s, "TenantId: "+fmt.Sprintf("%#v", this.TenantId)+",\n") + s = append(s, "MinTime: "+fmt.Sprintf("%#v", this.MinTime)+",\n") + s = append(s, "MaxTime: "+fmt.Sprintf("%#v", this.MaxTime)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ListBlocksForQueryRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&metastorepb.ListBlocksForQueryRequest{") + s = append(s, "TenantId: "+fmt.Sprintf("%#v", this.TenantId)+",\n") + s = append(s, "StartTime: "+fmt.Sprintf("%#v", this.StartTime)+",\n") + s = append(s, "EndTime: "+fmt.Sprintf("%#v", this.EndTime)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ListBlocksForQueryResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&metastorepb.ListBlocksForQueryResponse{") + if this.Blocks != nil { + s = append(s, "Blocks: "+fmt.Sprintf("%#v", this.Blocks)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringMetastore(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MetastoreServiceClient is the client API for MetastoreService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MetastoreServiceClient interface { + AddBlock(ctx context.Context, in *AddBlockRequest, opts ...grpc.CallOption) (*AddBlockResponse, error) + ListBlocksForQuery(ctx context.Context, in *ListBlocksForQueryRequest, opts ...grpc.CallOption) (*ListBlocksForQueryResponse, error) +} + +type metastoreServiceClient struct { + cc *grpc.ClientConn +} + +func NewMetastoreServiceClient(cc *grpc.ClientConn) MetastoreServiceClient { + return &metastoreServiceClient{cc} +} + +func (c *metastoreServiceClient) AddBlock(ctx context.Context, in *AddBlockRequest, opts ...grpc.CallOption) (*AddBlockResponse, error) { + out := new(AddBlockResponse) + err := c.cc.Invoke(ctx, "/metastorepb.MetastoreService/AddBlock", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metastoreServiceClient) ListBlocksForQuery(ctx context.Context, in *ListBlocksForQueryRequest, opts ...grpc.CallOption) (*ListBlocksForQueryResponse, error) { + out := new(ListBlocksForQueryResponse) + err := c.cc.Invoke(ctx, "/metastorepb.MetastoreService/ListBlocksForQuery", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MetastoreServiceServer is the server API for MetastoreService service. 
+type MetastoreServiceServer interface { + AddBlock(context.Context, *AddBlockRequest) (*AddBlockResponse, error) + ListBlocksForQuery(context.Context, *ListBlocksForQueryRequest) (*ListBlocksForQueryResponse, error) +} + +// UnimplementedMetastoreServiceServer can be embedded to have forward compatible implementations. +type UnimplementedMetastoreServiceServer struct { +} + +func (*UnimplementedMetastoreServiceServer) AddBlock(ctx context.Context, req *AddBlockRequest) (*AddBlockResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AddBlock not implemented") +} +func (*UnimplementedMetastoreServiceServer) ListBlocksForQuery(ctx context.Context, req *ListBlocksForQueryRequest) (*ListBlocksForQueryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListBlocksForQuery not implemented") +} + +func RegisterMetastoreServiceServer(s *grpc.Server, srv MetastoreServiceServer) { + s.RegisterService(&_MetastoreService_serviceDesc, srv) +} + +func _MetastoreService_AddBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AddBlockRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetastoreServiceServer).AddBlock(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/metastorepb.MetastoreService/AddBlock", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetastoreServiceServer).AddBlock(ctx, req.(*AddBlockRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetastoreService_ListBlocksForQuery_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListBlocksForQueryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetastoreServiceServer).ListBlocksForQuery(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/metastorepb.MetastoreService/ListBlocksForQuery", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetastoreServiceServer).ListBlocksForQuery(ctx, req.(*ListBlocksForQueryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _MetastoreService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "metastorepb.MetastoreService", + HandlerType: (*MetastoreServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AddBlock", + Handler: _MetastoreService_AddBlock_Handler, + }, + { + MethodName: "ListBlocksForQuery", + Handler: _MetastoreService_ListBlocksForQuery_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "pkg/ingester-rf1/metastore/metastorepb/metastore.proto", +} + +func (m *AddBlockRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AddBlockRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AddBlockRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Block != nil { + { + size, err := m.Block.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetastore(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return 
len(dAtA) - i, nil +} + +func (m *AddBlockResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AddBlockResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AddBlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *BlockMeta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlockMeta) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlockMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TenantStreams) > 0 { + for iNdEx := len(m.TenantStreams) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TenantStreams[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetastore(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + { + size, err := m.IndexRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetastore(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + if m.CompactionLevel != 0 { + i = encodeVarintMetastore(dAtA, i, uint64(m.CompactionLevel)) + i-- + dAtA[i] = 0x30 + } + if m.MaxTime != 0 { + i = encodeVarintMetastore(dAtA, i, uint64(m.MaxTime)) + i-- + dAtA[i] = 0x20 + } + if m.MinTime != 0 { + i = encodeVarintMetastore(dAtA, i, uint64(m.MinTime)) + i-- + dAtA[i] = 0x18 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintMetastore(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0x12 + } + if m.FormatVersion != 0 { + i = encodeVarintMetastore(dAtA, i, uint64(m.FormatVersion)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *DataRef) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DataRef) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DataRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Length != 0 { + i = encodeVarintMetastore(dAtA, i, uint64(m.Length)) + i-- + dAtA[i] = 0x10 + } + if m.Offset != 0 { + i = encodeVarintMetastore(dAtA, i, uint64(m.Offset)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TenantStreams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TenantStreams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TenantStreams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxTime != 0 { + i = encodeVarintMetastore(dAtA, i, uint64(m.MaxTime)) + i-- + dAtA[i] = 0x20 + } + if m.MinTime != 0 { + i = encodeVarintMetastore(dAtA, i, uint64(m.MinTime)) + i-- + dAtA[i] = 0x18 + } + if len(m.TenantId) > 0 { + i -= len(m.TenantId) 
+ copy(dAtA[i:], m.TenantId) + i = encodeVarintMetastore(dAtA, i, uint64(len(m.TenantId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListBlocksForQueryRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListBlocksForQueryRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListBlocksForQueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.EndTime != 0 { + i = encodeVarintMetastore(dAtA, i, uint64(m.EndTime)) + i-- + dAtA[i] = 0x18 + } + if m.StartTime != 0 { + i = encodeVarintMetastore(dAtA, i, uint64(m.StartTime)) + i-- + dAtA[i] = 0x10 + } + if len(m.TenantId) > 0 { + i -= len(m.TenantId) + copy(dAtA[i:], m.TenantId) + i = encodeVarintMetastore(dAtA, i, uint64(len(m.TenantId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListBlocksForQueryResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListBlocksForQueryResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListBlocksForQueryResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Blocks) > 0 { + for iNdEx := len(m.Blocks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Blocks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetastore(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintMetastore(dAtA []byte, offset int, v uint64) int { + offset -= sovMetastore(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AddBlockRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Block != nil { + l = m.Block.Size() + n += 1 + l + sovMetastore(uint64(l)) + } + return n +} + +func (m *AddBlockResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *BlockMeta) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FormatVersion != 0 { + n += 1 + sovMetastore(uint64(m.FormatVersion)) + } + l = len(m.Id) + if l > 0 { + n += 1 + l + sovMetastore(uint64(l)) + } + if m.MinTime != 0 { + n += 1 + sovMetastore(uint64(m.MinTime)) + } + if m.MaxTime != 0 { + n += 1 + sovMetastore(uint64(m.MaxTime)) + } + if m.CompactionLevel != 0 { + n += 1 + sovMetastore(uint64(m.CompactionLevel)) + } + l = m.IndexRef.Size() + n += 1 + l + sovMetastore(uint64(l)) + if len(m.TenantStreams) > 0 { + for _, e := range m.TenantStreams { + l = e.Size() + n += 1 + l + sovMetastore(uint64(l)) + } + } + return n +} + +func (m *DataRef) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Offset != 0 { + n += 1 + sovMetastore(uint64(m.Offset)) + } + if m.Length != 0 { + n += 1 + sovMetastore(uint64(m.Length)) + } + return n +} + +func (m *TenantStreams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TenantId) + if l > 0 { + n += 1 + l + sovMetastore(uint64(l)) + } + if m.MinTime != 
0 { + n += 1 + sovMetastore(uint64(m.MinTime)) + } + if m.MaxTime != 0 { + n += 1 + sovMetastore(uint64(m.MaxTime)) + } + return n +} + +func (m *ListBlocksForQueryRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TenantId) + if l > 0 { + n += 1 + l + sovMetastore(uint64(l)) + } + if m.StartTime != 0 { + n += 1 + sovMetastore(uint64(m.StartTime)) + } + if m.EndTime != 0 { + n += 1 + sovMetastore(uint64(m.EndTime)) + } + return n +} + +func (m *ListBlocksForQueryResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Blocks) > 0 { + for _, e := range m.Blocks { + l = e.Size() + n += 1 + l + sovMetastore(uint64(l)) + } + } + return n +} + +func sovMetastore(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozMetastore(x uint64) (n int) { + return sovMetastore(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AddBlockRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AddBlockRequest{`, + `Block:` + strings.Replace(this.Block.String(), "BlockMeta", "BlockMeta", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AddBlockResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AddBlockResponse{`, + `}`, + }, "") + return s +} +func (this *BlockMeta) String() string { + if this == nil { + return "nil" + } + repeatedStringForTenantStreams := "[]*TenantStreams{" + for _, f := range this.TenantStreams { + repeatedStringForTenantStreams += strings.Replace(f.String(), "TenantStreams", "TenantStreams", 1) + "," + } + repeatedStringForTenantStreams += "}" + s := strings.Join([]string{`&BlockMeta{`, + `FormatVersion:` + fmt.Sprintf("%v", this.FormatVersion) + `,`, + `Id:` + fmt.Sprintf("%v", this.Id) + `,`, + `MinTime:` + fmt.Sprintf("%v", this.MinTime) + `,`, + `MaxTime:` + fmt.Sprintf("%v", this.MaxTime) + `,`, + `CompactionLevel:` + fmt.Sprintf("%v", this.CompactionLevel) + `,`, + `IndexRef:` + strings.Replace(strings.Replace(this.IndexRef.String(), "DataRef", "DataRef", 1), `&`, ``, 1) + `,`, + `TenantStreams:` + repeatedStringForTenantStreams + `,`, + `}`, + }, "") + return s +} +func (this *DataRef) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DataRef{`, + `Offset:` + fmt.Sprintf("%v", this.Offset) + `,`, + `Length:` + fmt.Sprintf("%v", this.Length) + `,`, + `}`, + }, "") + return s +} +func (this *TenantStreams) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TenantStreams{`, + `TenantId:` + fmt.Sprintf("%v", this.TenantId) + `,`, + `MinTime:` + fmt.Sprintf("%v", this.MinTime) + `,`, + `MaxTime:` + fmt.Sprintf("%v", this.MaxTime) + `,`, + `}`, + }, "") + return s +} +func (this *ListBlocksForQueryRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListBlocksForQueryRequest{`, + `TenantId:` + fmt.Sprintf("%v", this.TenantId) + `,`, + `StartTime:` + fmt.Sprintf("%v", this.StartTime) + `,`, + `EndTime:` + fmt.Sprintf("%v", this.EndTime) + `,`, + `}`, + }, "") + return s +} +func (this *ListBlocksForQueryResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForBlocks := "[]*BlockMeta{" + for _, f := range this.Blocks { + repeatedStringForBlocks += strings.Replace(f.String(), "BlockMeta", "BlockMeta", 1) + "," + } + repeatedStringForBlocks += "}" + s := strings.Join([]string{`&ListBlocksForQueryResponse{`, + `Blocks:` + repeatedStringForBlocks + `,`, + `}`, + }, "") 
+ return s +} +func valueToStringMetastore(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AddBlockRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetastore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AddBlockRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AddBlockRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetastore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetastore + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetastore + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Block == nil { + m.Block = &BlockMeta{} + } + if err := m.Block.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetastore(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetastore + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMetastore + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AddBlockResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetastore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AddBlockResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AddBlockResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipMetastore(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetastore + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMetastore + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlockMeta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetastore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + 
return fmt.Errorf("proto: BlockMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FormatVersion", wireType) + } + m.FormatVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetastore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FormatVersion |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetastore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetastore + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetastore + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinTime", wireType) + } + m.MinTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetastore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxTime", wireType) + } + m.MaxTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetastore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CompactionLevel", wireType) + } + m.CompactionLevel = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetastore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CompactionLevel |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IndexRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetastore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetastore + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetastore + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.IndexRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TenantStreams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetastore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetastore + } + postIndex 
:= iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetastore + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TenantStreams = append(m.TenantStreams, &TenantStreams{}) + if err := m.TenantStreams[len(m.TenantStreams)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetastore(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetastore + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMetastore + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DataRef) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetastore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DataRef: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DataRef: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + m.Offset = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetastore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Offset |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType) + } + m.Length = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetastore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Length |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetastore(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetastore + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMetastore + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TenantStreams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetastore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TenantStreams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TenantStreams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TenantId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetastore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if 
b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetastore + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetastore + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TenantId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinTime", wireType) + } + m.MinTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetastore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxTime", wireType) + } + m.MaxTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetastore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetastore(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetastore + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMetastore + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListBlocksForQueryRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetastore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListBlocksForQueryRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListBlocksForQueryRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TenantId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetastore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetastore + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetastore + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TenantId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + m.StartTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetastore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndTime", wireType) + } + m.EndTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetastore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + m.EndTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetastore(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetastore + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMetastore + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListBlocksForQueryResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetastore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListBlocksForQueryResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListBlocksForQueryResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blocks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetastore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetastore + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetastore + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Blocks = append(m.Blocks, &BlockMeta{}) + if err := m.Blocks[len(m.Blocks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetastore(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetastore + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMetastore + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMetastore(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetastore + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetastore + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetastore + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthMetastore + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthMetastore + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetastore + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipMetastore(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthMetastore + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthMetastore = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMetastore = fmt.Errorf("proto: integer overflow") +) diff --git a/pkg/ingester-rf1/metastore/metastorepb/metastore.proto b/pkg/ingester-rf1/metastore/metastorepb/metastore.proto new file mode 100644 index 0000000000000..bf8573f23d711 --- /dev/null +++ b/pkg/ingester-rf1/metastore/metastorepb/metastore.proto @@ -0,0 +1,51 @@ +syntax = "proto3"; + +package metastorepb; + +import "gogoproto/gogo.proto"; + +service MetastoreService { + rpc AddBlock(AddBlockRequest) returns (AddBlockResponse) {} + rpc ListBlocksForQuery(ListBlocksForQueryRequest) returns (ListBlocksForQueryResponse) {} +} + +message AddBlockRequest { + BlockMeta block = 1; +} + +message AddBlockResponse {} + +message BlockMeta { + uint64 format_version = 1; + string id = 2; + int64 min_time = 3; + int64 max_time = 4; + uint32 compaction_level = 6; + DataRef indexRef = 7 [(gogoproto.nullable) = false]; + + repeated TenantStreams tenant_streams = 8; +} + +message DataRef { + int64 offset = 1; + int64 length = 2; +} + +// TenantStreams object points to the offset in the block at which +// the tenant streams data is located. +message TenantStreams { + string tenant_id = 1; + int64 min_time = 3; + int64 max_time = 4; + //todo offset in the block. 
+} + +message ListBlocksForQueryRequest { + string tenant_id = 1; + int64 start_time = 2; + int64 end_time = 3; +} + +message ListBlocksForQueryResponse { + repeated BlockMeta blocks = 1; +} diff --git a/pkg/ingester-rf1/metastore/raftleader/raftleader.go b/pkg/ingester-rf1/metastore/raftleader/raftleader.go new file mode 100644 index 0000000000000..dfef0811a8ce2 --- /dev/null +++ b/pkg/ingester-rf1/metastore/raftleader/raftleader.go @@ -0,0 +1,109 @@ +package raftleader + +import ( + "sync" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/hashicorp/raft" + "google.golang.org/grpc/health/grpc_health_v1" + + "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore/health" +) + +type HealthObserver struct { + server health.Service + logger log.Logger + mu sync.Mutex + registered map[serviceKey]*raftService +} + +func NewRaftLeaderHealthObserver(hs health.Service, logger log.Logger) *HealthObserver { + return &HealthObserver{ + server: hs, + logger: logger, + registered: make(map[serviceKey]*raftService), + } +} + +func (hs *HealthObserver) Register(r *raft.Raft, service string) { + hs.mu.Lock() + defer hs.mu.Unlock() + k := serviceKey{raft: r, service: service} + if _, ok := hs.registered[k]; ok { + return + } + svc := &raftService{ + server: hs.server, + logger: log.With(hs.logger, "service", service), + service: service, + raft: r, + c: make(chan raft.Observation, 1), + stop: make(chan struct{}), + done: make(chan struct{}), + } + _ = level.Debug(svc.logger).Log("msg", "registering health check") + svc.updateStatus() + go svc.run() + svc.observer = raft.NewObserver(svc.c, true, func(o *raft.Observation) bool { + _, ok := o.Data.(raft.LeaderObservation) + return ok + }) + r.RegisterObserver(svc.observer) + hs.registered[k] = svc +} + +func (hs *HealthObserver) Deregister(r *raft.Raft, service string) { + hs.mu.Lock() + k := serviceKey{raft: r, service: service} + svc, ok := hs.registered[k] + delete(hs.registered, k) + hs.mu.Unlock() + if ok { + close(svc.stop) + <-svc.done + } +} + +type serviceKey struct { + raft *raft.Raft + service string +} + +type raftService struct { + server health.Service + logger log.Logger + service string + raft *raft.Raft + observer *raft.Observer + c chan raft.Observation + stop chan struct{} + done chan struct{} +} + +func (svc *raftService) run() { + defer func() { + close(svc.done) + }() + for { + select { + case <-svc.c: + svc.updateStatus() + case <-svc.stop: + _ = level.Debug(svc.logger).Log("msg", "deregistering health check") + // We explicitly remove the service from serving when we stop observing it. + svc.server.SetServingStatus(svc.service, grpc_health_v1.HealthCheckResponse_NOT_SERVING) + svc.raft.DeregisterObserver(svc.observer) + return + } + } +} + +func (svc *raftService) updateStatus() { + status := grpc_health_v1.HealthCheckResponse_NOT_SERVING + if svc.raft.State() == raft.Leader { + status = grpc_health_v1.HealthCheckResponse_SERVING + } + _ = level.Info(svc.logger).Log("msg", "updating health status", "status", status) + svc.server.SetServingStatus(svc.service, status) +} diff --git a/pkg/ingester-rf1/metastore/raftlogpb/raflog.pb.go b/pkg/ingester-rf1/metastore/raftlogpb/raflog.pb.go new file mode 100644 index 0000000000000..f4aebccc271a0 --- /dev/null +++ b/pkg/ingester-rf1/metastore/raftlogpb/raflog.pb.go @@ -0,0 +1,693 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: pkg/ingester-rf1/metastore/raftlogpb/raflog.proto + +package raftlogpb + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type CommandType int32 + +const ( + COMMAND_TYPE_UNKNOWN CommandType = 0 + COMMAND_TYPE_ADD_BLOCK CommandType = 1 + // This is a temporary solution. + COMMAND_TYPE_TRUNCATE CommandType = 4196 +) + +var CommandType_name = map[int32]string{ + 0: "COMMAND_TYPE_UNKNOWN", + 1: "COMMAND_TYPE_ADD_BLOCK", + 4196: "COMMAND_TYPE_TRUNCATE", +} + +var CommandType_value = map[string]int32{ + "COMMAND_TYPE_UNKNOWN": 0, + "COMMAND_TYPE_ADD_BLOCK": 1, + "COMMAND_TYPE_TRUNCATE": 4196, +} + +func (CommandType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_dd4a889a2dc1d0a7, []int{0} +} + +type RaftLogEntry struct { + Type CommandType `protobuf:"varint,1,opt,name=type,proto3,enum=raftlogpb.CommandType" json:"type,omitempty"` + Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` +} + +func (m *RaftLogEntry) Reset() { *m = RaftLogEntry{} } +func (*RaftLogEntry) ProtoMessage() {} +func (*RaftLogEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_dd4a889a2dc1d0a7, []int{0} +} +func (m *RaftLogEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RaftLogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RaftLogEntry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RaftLogEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_RaftLogEntry.Merge(m, src) +} +func (m *RaftLogEntry) XXX_Size() int { + return m.Size() +} +func (m *RaftLogEntry) XXX_DiscardUnknown() { + xxx_messageInfo_RaftLogEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_RaftLogEntry proto.InternalMessageInfo + +func (m *RaftLogEntry) GetType() CommandType { + if m != nil { + return m.Type + } + return COMMAND_TYPE_UNKNOWN +} + +func (m *RaftLogEntry) GetPayload() []byte { + if m != nil { + return m.Payload + } + return nil +} + +type TruncateCommand struct { + Timestamp uint64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` +} + +func (m *TruncateCommand) Reset() { *m = TruncateCommand{} } +func (*TruncateCommand) ProtoMessage() {} +func (*TruncateCommand) Descriptor() ([]byte, []int) { + return fileDescriptor_dd4a889a2dc1d0a7, []int{1} +} +func (m *TruncateCommand) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TruncateCommand) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TruncateCommand.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TruncateCommand) XXX_Merge(src proto.Message) { + xxx_messageInfo_TruncateCommand.Merge(m, src) 
+} +func (m *TruncateCommand) XXX_Size() int { + return m.Size() +} +func (m *TruncateCommand) XXX_DiscardUnknown() { + xxx_messageInfo_TruncateCommand.DiscardUnknown(m) +} + +var xxx_messageInfo_TruncateCommand proto.InternalMessageInfo + +func (m *TruncateCommand) GetTimestamp() uint64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +func init() { + proto.RegisterEnum("raftlogpb.CommandType", CommandType_name, CommandType_value) + proto.RegisterType((*RaftLogEntry)(nil), "raftlogpb.RaftLogEntry") + proto.RegisterType((*TruncateCommand)(nil), "raftlogpb.TruncateCommand") +} + +func init() { + proto.RegisterFile("pkg/ingester-rf1/metastore/raftlogpb/raflog.proto", fileDescriptor_dd4a889a2dc1d0a7) +} + +var fileDescriptor_dd4a889a2dc1d0a7 = []byte{ + // 306 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x90, 0xc1, 0x4a, 0x02, 0x41, + 0x18, 0xc7, 0x67, 0x42, 0x0a, 0x27, 0x29, 0x19, 0x4a, 0x16, 0x89, 0x0f, 0xf1, 0x24, 0x42, 0x2e, + 0x56, 0x2f, 0xa0, 0xab, 0x27, 0x75, 0x8d, 0x65, 0x24, 0x3a, 0x2d, 0x63, 0x8e, 0x8b, 0xe4, 0xee, + 0x0c, 0xe3, 0x74, 0xd8, 0x5b, 0x8f, 0xd0, 0x63, 0xf4, 0x28, 0x1d, 0x3d, 0x7a, 0xcc, 0xb1, 0x43, + 0x47, 0x1f, 0x21, 0x5a, 0xca, 0xf2, 0xf6, 0x7d, 0xff, 0xff, 0xef, 0xfb, 0x1d, 0x3e, 0xd2, 0x54, + 0x8f, 0x91, 0x3b, 0x4b, 0x22, 0xb1, 0x30, 0x42, 0x5f, 0xea, 0x69, 0xd3, 0x8d, 0x85, 0xe1, 0x0b, + 0x23, 0xb5, 0x70, 0x35, 0x9f, 0x9a, 0xb9, 0x8c, 0xd4, 0xf8, 0x7b, 0x9a, 0xcb, 0xa8, 0xa1, 0xb4, + 0x34, 0x92, 0xe6, 0x77, 0x79, 0x95, 0x91, 0x42, 0xc0, 0xa7, 0xa6, 0x2f, 0xa3, 0x6e, 0x62, 0x74, + 0x4a, 0xeb, 0x24, 0x67, 0x52, 0x25, 0x1c, 0x5c, 0xc1, 0xb5, 0x93, 0xab, 0x52, 0x63, 0x47, 0x36, + 0x3c, 0x19, 0xc7, 0x3c, 0x99, 0xb0, 0x54, 0x89, 0x20, 0x63, 0xa8, 0x43, 0x8e, 0x14, 0x4f, 0xe7, + 0x92, 0x4f, 0x9c, 0x83, 0x0a, 0xae, 0x15, 0x82, 0xdf, 0xb5, 0xea, 0x92, 0x53, 0xa6, 0x9f, 0x92, + 0x07, 0x6e, 0xc4, 0xcf, 0x19, 0xbd, 0x20, 0x79, 0x33, 0x8b, 0xc5, 0xc2, 0xf0, 0x58, 0x65, 0xf6, + 0x5c, 0xf0, 0x17, 0xd4, 0x43, 0x72, 0xfc, 0xcf, 0x4f, 0x1d, 0x72, 0xe6, 0x0d, 0x07, 0x83, 0x96, + 0xdf, 0x09, 0xd9, 0xfd, 0x6d, 0x37, 0x1c, 0xf9, 0x3d, 0x7f, 0x78, 0xe7, 0x17, 0x11, 0x2d, 0x93, + 0xd2, 0x5e, 0xd3, 0xea, 0x74, 0xc2, 0x76, 0x7f, 0xe8, 0xf5, 0x8a, 0x98, 0x96, 0xc9, 0xf9, 0x5e, + 0xc7, 0x82, 0x91, 0xef, 0xb5, 0x58, 0xb7, 0xf8, 0x51, 0x69, 0xdf, 0x2c, 0xd7, 0x80, 0x56, 0x6b, + 0x40, 0xdb, 0x35, 0xe0, 0x67, 0x0b, 0xf8, 0xd5, 0x02, 0x7e, 0xb3, 0x80, 0x97, 0x16, 0xf0, 0xbb, + 0x05, 0xfc, 0x69, 0x01, 0x6d, 0x2d, 0xe0, 0x97, 0x0d, 0xa0, 0xe5, 0x06, 0xd0, 0x6a, 0x03, 0x68, + 0x7c, 0x98, 0xfd, 0xeb, 0xfa, 0x2b, 0x00, 0x00, 0xff, 0xff, 0x48, 0x2c, 0x6d, 0x12, 0x64, 0x01, + 0x00, 0x00, +} + +func (x CommandType) String() string { + s, ok := CommandType_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *RaftLogEntry) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RaftLogEntry) + if !ok { + that2, ok := that.(RaftLogEntry) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + if !bytes.Equal(this.Payload, that1.Payload) { + return false + } + return true +} +func (this *TruncateCommand) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TruncateCommand) + if !ok { + that2, ok := that.(TruncateCommand) + if ok { + that1 = &that2 + } else { + return false + } 
+ } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Timestamp != that1.Timestamp { + return false + } + return true +} +func (this *RaftLogEntry) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&raftlogpb.RaftLogEntry{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + s = append(s, "Payload: "+fmt.Sprintf("%#v", this.Payload)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TruncateCommand) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&raftlogpb.TruncateCommand{") + s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringRaflog(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *RaftLogEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RaftLogEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RaftLogEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Payload) > 0 { + i -= len(m.Payload) + copy(dAtA[i:], m.Payload) + i = encodeVarintRaflog(dAtA, i, uint64(len(m.Payload))) + i-- + dAtA[i] = 0x12 + } + if m.Type != 0 { + i = encodeVarintRaflog(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TruncateCommand) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TruncateCommand) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TruncateCommand) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Timestamp != 0 { + i = encodeVarintRaflog(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintRaflog(dAtA []byte, offset int, v uint64) int { + offset -= sovRaflog(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *RaftLogEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovRaflog(uint64(m.Type)) + } + l = len(m.Payload) + if l > 0 { + n += 1 + l + sovRaflog(uint64(l)) + } + return n +} + +func (m *TruncateCommand) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Timestamp != 0 { + n += 1 + sovRaflog(uint64(m.Timestamp)) + } + return n +} + +func sovRaflog(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozRaflog(x uint64) (n int) { + return sovRaflog(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *RaftLogEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RaftLogEntry{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Payload:` + fmt.Sprintf("%v", this.Payload) + `,`, + `}`, + }, "") + return s +} +func (this *TruncateCommand) 
String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TruncateCommand{`, + `Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`, + `}`, + }, "") + return s +} +func valueToStringRaflog(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *RaftLogEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaflog + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RaftLogEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RaftLogEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaflog + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= CommandType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaflog + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaflog + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthRaflog + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payload = append(m.Payload[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Payload == nil { + m.Payload = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaflog(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaflog + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRaflog + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TruncateCommand) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaflog + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TruncateCommand: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TruncateCommand: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaflog + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaflog(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaflog + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRaflog + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRaflog(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaflog + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaflog + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaflog + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthRaflog + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthRaflog + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaflog + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipRaflog(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, 
ErrInvalidLengthRaflog
+				}
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthRaflog = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowRaflog   = fmt.Errorf("proto: integer overflow")
+)
diff --git a/pkg/ingester-rf1/metastore/raftlogpb/raflog.proto b/pkg/ingester-rf1/metastore/raftlogpb/raflog.proto
new file mode 100644
index 0000000000000..651111fd0bb71
--- /dev/null
+++ b/pkg/ingester-rf1/metastore/raftlogpb/raflog.proto
@@ -0,0 +1,20 @@
+syntax = "proto3";
+
+package raftlogpb;
+
+message RaftLogEntry {
+  CommandType type = 1;
+  bytes payload = 2;
+}
+
+enum CommandType {
+  COMMAND_TYPE_UNKNOWN = 0;
+  COMMAND_TYPE_ADD_BLOCK = 1;
+
+  // This is a temporary solution.
+  COMMAND_TYPE_TRUNCATE = 4196;
+}
+
+message TruncateCommand {
+  uint64 timestamp = 1;
+}
diff --git a/pkg/ingester-rf1/metastore/util.go b/pkg/ingester-rf1/metastore/util.go
new file mode 100644
index 0000000000000..50aa87f2fdf6d
--- /dev/null
+++ b/pkg/ingester-rf1/metastore/util.go
@@ -0,0 +1,17 @@
+package metastore
+
+import (
+	"fmt"
+	"os"
+	"runtime"
+)
+
+const maxStacksize = 8 * 1024
+
+func panicError(p interface{}) error {
+	stack := make([]byte, maxStacksize)
+	stack = stack[:runtime.Stack(stack, true)]
+	// keep a multiline stack
+	fmt.Fprintf(os.Stderr, "panic: %v\n%s", p, stack)
+	return fmt.Errorf("%v", p)
+}
diff --git a/pkg/ingester-rf1/stream.go b/pkg/ingester-rf1/stream.go
index f0c556e341088..32ccf454c41c4 100644
--- a/pkg/ingester-rf1/stream.go
+++ b/pkg/ingester-rf1/stream.go
@@ -3,13 +3,13 @@ package ingesterrf1
 
 import (
 	"bytes"
 	"context"
+	"errors"
 	"fmt"
 	"net/http"
 	"time"
 
 	"github.com/grafana/dskit/httpgrpc"
 	"github.com/opentracing/opentracing-go"
-	"github.com/pkg/errors"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/model/labels"
@@ -133,7 +133,7 @@
 		return 0, nil, errorForFailedEntries(s, invalid, len(entries))
 	}
 
-	bytesAdded, res, err := s.storeEntries(ctx, wal, toStore, usageTracker)
+	bytesAdded, res, err := s.storeEntries(ctx, wal, toStore)
 	if err != nil {
 		return 0, nil, err
 	}
@@ -189,13 +189,11 @@ func hasRateLimitErr(errs []entryWithError) bool {
 	return ok
 }
 
-func (s *stream) storeEntries(ctx context.Context, w *wal.Manager, entries []*logproto.Entry, usageTracker push.UsageTracker) (int, *wal.AppendResult, error) {
-	if sp := opentracing.SpanFromContext(ctx); sp != nil {
-		sp.LogKV("event", "stream started to store entries", "labels", s.labelsString)
-		defer sp.LogKV("event", "stream finished to store entries")
-	}
+func (s *stream) storeEntries(ctx context.Context, w *wal.Manager, entries []*logproto.Entry) (int, *wal.AppendResult, error) {
+	sp, _ := opentracing.StartSpanFromContext(ctx, "storeEntries")
+	defer sp.Finish()
 
-	var bytesAdded, outOfOrderSamples, outOfOrderBytes int
+	var bytesAdded int
 	for i := 0; i < len(entries); i++ {
 		s.entryCt++
@@ -211,17 +209,18 @@ func (s *stream) storeEntries(ctx context.Context, w *wal.Manager, entries []*lo
 	res, err := w.Append(wal.AppendRequest{
 		TenantID:  s.tenant,
 		Labels:    s.labels,
-		LabelsStr: s.labels.String(),
+		LabelsStr: s.labelsString,
 		Entries:   entries,
 	})
 	if err != nil {
 		return 0, nil, err
 	}
-	s.reportMetrics(ctx, outOfOrderSamples, outOfOrderBytes, 0, 0, usageTracker)
 	return bytesAdded, res, nil
 }
 
 func (s *stream) validateEntries(ctx context.Context, entries []logproto.Entry,
rateLimitWholeStream bool, usageTracker push.UsageTracker) ([]*logproto.Entry, []entryWithError) { + sp, ctx := opentracing.StartSpanFromContext(ctx, "validateEntries") + defer sp.Finish() var ( outOfOrderSamples, outOfOrderBytes int rateLimitedSamples, rateLimitedBytes int diff --git a/pkg/ingester/checkpoint.go b/pkg/ingester/checkpoint.go index 9de54888f4748..1f6dcf8eefeee 100644 --- a/pkg/ingester/checkpoint.go +++ b/pkg/ingester/checkpoint.go @@ -3,6 +3,7 @@ package ingester import ( "bytes" "context" + "errors" "fmt" "os" "path/filepath" @@ -14,7 +15,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/gogo/protobuf/proto" - "github.com/pkg/errors" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/prometheus/prometheus/tsdb/fileutil" "github.com/prometheus/prometheus/tsdb/wlog" @@ -125,7 +125,7 @@ func decodeCheckpointRecord(rec []byte, s *Series) error { case wal.CheckpointRecord: return proto.Unmarshal(cpy[1:], s) default: - return errors.Errorf("unexpected record type: %d", rec[0]) + return fmt.Errorf("unexpected record type: %d", rec[0]) } } @@ -345,12 +345,12 @@ func (w *WALCheckpointWriter) Advance() (bool, error) { } if err := os.MkdirAll(checkpointDirTemp, 0777); err != nil { - return false, errors.Wrap(err, "create checkpoint dir") + return false, fmt.Errorf("create checkpoint dir: %w", err) } checkpoint, err := wlog.NewSize(log.With(util_log.Logger, "component", "checkpoint_wal"), nil, checkpointDirTemp, walSegmentSize, wlog.CompressionNone) if err != nil { - return false, errors.Wrap(err, "open checkpoint") + return false, fmt.Errorf("open checkpoint: %w", err) } w.checkpointWAL = checkpoint @@ -493,7 +493,7 @@ func (w *WALCheckpointWriter) Close(abort bool) error { } if err := fileutil.Replace(w.checkpointWAL.Dir(), w.final); err != nil { - return errors.Wrap(err, "rename checkpoint directory") + return fmt.Errorf("rename checkpoint directory: %w", err) } level.Info(util_log.Logger).Log("msg", "atomic checkpoint finished", "old", w.checkpointWAL.Dir(), "new", w.final) // We delete the WAL segments which are before the previous checkpoint and not before the diff --git a/pkg/ingester/index/multi.go b/pkg/ingester/index/multi.go index 416876b1dcb25..293be18016b16 100644 --- a/pkg/ingester/index/multi.go +++ b/pkg/ingester/index/multi.go @@ -1,9 +1,9 @@ package index import ( + "fmt" "time" - "github.com/pkg/errors" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" @@ -39,7 +39,7 @@ func NewMultiInvertedIndex(periods []config.PeriodConfig, indexShards uint32) (* if bitPrefixed == nil { bitPrefixed, err = NewBitPrefixWithShards(indexShards) if err != nil { - return nil, errors.Wrapf(err, "creating tsdb inverted index for period starting %v", pd.From) + return nil, fmt.Errorf("creating tsdb inverted index for period starting %v: %w", pd.From, err) } } periodIndices = append(periodIndices, periodIndex{ diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index af7f1fde288c9..da0021c11f3fd 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -2,6 +2,7 @@ package ingester import ( "context" + "errors" "flag" "fmt" "math/rand" @@ -29,7 +30,6 @@ import ( "github.com/grafana/dskit/services" "github.com/grafana/dskit/tenant" "github.com/opentracing/opentracing-go" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" @@ -543,7 +543,7 @@ func (i *Ingester) starting(ctx 
context.Context) error { shutdownMarkerPath := path.Join(i.cfg.ShutdownMarkerPath, shutdownMarkerFilename) shutdownMarker, err := shutdownMarkerExists(shutdownMarkerPath) if err != nil { - return errors.Wrap(err, "failed to check ingester shutdown marker") + return fmt.Errorf("failed to check ingester shutdown marker: %w", err) } if shutdownMarker { diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go index 7391a8af71916..aeddd5e9f0f13 100644 --- a/pkg/ingester/instance.go +++ b/pkg/ingester/instance.go @@ -2,6 +2,7 @@ package ingester import ( "context" + "errors" "fmt" "math" "net/http" @@ -15,7 +16,6 @@ import ( "github.com/grafana/dskit/ring" "github.com/grafana/dskit/tenant" "github.com/opentracing/opentracing-go" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/common/model" diff --git a/pkg/ingester/instance_test.go b/pkg/ingester/instance_test.go index 7a2647a3bb015..6867b0e1519c2 100644 --- a/pkg/ingester/instance_test.go +++ b/pkg/ingester/instance_test.go @@ -20,7 +20,6 @@ import ( "github.com/grafana/dskit/backoff" "github.com/grafana/dskit/flagext" - "github.com/pkg/errors" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/require" @@ -53,7 +52,7 @@ func defaultConfig() *Config { OwnedStreamsCheckInterval: 1 * time.Second, } if err := cfg.Validate(); err != nil { - panic(errors.Wrap(err, "error building default test config")) + panic(fmt.Errorf("error building default test config: %w", err)) } return &cfg } diff --git a/pkg/ingester/recovery.go b/pkg/ingester/recovery.go index e8b1c244871bb..9ea4306bb5369 100644 --- a/pkg/ingester/recovery.go +++ b/pkg/ingester/recovery.go @@ -1,13 +1,14 @@ package ingester import ( + "errors" + "fmt" "io" "runtime" "sync" "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/pkg/errors" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/record" "github.com/prometheus/prometheus/tsdb/wlog" @@ -159,12 +160,12 @@ func (r *ingesterRecoverer) Push(userID string, entries wal.RefEntries) error { return r.ing.replayController.WithBackPressure(func() error { out, ok := r.users.Load(userID) if !ok { - return errors.Errorf("user (%s) not set during WAL replay", userID) + return fmt.Errorf("user (%s) not set during WAL replay", userID) } s, ok := out.(*sync.Map).Load(entries.Ref) if !ok { - return errors.Errorf("stream (%d) not set during WAL replay for user (%s)", entries.Ref, userID) + return fmt.Errorf("stream (%d) not set during WAL replay for user (%s)", entries.Ref, userID) } // ignore out of order errors here (it's possible for a checkpoint to already have data from the wal segments) @@ -273,7 +274,7 @@ func RecoverWAL(ctx context.Context, reader WALReader, recoverer Recoverer) erro entries, ok := next.data.(wal.RefEntries) var err error if !ok { - err = errors.Errorf("unexpected type (%T) when recovering WAL, expecting (%T)", next.data, entries) + err = fmt.Errorf("unexpected type (%T) when recovering WAL, expecting (%T)", next.data, entries) } if err == nil { err = recoverer.Push(next.userID, entries) @@ -323,7 +324,7 @@ func RecoverCheckpoint(reader WALReader, recoverer Recoverer) error { series, ok := next.data.(*Series) var err error if !ok { - err = errors.Errorf("unexpected type (%T) when recovering WAL, expecting (%T)", next.data, series) + err = fmt.Errorf("unexpected type (%T) when recovering WAL, expecting 
(%T)", next.data, series) } if err == nil { err = recoverer.Series(series) diff --git a/pkg/ingester/recovery_test.go b/pkg/ingester/recovery_test.go index 4c5a4ce815d8d..97c55d3da9f8f 100644 --- a/pkg/ingester/recovery_test.go +++ b/pkg/ingester/recovery_test.go @@ -11,7 +11,6 @@ import ( "github.com/go-kit/log" "github.com/grafana/dskit/user" - "github.com/pkg/errors" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/record" @@ -142,7 +141,7 @@ func (r *MemRecoverer) SetStream(_ context.Context, userID string, series record } if _, exists := user[series.Ref]; exists { - return errors.Errorf("stream (%d) already exists for user (%s)", series.Ref, userID) + return fmt.Errorf("stream (%d) already exists for user (%s)", series.Ref, userID) } user[series.Ref] = make([]logproto.Entry, 0) @@ -156,12 +155,12 @@ func (r *MemRecoverer) Push(userID string, entries wal.RefEntries) error { user, ok := r.users[userID] if !ok { - return errors.Errorf("unexpected user access (%s)", userID) + return fmt.Errorf("unexpected user access (%s)", userID) } stream, ok := user[entries.Ref] if !ok { - return errors.Errorf("unexpected stream access") + return fmt.Errorf("unexpected stream access") } r.seriesCt += len(entries.Entries) diff --git a/pkg/ingester/stream.go b/pkg/ingester/stream.go index 7d37859b1541f..0754de9caf0fa 100644 --- a/pkg/ingester/stream.go +++ b/pkg/ingester/stream.go @@ -3,6 +3,7 @@ package ingester import ( "bytes" "context" + "errors" "fmt" "net/http" "sync" @@ -13,7 +14,6 @@ import ( "github.com/go-kit/log/level" "github.com/grafana/dskit/httpgrpc" "github.com/opentracing/opentracing-go" - "github.com/pkg/errors" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" diff --git a/pkg/ingester/wal.go b/pkg/ingester/wal.go index 5a32aee050325..a87b2b995d79c 100644 --- a/pkg/ingester/wal.go +++ b/pkg/ingester/wal.go @@ -2,11 +2,11 @@ package ingester import ( "flag" + "fmt" "sync" "time" "github.com/go-kit/log/level" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/tsdb/wlog" @@ -33,7 +33,7 @@ type WALConfig struct { func (cfg *WALConfig) Validate() error { if cfg.Enabled && cfg.CheckpointDuration < 1 { - return errors.Errorf("invalid checkpoint duration: %v", cfg.CheckpointDuration) + return fmt.Errorf("invalid checkpoint duration: %v", cfg.CheckpointDuration) } return nil } diff --git a/pkg/ingester/wal/encoding.go b/pkg/ingester/wal/encoding.go index a21ce57bf34b1..f59795c8a9fea 100644 --- a/pkg/ingester/wal/encoding.go +++ b/pkg/ingester/wal/encoding.go @@ -1,9 +1,10 @@ package wal import ( + "errors" + "fmt" "time" - "github.com/pkg/errors" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/record" @@ -198,18 +199,18 @@ func DecodeEntries(b []byte, version RecordType, rec *Record) error { } if dec.Err() != nil { - return errors.Wrapf(dec.Err(), "entry decode error after %d RefEntries", nEntries-rem) + return fmt.Errorf("entry decode error after %d RefEntries: %w", nEntries-rem, dec.Err()) } rec.RefEntries = append(rec.RefEntries, refEntries) } if dec.Err() != nil { - return errors.Wrap(dec.Err(), "refEntry decode error") + return fmt.Errorf("refEntry decode error: %w", dec.Err()) } if len(dec.B) > 0 { - return errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + return fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) } return nil } diff --git 
a/pkg/logql/syntax/ast.go b/pkg/logql/syntax/ast.go index e5e80b4d0c172..2e7b8a2e0e19d 100644 --- a/pkg/logql/syntax/ast.go +++ b/pkg/logql/syntax/ast.go @@ -318,9 +318,14 @@ func (e *PipelineExpr) Pipeline() (log.Pipeline, error) { // HasFilter returns true if the pipeline contains stage that can filter out lines. func (e *PipelineExpr) HasFilter() bool { for _, p := range e.MultiStages { - switch p.(type) { - case *LineFilterExpr, *LabelFilterExpr: + switch v := p.(type) { + case *LabelFilterExpr: return true + case *LineFilterExpr: + // ignore empty matchers as they match everything + if !((v.Ty == log.LineMatchEqual || v.Ty == log.LineMatchRegexp) && v.Match == "") { + return true + } default: continue } diff --git a/pkg/logql/syntax/ast_test.go b/pkg/logql/syntax/ast_test.go index d75ff2d0261b6..cb36cb2e7cd8e 100644 --- a/pkg/logql/syntax/ast_test.go +++ b/pkg/logql/syntax/ast_test.go @@ -1039,6 +1039,23 @@ func TestParseLargeQuery(t *testing.T) { require.NoError(t, err) } +func TestLogSelectorExprHasFilter(t *testing.T) { + for query, hasFilter := range map[string]bool{ + `{foo="bar"} |= ""`: false, + `{foo="bar"} |= "" |= ""`: false, + `{foo="bar"} |~ ""`: false, + `{foo="bar"} |= "notempty"`: true, + `{foo="bar"} |= "" |= "notempty"`: true, + `{foo="bar"} != ""`: true, + `{foo="bar"} | lbl="notempty"`: true, + `{foo="bar"} |= "" | lbl="notempty"`: true, + } { + expr, err := ParseExpr(query) + require.NoError(t, err) + require.Equal(t, hasFilter, expr.(LogSelectorExpr).HasFilter()) + } +} + func TestGroupingString(t *testing.T) { g := Grouping{ Groups: []string{"a", "b"}, diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go index 2e7e010b99801..3b08c0beb0a86 100644 --- a/pkg/loki/loki.go +++ b/pkg/loki/loki.go @@ -11,8 +11,6 @@ import ( rt "runtime" "time" - ingester_rf1 "github.com/grafana/loki/v3/pkg/ingester-rf1" - "go.uber.org/atomic" "github.com/fatih/color" @@ -42,6 +40,10 @@ import ( "github.com/grafana/loki/v3/pkg/distributor" "github.com/grafana/loki/v3/pkg/indexgateway" "github.com/grafana/loki/v3/pkg/ingester" + ingester_rf1 "github.com/grafana/loki/v3/pkg/ingester-rf1" + "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore" + metastoreclient "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore/client" + "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore/health" ingester_client "github.com/grafana/loki/v3/pkg/ingester/client" "github.com/grafana/loki/v3/pkg/loghttp/push" "github.com/grafana/loki/v3/pkg/loki/common" @@ -49,6 +51,7 @@ import ( "github.com/grafana/loki/v3/pkg/lokifrontend/frontend/transport" "github.com/grafana/loki/v3/pkg/pattern" "github.com/grafana/loki/v3/pkg/querier" + querierrf1 "github.com/grafana/loki/v3/pkg/querier-rf1" "github.com/grafana/loki/v3/pkg/querier/queryrange" "github.com/grafana/loki/v3/pkg/querier/queryrange/queryrangebase" "github.com/grafana/loki/v3/pkg/querier/worker" @@ -84,6 +87,7 @@ type Config struct { InternalServer internalserver.Config `yaml:"internal_server,omitempty" doc:"hidden"` Distributor distributor.Config `yaml:"distributor,omitempty"` Querier querier.Config `yaml:"querier,omitempty"` + QuerierRF1 querierrf1.Config `yaml:"querier_rf1,omitempty"` QueryScheduler scheduler.Config `yaml:"query_scheduler"` Frontend lokifrontend.Config `yaml:"frontend,omitempty"` QueryRange queryrange.Config `yaml:"query_range,omitempty"` @@ -107,6 +111,8 @@ type Config struct { Worker worker.Config `yaml:"frontend_worker,omitempty"` TableManager index.TableManagerConfig `yaml:"table_manager,omitempty"` MemberlistKV memberlist.KVConfig 
`yaml:"memberlist"` + Metastore metastore.Config `yaml:"metastore,omitempty"` + MetastoreClient metastoreclient.Config `yaml:"metastore_client"` RuntimeConfig runtimeconfig.Config `yaml:"runtime_config,omitempty"` OperationalConfig runtime.Config `yaml:"operational_config,omitempty"` @@ -160,10 +166,11 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) { c.Common.RegisterFlags(f) c.Distributor.RegisterFlags(f) c.Querier.RegisterFlags(f) + c.QuerierRF1.RegisterFlags(f) c.CompactorHTTPClient.RegisterFlags(f) c.CompactorGRPCClient.RegisterFlags(f) c.IngesterClient.RegisterFlags(f) - //c.IngesterRF1Client.RegisterFlags(f) + // c.IngesterRF1Client.RegisterFlags(f) c.Ingester.RegisterFlags(f) c.IngesterRF1.RegisterFlags(f) c.StorageConfig.RegisterFlags(f) @@ -187,6 +194,8 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) { c.Analytics.RegisterFlags(f) c.OperationalConfig.RegisterFlags(f) c.Profiling.RegisterFlags(f) + c.Metastore.RegisterFlags(f) + c.MetastoreClient.RegisterFlags(f) } func (c *Config) registerServerFlagsWithChangedDefaultValues(fs *flag.FlagSet) { @@ -368,6 +377,7 @@ type Loki struct { indexGatewayRingManager *lokiring.RingManager bloomCompactorRingManager *lokiring.RingManager bloomGatewayRingManager *lokiring.RingManager + MetastoreClient *metastoreclient.Client ClientMetrics storage.ClientMetrics deleteClientMetrics *deletion.DeleteRequestClientMetrics @@ -375,6 +385,7 @@ type Loki struct { Tee distributor.Tee PushParserWrapper push.RequestParserWrapper HTTPAuthMiddleware middleware.Interface + health *health.GRPCHealthService Codec Codec Metrics *server.Metrics @@ -405,6 +416,8 @@ func (t *Loki) setupAuthMiddleware() { // Also don't check auth for these gRPC methods, since single call is used for multiple users (or no user like health check). []string{ "/grpc.health.v1.Health/Check", + "/grpc.health.v1.Health/Watch", + "/metastorepb.MetastoreService/AddBlock", "/logproto.StreamData/GetStreamRates", "/frontend.Frontend/Process", "/frontend.Frontend/NotifyClientShutdown", @@ -495,7 +508,11 @@ func (t *Loki) Run(opts RunOpts) error { t.Server.HTTP.Path("/log_level").Methods("GET", "POST").Handler(util_log.LevelHandler(&t.Cfg.Server.LogLevel)) - grpc_health_v1.RegisterHealthServer(t.Server.GRPC, grpcutil.NewHealthCheck(sm)) + if t.Cfg.isTarget(Metastore) { + grpc_health_v1.RegisterHealthServer(t.Server.GRPC, t.health) + } else { + grpc_health_v1.RegisterHealthServer(t.Server.GRPC, grpcutil.NewHealthCheck(sm)) + } // Config endpoint adds a way to see the config and the changes compared to the defaults. 
t.bindConfigEndpoint(opts) @@ -688,6 +705,8 @@ func (t *Loki) setupModuleManager() error { mm.RegisterModule(CacheGenerationLoader, t.initCacheGenerationLoader) mm.RegisterModule(PatternIngester, t.initPatternIngester) mm.RegisterModule(PatternRingClient, t.initPatternRingClient, modules.UserInvisibleModule) + mm.RegisterModule(Metastore, t.initMetastore) + mm.RegisterModule(MetastoreClient, t.initMetastoreClient, modules.UserInvisibleModule) mm.RegisterModule(All, nil) mm.RegisterModule(Read, nil) @@ -703,7 +722,7 @@ func (t *Loki) setupModuleManager() error { TenantConfigs: {RuntimeConfig}, Distributor: {Ring, Server, Overrides, TenantConfigs, PatternRingClient, IngesterRF1RingClient, Analytics}, Store: {Overrides, IndexGatewayRing}, - IngesterRF1: {Store, Server, MemberlistKV, TenantConfigs, Analytics}, + IngesterRF1: {Store, Server, MemberlistKV, TenantConfigs, MetastoreClient, Analytics}, Ingester: {Store, Server, MemberlistKV, TenantConfigs, Analytics}, Querier: {Store, Ring, Server, IngesterQuerier, PatternRingClient, Overrides, Analytics, CacheGenerationLoader, QuerySchedulerRing}, QueryFrontendTripperware: {Server, Overrides, TenantConfigs}, @@ -722,6 +741,7 @@ func (t *Loki) setupModuleManager() error { PatternIngester: {Server, MemberlistKV, Analytics}, PatternRingClient: {Server, MemberlistKV, Analytics}, IngesterRF1RingClient: {Server, MemberlistKV, Analytics}, + Metastore: {Server, MetastoreClient}, IngesterQuerier: {Ring}, QuerySchedulerRing: {Overrides, MemberlistKV}, IndexGatewayRing: {Overrides, MemberlistKV}, diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index 9e6e033c5e61f..03637e2e598da 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -15,8 +15,6 @@ import ( "strings" "time" - ingester_rf1 "github.com/grafana/loki/v3/pkg/ingester-rf1" - "github.com/NYTimes/gziphandler" "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -35,14 +33,11 @@ import ( "github.com/prometheus/client_golang/prometheus/collectors/version" "github.com/prometheus/common/model" - "github.com/grafana/loki/v3/pkg/bloomcompactor" - "github.com/grafana/loki/v3/pkg/logqlmodel/stats" - "github.com/grafana/loki/v3/pkg/storage/types" - "github.com/grafana/loki/v3/pkg/analytics" "github.com/grafana/loki/v3/pkg/bloombuild/builder" "github.com/grafana/loki/v3/pkg/bloombuild/planner" bloomprotos "github.com/grafana/loki/v3/pkg/bloombuild/protos" + "github.com/grafana/loki/v3/pkg/bloomcompactor" "github.com/grafana/loki/v3/pkg/bloomgateway" "github.com/grafana/loki/v3/pkg/compactor" compactorclient "github.com/grafana/loki/v3/pkg/compactor/client" @@ -52,14 +47,21 @@ import ( "github.com/grafana/loki/v3/pkg/distributor" "github.com/grafana/loki/v3/pkg/indexgateway" "github.com/grafana/loki/v3/pkg/ingester" + ingester_rf1 "github.com/grafana/loki/v3/pkg/ingester-rf1" + "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore" + metastoreclient "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore/client" + "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore/health" + "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore/metastorepb" "github.com/grafana/loki/v3/pkg/logproto" "github.com/grafana/loki/v3/pkg/logql" + "github.com/grafana/loki/v3/pkg/logqlmodel/stats" "github.com/grafana/loki/v3/pkg/lokifrontend/frontend" "github.com/grafana/loki/v3/pkg/lokifrontend/frontend/transport" "github.com/grafana/loki/v3/pkg/lokifrontend/frontend/v1/frontendv1pb" "github.com/grafana/loki/v3/pkg/lokifrontend/frontend/v2/frontendv2pb" "github.com/grafana/loki/v3/pkg/pattern" 
"github.com/grafana/loki/v3/pkg/querier" + querierrf1 "github.com/grafana/loki/v3/pkg/querier-rf1" "github.com/grafana/loki/v3/pkg/querier/queryrange" "github.com/grafana/loki/v3/pkg/querier/queryrange/queryrangebase" "github.com/grafana/loki/v3/pkg/ruler" @@ -78,6 +80,7 @@ import ( "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/boltdb" boltdbcompactor "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/boltdb/compactor" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb" + "github.com/grafana/loki/v3/pkg/storage/types" "github.com/grafana/loki/v3/pkg/util/constants" "github.com/grafana/loki/v3/pkg/util/httpreq" "github.com/grafana/loki/v3/pkg/util/limiter" @@ -139,6 +142,8 @@ const ( Backend string = "backend" Analytics string = "analytics" InitCodec string = "init-codec" + Metastore string = "metastore" + MetastoreClient string = "metastore-client" ) const ( @@ -168,6 +173,8 @@ func (t *Loki) initServer() (services.Service, error) { t.Server = serv + t.health = health.NewGRPCHealthService() + servicesToWaitFor := func() []services.Service { svs := []services.Service(nil) for m, s := range t.serviceMap { @@ -406,21 +413,29 @@ func (t *Loki) initQuerier() (services.Service, error) { return nil, err } - q, err := querier.New(t.Cfg.Querier, t.Store, t.ingesterQuerier, t.Overrides, deleteStore, prometheus.DefaultRegisterer, logger) - if err != nil { - return nil, err + if t.Cfg.QuerierRF1.Enabled { + logger.Log("Using RF-1 querier implementation") + t.Querier, err = querierrf1.New(t.Cfg.QuerierRF1, t.Store, t.Overrides, deleteStore, logger) + if err != nil { + return nil, err + } + } else { + t.Querier, err = querier.New(t.Cfg.Querier, t.Store, t.ingesterQuerier, t.Overrides, deleteStore, prometheus.DefaultRegisterer, logger) + if err != nil { + return nil, err + } } + if t.Cfg.Pattern.Enabled { patternQuerier, err := pattern.NewIngesterQuerier(t.Cfg.Pattern, t.PatternRingClient, t.Cfg.MetricsNamespace, prometheus.DefaultRegisterer, util_log.Logger) if err != nil { return nil, err } - q.WithPatternQuerier(patternQuerier) + t.Querier.WithPatternQuerier(patternQuerier) } + if t.Cfg.Querier.MultiTenantQueriesEnabled { - t.Querier = querier.NewMultiTenantQuerier(q, util_log.Logger) - } else { - t.Querier = q + t.Querier = querier.NewMultiTenantQuerier(t.Querier, util_log.Logger) } querierWorkerServiceConfig := querier.WorkerServiceConfig{ @@ -644,7 +659,7 @@ func (t *Loki) initIngesterRF1() (_ services.Service, err error) { level.Warn(util_log.Logger).Log("msg", "The config setting shutdown marker path is not set. 
The /ingester/prepare_shutdown endpoint won't work") } - t.IngesterRF1, err = ingester_rf1.New(t.Cfg.IngesterRF1, t.Cfg.IngesterRF1Client, t.Cfg.SchemaConfig.Configs, t.Cfg.StorageConfig, t.ClientMetrics, t.Overrides, t.tenantConfigs, prometheus.DefaultRegisterer, t.Cfg.Distributor.WriteFailuresLogging, t.Cfg.MetricsNamespace, logger, t.UsageTracker, t.ring) + t.IngesterRF1, err = ingester_rf1.New(t.Cfg.IngesterRF1, t.Cfg.IngesterRF1Client, t.Cfg.SchemaConfig.Configs, t.Cfg.StorageConfig, t.ClientMetrics, t.Overrides, t.tenantConfigs, t.MetastoreClient, prometheus.DefaultRegisterer, t.Cfg.Distributor.WriteFailuresLogging, t.Cfg.MetricsNamespace, logger, t.UsageTracker, t.ring) if err != nil { fmt.Println("Error initializing ingester rf1", err) return @@ -1798,6 +1813,25 @@ func (t *Loki) initAnalytics() (services.Service, error) { return ur, nil } +func (t *Loki) initMetastore() (services.Service, error) { + m, err := metastore.New(t.Cfg.Metastore, log.With(util_log.Logger, "component", "metastore"), prometheus.DefaultRegisterer, t.health) + if err != nil { + return nil, err + } + metastorepb.RegisterMetastoreServiceServer(t.Server.GRPC, m) + + return m, nil +} + +func (t *Loki) initMetastoreClient() (services.Service, error) { + mc, err := metastoreclient.New(t.Cfg.MetastoreClient, prometheus.DefaultRegisterer) + if err != nil { + return nil, err + } + t.MetastoreClient = mc + return mc.Service(), nil +} + func (t *Loki) deleteRequestsClient(clientType string, limits limiter.CombinedLimits) (deletion.DeleteRequestsClient, error) { if !t.supportIndexDeleteRequest() || !t.Cfg.CompactorConfig.RetentionEnabled { return deletion.NewNoOpDeleteRequestsStore(), nil diff --git a/pkg/pattern/drain/drain.go b/pkg/pattern/drain/drain.go index 64ef81e0e8a84..5c48d2980e022 100644 --- a/pkg/pattern/drain/drain.go +++ b/pkg/pattern/drain/drain.go @@ -158,6 +158,7 @@ func New(config *Config, format string, metrics *Metrics) *Drain { default: tokenizer = newPunctuationTokenizer() } + d.idToCluster = createLogClusterCache(config.MaxClusters, func(int, *LogCluster) { if metrics != nil { if d.pruning { @@ -170,7 +171,10 @@ func New(config *Config, format string, metrics *Metrics) *Drain { limiter.Evict() } }) - d.tokenizer = tokenizer + d.tokenizer = &DedupingTokenizer{ + LineTokenizer: tokenizer, + dedupParam: config.ParamString, + } d.limiter = limiter return d } @@ -297,14 +301,6 @@ func deduplicatePlaceholders(line string, placeholder string) string { return unsafeString(builder) } -func (d *Drain) PatternString(c *LogCluster) string { - s := deduplicatePlaceholders(d.tokenizer.Join(c.Tokens, c.TokenState), d.config.ParamString) - if s == d.config.ParamString { - return "" - } - return s -} - func (d *Drain) Prune() { d.pruneTree(d.rootNode) } diff --git a/pkg/pattern/drain/drain_test.go b/pkg/pattern/drain/drain_test.go index 944fd5ccdcb98..1aabea548aed6 100644 --- a/pkg/pattern/drain/drain_test.go +++ b/pkg/pattern/drain/drain_test.go @@ -260,7 +260,7 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="<_>" checks="[pod is a mirror pod, pod is a static pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`, `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="<_>" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods"`, `I0507 <_> 1 
defaultevictor.go:202] "Pod fails the following checks" pod="<_>" checks="pod is a DaemonSet pod"`, - `I0507 <_> 1 node.go:157] "Pod does not fit on any other node" pod:="<_>" node:="<_>" error:="[pod node selector does not match the node label, <_> <_><_> <_> <_><_> <_> <_>]"`, + `I0507 <_> 1 node.go:157] "Pod does not fit on any other node" pod:="<_>" node:="<_>" error:="[pod node selector does not match the node label, <_> <_> <_> <_> <_> <_>]"`, `I0507 <_> 1 node.go:157] "Pod does not fit on any other node" pod:="<_>" node:="<_>" error:="[pod node selector does not match the node label, insufficient <_>, insufficient <_>]"`, `I0507 <_> 1 node.go:157] "Pod does not fit on any other node" pod:="<_>" node:="<_>" error:="[pod node selector does not match the node label, insufficient <_>]"`, `I0507 <_> 1 node.go:157] "Pod does not fit on any other node" pod:="<_>" node:="<_>" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient <_>, insufficient <_>]"`, diff --git a/pkg/pattern/drain/line_tokenizer.go b/pkg/pattern/drain/line_tokenizer.go index 04b998b8dba53..e2acd8228bcfa 100644 --- a/pkg/pattern/drain/line_tokenizer.go +++ b/pkg/pattern/drain/line_tokenizer.go @@ -233,7 +233,7 @@ func (t *logfmtTokenizer) Join(tokens []string, _ interface{}) string { buf := bytes.NewBuffer(make([]byte, 0, 1024)) enc := gologfmt.NewEncoder(buf) for i := 0; i < len(tokens); i += 2 { - k, v := tokens[i], tokens[i+1] + k, v := tokens[i], unsafeBytes(tokens[i+1]) if err := enc.EncodeKeyval(k, v); err != nil { return "" } @@ -286,3 +286,12 @@ func isVariableField(key []byte) bool { bytes.EqualFold(key, []byte("time")) || bytes.EqualFold(key, []byte("timestamp")) } + +type DedupingTokenizer struct { + LineTokenizer + dedupParam string +} + +func (d DedupingTokenizer) Join(tokens []string, state interface{}) string { + return deduplicatePlaceholders(d.LineTokenizer.Join(tokens, state), d.dedupParam) +} diff --git a/pkg/pattern/drain/line_tokenizer_test.go b/pkg/pattern/drain/line_tokenizer_test.go index 106d5d44b4870..200d1a8f510e6 100644 --- a/pkg/pattern/drain/line_tokenizer_test.go +++ b/pkg/pattern/drain/line_tokenizer_test.go @@ -209,6 +209,10 @@ func TestLogFmtTokenizer(t *testing.T) { line: `ts=2024-05-30T12:50:36.648377186Z caller=scheduler_processor.go:143 level=warn msg="error contacting scheduler" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: EOF\"" addr=10.0.151.101:9095`, want: []string{"ts", param, "caller", "scheduler_processor.go:143", "level", "warn", "msg", "error contacting scheduler", "err", "rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: EOF\"", "addr", "10.0.151.101:9095"}, }, + { + line: `logger=sqlstore.metrics traceID=c933fefbe893411d3be8e1648d6bcf37 t=2024-07-10T16:00:15.564896897Z level=debug msg="query finished" status=success elapsedtime=1.324305ms error=null`, + want: []string{"logger", "sqlstore.metrics", "traceID", "<_>", "t", "<_>", "level", "debug", "msg", "query finished", "status", "success", "elapsedtime", "1.324305ms", "", "", "error", "null"}, + }, } tokenizer := newLogfmtTokenizer(param) @@ -258,6 +262,10 @@ func TestLogFmtTokenizerJoin(t *testing.T) { want: `ts=2024-05-30T12:50:36.648377186Z caller=scheduler_processor.go:143 level=warn msg="error contacting scheduler" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: EOF\"" addr=10.0.151.101:9095`, tokens: 
[]string{"ts", "2024-05-30T12:50:36.648377186Z", "caller", "scheduler_processor.go:143", "level", "warn", "msg", "error contacting scheduler", "err", "rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: EOF\"", "addr", "10.0.151.101:9095"}, }, + { + want: `error=null`, + tokens: []string{"error", "null"}, + }, } tokenizer := newLogfmtTokenizer("") diff --git a/pkg/pattern/metric/evaluator.go b/pkg/pattern/metric/evaluator.go index eb8bd4ac2d355..d15d98dd9b1b0 100644 --- a/pkg/pattern/metric/evaluator.go +++ b/pkg/pattern/metric/evaluator.go @@ -6,7 +6,6 @@ import ( "sort" "time" - "github.com/pkg/errors" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" @@ -127,7 +126,7 @@ func (ev *DefaultEvaluatorFactory) NewStepEvaluator( } if e.Grouping == nil { - return nil, errors.Errorf("aggregation operator '%q' without grouping", e.Operation) + return nil, fmt.Errorf("aggregation operator '%q' without grouping", e.Operation) } nextEvaluator, err := evFactory.NewStepEvaluator(ctx, evFactory, e.Left, from, through, step) if err != nil { @@ -158,7 +157,7 @@ func (ev *DefaultEvaluatorFactory) NewStepEvaluator( ) return NewSampleRangeAggEvaluator(loki_iter.NewPeekingSampleIterator(it), e, params, e.Left.Offset) default: - return nil, errors.Errorf("unexpected expr type (%T)", e) + return nil, fmt.Errorf("unexpected expr type (%T)", e) } } diff --git a/pkg/querier-rf1/handler.go b/pkg/querier-rf1/handler.go new file mode 100644 index 0000000000000..f7ade2b237218 --- /dev/null +++ b/pkg/querier-rf1/handler.go @@ -0,0 +1,149 @@ +package querierrf1 + +import ( + "context" + "fmt" + "net/http" + + "github.com/grafana/loki/v3/pkg/loghttp" + "github.com/grafana/loki/v3/pkg/logproto" + "github.com/grafana/loki/v3/pkg/querier/queryrange" + "github.com/grafana/loki/v3/pkg/querier/queryrange/queryrangebase" +) + +type Handler struct { + api *QuerierAPI +} + +func NewQuerierHandler(api *QuerierAPI) *Handler { + return &Handler{ + api: api, + } +} + +func (h *Handler) Do(ctx context.Context, req queryrangebase.Request) (queryrangebase.Response, error) { + + switch concrete := req.(type) { + case *queryrange.LokiRequest: + res, err := h.api.RangeQueryHandler(ctx, concrete) + if err != nil { + return nil, err + } + + params, err := queryrange.ParamsFromRequest(req) + if err != nil { + return nil, err + } + + return queryrange.ResultToResponse(res, params) + case *queryrange.LokiInstantRequest: + res, err := h.api.InstantQueryHandler(ctx, concrete) + if err != nil { + return nil, err + } + + params, err := queryrange.ParamsFromRequest(req) + if err != nil { + return nil, err + } + + return queryrange.ResultToResponse(res, params) + case *queryrange.LokiSeriesRequest: + request := &logproto.SeriesRequest{ + Start: concrete.StartTs, + End: concrete.EndTs, + Groups: concrete.Match, + Shards: concrete.Shards, + } + result, statResult, err := h.api.SeriesHandler(ctx, request) + if err != nil { + return nil, err + } + + return &queryrange.LokiSeriesResponse{ + Status: "success", + Version: uint32(loghttp.VersionV1), + Data: result.Series, + Statistics: statResult, + }, nil + case *queryrange.LabelRequest: + res, err := h.api.LabelHandler(ctx, &concrete.LabelRequest) + if err != nil { + return nil, err + } + + return &queryrange.LokiLabelNamesResponse{ + Status: "success", + Version: uint32(loghttp.VersionV1), + Data: res.Values, + }, nil + case *logproto.IndexStatsRequest: + request := 
loghttp.NewRangeQueryWithDefaults() + request.Start = concrete.From.Time() + request.End = concrete.Through.Time() + request.Query = concrete.GetQuery() + request.UpdateStep() + + result, err := h.api.IndexStatsHandler(ctx, request) + if err != nil { + return nil, err + } + return &queryrange.IndexStatsResponse{Response: result}, nil + case *logproto.ShardsRequest: + request := loghttp.NewRangeQueryWithDefaults() + request.Start = concrete.From.Time() + request.End = concrete.Through.Time() + request.Query = concrete.GetQuery() + request.UpdateStep() + result, err := h.api.IndexShardsHandler(ctx, request, concrete.TargetBytesPerShard) + if err != nil { + return nil, err + } + return &queryrange.ShardsResponse{Response: result}, nil + + case *logproto.VolumeRequest: + result, err := h.api.VolumeHandler(ctx, concrete) + if err != nil { + return nil, err + } + return &queryrange.VolumeResponse{Response: result}, nil + case *queryrange.DetectedFieldsRequest: + result, err := h.api.DetectedFieldsHandler(ctx, &concrete.DetectedFieldsRequest) + if err != nil { + return nil, err + } + + return &queryrange.DetectedFieldsResponse{ + Response: result, + }, nil + case *logproto.QueryPatternsRequest: + result, err := h.api.PatternsHandler(ctx, concrete) + if err != nil { + return nil, err + } + return &queryrange.QueryPatternsResponse{ + Response: result, + }, nil + case *queryrange.DetectedLabelsRequest: + result, err := h.api.DetectedLabelsHandler(ctx, &concrete.DetectedLabelsRequest) + if err != nil { + return nil, err + } + + return &queryrange.DetectedLabelsResponse{Response: result}, nil + case *logproto.QuerySamplesRequest: + result, err := h.api.SamplesHandler(ctx, concrete) + if err != nil { + return nil, err + } + return &queryrange.QuerySamplesResponse{ + Response: result, + }, nil + default: + return nil, fmt.Errorf("unsupported query type %T", req) + } +} + +func NewQuerierHTTPHandler(h *Handler) http.Handler { + return queryrange.NewSerializeHTTPHandler(h, queryrange.DefaultCodec) +} diff --git a/pkg/querier-rf1/http.go b/pkg/querier-rf1/http.go new file mode 100644 index 0000000000000..875647deb7e88 --- /dev/null +++ b/pkg/querier-rf1/http.go @@ -0,0 +1,370 @@ +package querierrf1 + +import ( + "context" + "net/http" + "strconv" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/grafana/dskit/httpgrpc" + "github.com/grafana/dskit/middleware" + "github.com/opentracing/opentracing-go" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/promql/parser" + + "github.com/grafana/dskit/tenant" + + "github.com/grafana/loki/v3/pkg/loghttp" + "github.com/grafana/loki/v3/pkg/logproto" + "github.com/grafana/loki/v3/pkg/logql" + "github.com/grafana/loki/v3/pkg/logql/syntax" + "github.com/grafana/loki/v3/pkg/logqlmodel" + "github.com/grafana/loki/v3/pkg/logqlmodel/stats" + "github.com/grafana/loki/v3/pkg/querier/queryrange" + index_stats "github.com/grafana/loki/v3/pkg/storage/stores/index/stats" + "github.com/grafana/loki/v3/pkg/util/httpreq" + util_log "github.com/grafana/loki/v3/pkg/util/log" + serverutil "github.com/grafana/loki/v3/pkg/util/server" + "github.com/grafana/loki/v3/pkg/util/spanlogger" + util_validation "github.com/grafana/loki/v3/pkg/util/validation" +) + +const ( + wsPingPeriod = 1 * time.Second +) + +type QueryResponse struct { + ResultType parser.ValueType `json:"resultType"` + Result parser.Value `json:"result"` +} + +type Engine interface { + Query(logql.Params) logql.Query +} + +// nolint 
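For orientation, the new read path composes in three layers: the QuerierAPI defined just below evaluates queries through a LogQL engine, NewQuerierHandler wraps it in the request-type dispatcher from handler.go above, and NewQuerierHTTPHandler serializes it over HTTP. A minimal wiring sketch, assuming cfg, q, and limits are already constructed; the package name and route are illustrative, not part of this change:

package rf1sketch // hypothetical package, for illustration only

import (
	"net/http"

	"github.com/go-kit/log"

	querierrf1 "github.com/grafana/loki/v3/pkg/querier-rf1"
)

// mountRF1 composes the RF-1 read path added in this change. Only the three
// constructors come from the change; the mux route is an assumption.
func mountRF1(mux *http.ServeMux, cfg querierrf1.Config, q querierrf1.Querier, limits querierrf1.Limits, logger log.Logger) {
	api := querierrf1.NewQuerierAPI(cfg, q, limits, logger) // builds the LogQL engine internally
	h := querierrf1.NewQuerierHandler(api)                  // request-type dispatcher from handler.go
	mux.Handle("/loki/api/v1/query_range", querierrf1.NewQuerierHTTPHandler(h))
}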
// QuerierAPI defines HTTP handler functions for the querier. +type QuerierAPI struct { + querier Querier + cfg Config + limits Limits + engine Engine +} + +// NewQuerierAPI returns an instance of the QuerierAPI. +func NewQuerierAPI(cfg Config, querier Querier, limits Limits, logger log.Logger) *QuerierAPI { + engine := logql.NewEngine(cfg.Engine, querier, limits, logger) + return &QuerierAPI{ + cfg: cfg, + limits: limits, + querier: querier, + engine: engine, + } +} + +// RangeQueryHandler handles range queries and legacy log queries. +func (q *QuerierAPI) RangeQueryHandler(ctx context.Context, req *queryrange.LokiRequest) (logqlmodel.Result, error) { + if err := q.validateMaxEntriesLimits(ctx, req.Plan.AST, req.Limit); err != nil { + return logqlmodel.Result{}, err + } + + params, err := queryrange.ParamsFromRequest(req) + if err != nil { + return logqlmodel.Result{}, err + } + + query := q.engine.Query(params) + return query.Exec(ctx) +} + +// InstantQueryHandler handles instant queries. +func (q *QuerierAPI) InstantQueryHandler(ctx context.Context, req *queryrange.LokiInstantRequest) (logqlmodel.Result, error) { + // do not allow log selector expression (aka log query) as instant query + if _, ok := req.Plan.AST.(syntax.SampleExpr); !ok { + return logqlmodel.Result{}, logqlmodel.ErrUnsupportedSyntaxForInstantQuery + } + + if err := q.validateMaxEntriesLimits(ctx, req.Plan.AST, req.Limit); err != nil { + return logqlmodel.Result{}, err + } + + params, err := queryrange.ParamsFromRequest(req) + if err != nil { + return logqlmodel.Result{}, err + } + query := q.engine.Query(params) + return query.Exec(ctx) +} + +// LabelHandler handles label queries. +func (q *QuerierAPI) LabelHandler(ctx context.Context, req *logproto.LabelRequest) (*logproto.LabelResponse, error) { + timer := prometheus.NewTimer(logql.QueryTime.WithLabelValues(logql.QueryTypeLabels)) + defer timer.ObserveDuration() + + start := time.Now() + statsCtx, ctx := stats.NewContext(ctx) + + resp, err := q.querier.Label(ctx, req) + queueTime, _ := ctx.Value(httpreq.QueryQueueTimeHTTPHeader).(time.Duration) + + resLength := 0 + if resp != nil { + resLength = len(resp.Values) + } + statResult := statsCtx.Result(time.Since(start), queueTime, resLength) + if sp := opentracing.SpanFromContext(ctx); sp != nil { + sp.LogKV(statResult.KVList()...) + } + + status := 200 + if err != nil { + status, _ = serverutil.ClientHTTPStatusAndError(err) + } + + logql.RecordLabelQueryMetrics(ctx, util_log.Logger, *req.Start, *req.End, req.Name, req.Query, strconv.Itoa(status), statResult) + + return resp, err +} + +// TailHandler is a http.HandlerFunc for handling tail queries. +func (q *QuerierAPI) TailHandler(w http.ResponseWriter, r *http.Request) { + logger := util_log.WithContext(r.Context(), util_log.Logger) + logger.Log("msg", "rf1 tail handler called") + serverutil.WriteError(httpgrpc.Errorf(http.StatusInternalServerError, "not implemented"), w) +} +
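The handlers in this file all repeat the same bookkeeping: start the per-query-type timer, open a stats context, run the query, then attach the stats result to the tracing span and the recorded metrics. A condensed sketch of that shared shape, using only calls that appear above; doQuery is a hypothetical stand-in for the actual store call:

package rf1sketch // illustrative only

import (
	"context"
	"time"

	"github.com/opentracing/opentracing-go"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/grafana/loki/v3/pkg/logql"
	"github.com/grafana/loki/v3/pkg/logqlmodel/stats"
	"github.com/grafana/loki/v3/pkg/util/httpreq"
)

// instrumented captures the bookkeeping shared by the handlers in this file.
// doQuery stands in for the real store call and returns the result length.
func instrumented(ctx context.Context, queryType string, doQuery func(context.Context) (int, error)) (stats.Result, error) {
	timer := prometheus.NewTimer(logql.QueryTime.WithLabelValues(queryType))
	defer timer.ObserveDuration()

	start := time.Now()
	statsCtx, ctx := stats.NewContext(ctx)

	resLength, err := doQuery(ctx)

	// queue time is propagated on the context by the query frontend
	queueTime, _ := ctx.Value(httpreq.QueryQueueTimeHTTPHeader).(time.Duration)
	statResult := statsCtx.Result(time.Since(start), queueTime, resLength)
	if sp := opentracing.SpanFromContext(ctx); sp != nil {
		sp.LogKV(statResult.KVList()...)
	}
	return statResult, err
}

+// SeriesHandler returns the list of time series that match a certain label set.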
+// See https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers +func (q *QuerierAPI) SeriesHandler(ctx context.Context, req *logproto.SeriesRequest) (*logproto.SeriesResponse, stats.Result, error) { + timer := prometheus.NewTimer(logql.QueryTime.WithLabelValues(logql.QueryTypeSeries)) + defer timer.ObserveDuration() + + start := time.Now() + statsCtx, ctx := stats.NewContext(ctx) + + resp, err := q.querier.Series(ctx, req) + queueTime, _ := ctx.Value(httpreq.QueryQueueTimeHTTPHeader).(time.Duration) + + resLength := 0 + if resp != nil { + resLength = len(resp.Series) + } + + statResult := statsCtx.Result(time.Since(start), queueTime, resLength) + if sp := opentracing.SpanFromContext(ctx); sp != nil { + sp.LogKV(statResult.KVList()...) + } + + status := 200 + if err != nil { + status, _ = serverutil.ClientHTTPStatusAndError(err) + } + + logql.RecordSeriesQueryMetrics(ctx, util_log.Logger, req.Start, req.End, req.Groups, strconv.Itoa(status), req.GetShards(), statResult) + + return resp, statResult, err +} + +// IndexStatsHandler queries the index for the data statistics related to a query +func (q *QuerierAPI) IndexStatsHandler(ctx context.Context, req *loghttp.RangeQuery) (*logproto.IndexStatsResponse, error) { + timer := prometheus.NewTimer(logql.QueryTime.WithLabelValues(logql.QueryTypeStats)) + defer timer.ObserveDuration() + + start := time.Now() + statsCtx, ctx := stats.NewContext(ctx) + + // TODO(karsten): we might want to change IndexStats to receive a logproto.IndexStatsRequest instead + resp, err := q.querier.IndexStats(ctx, req) + if resp == nil { + // Some stores don't implement this + resp = &index_stats.Stats{} + } + + queueTime, _ := ctx.Value(httpreq.QueryQueueTimeHTTPHeader).(time.Duration) + statResult := statsCtx.Result(time.Since(start), queueTime, 1) + if sp := opentracing.SpanFromContext(ctx); sp != nil { + sp.LogKV(statResult.KVList()...) + } + + status := 200 + if err != nil { + status, _ = serverutil.ClientHTTPStatusAndError(err) + } + + logql.RecordStatsQueryMetrics(ctx, util_log.Logger, req.Start, req.End, req.Query, strconv.Itoa(status), statResult) + + return resp, err +} + +func (q *QuerierAPI) IndexShardsHandler(ctx context.Context, req *loghttp.RangeQuery, targetBytesPerShard uint64) (*logproto.ShardsResponse, error) { + timer := prometheus.NewTimer(logql.QueryTime.WithLabelValues(logql.QueryTypeShards)) + defer timer.ObserveDuration() + + start := time.Now() + statsCtx, ctx := stats.NewContext(ctx) + + resp, err := q.querier.IndexShards(ctx, req, targetBytesPerShard) + queueTime, _ := ctx.Value(httpreq.QueryQueueTimeHTTPHeader).(time.Duration) + + resLength := 0 + if resp != nil { + resLength = len(resp.Shards) + stats.JoinResults(ctx, resp.Statistics) + } + + statResult := statsCtx.Result(time.Since(start), queueTime, resLength) + + if sp := opentracing.SpanFromContext(ctx); sp != nil { + sp.LogKV(statResult.KVList()...) + } + + status := 200 + if err != nil { + status, _ = serverutil.ClientHTTPStatusAndError(err) + } + + logql.RecordShardsQueryMetrics( + ctx, util_log.Logger, req.Start, req.End, req.Query, targetBytesPerShard, strconv.Itoa(status), resLength, statResult, + ) + + return resp, err +} + +// TODO(trevorwhitney): add test for the handler split +
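As a worked illustration of the volume semantics described in the comment below, with assumed field values: a request over a one-hour range with a five-minute step yields twelve per-step values, while a step equal to the full range collapses the response to a single value. Step is assumed to be in milliseconds, as elsewhere in the HTTP API:

package rf1sketch // illustrative only

import (
	"time"

	"github.com/prometheus/common/model"

	"github.com/grafana/loki/v3/pkg/logproto"
)

// exampleVolumeRequest builds a volume request over a one-hour range with a
// five-minute step, i.e. twelve per-step values. Setting Step to the full
// range width requests a single aggregated value instead. All field values
// here are assumptions for illustration.
func exampleVolumeRequest(start time.Time) *logproto.VolumeRequest {
	return &logproto.VolumeRequest{
		From:     model.TimeFromUnix(start.Unix()),
		Through:  model.TimeFromUnix(start.Add(time.Hour).Unix()),
		Matchers: `{job="app"}`,
		Limit:    100,
		Step:     (5 * time.Minute).Milliseconds(), // twelve buckets over 1h
	}
}

+// VolumeHandler queries the index label volumes related to the passed matchers and given time range. +// Returns either N values, where N is the time range divided by the step, or a single value for the whole time range, depending on the request.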
+func (q *QuerierAPI) VolumeHandler(ctx context.Context, req *logproto.VolumeRequest) (*logproto.VolumeResponse, error) { + timer := prometheus.NewTimer(logql.QueryTime.WithLabelValues(logql.QueryTypeVolume)) + defer timer.ObserveDuration() + + start := time.Now() + statsCtx, ctx := stats.NewContext(ctx) + + resp, err := q.querier.Volume(ctx, req) + if err != nil { + return nil, err + } + if resp == nil { // Some stores don't implement this + return &logproto.VolumeResponse{Volumes: []logproto.Volume{}}, nil + } + + queueTime, _ := ctx.Value(httpreq.QueryQueueTimeHTTPHeader).(time.Duration) + statResult := statsCtx.Result(time.Since(start), queueTime, 1) + if sp := opentracing.SpanFromContext(ctx); sp != nil { + sp.LogKV(statResult.KVList()...) + } + + status := 200 + if err != nil { + status, _ = serverutil.ClientHTTPStatusAndError(err) + } + + logql.RecordVolumeQueryMetrics(ctx, util_log.Logger, req.From.Time(), req.Through.Time(), req.GetQuery(), uint32(req.GetLimit()), time.Duration(req.GetStep()), strconv.Itoa(status), statResult) + + return resp, nil +} + +func (q *QuerierAPI) DetectedFieldsHandler(ctx context.Context, req *logproto.DetectedFieldsRequest) (*logproto.DetectedFieldsResponse, error) { + resp, err := q.querier.DetectedFields(ctx, req) + if err != nil { + return nil, err + } + if resp == nil { // Some stores don't implement this + level.Debug(spanlogger.FromContext(ctx)).Log( + "msg", "queried store for detected fields that does not support it, no response from querier.DetectedFields", + ) + return &logproto.DetectedFieldsResponse{ + Fields: []*logproto.DetectedField{}, + FieldLimit: req.GetFieldLimit(), + }, nil + } + return resp, nil +} + +func (q *QuerierAPI) PatternsHandler(ctx context.Context, req *logproto.QueryPatternsRequest) (*logproto.QueryPatternsResponse, error) { + resp, err := q.querier.Patterns(ctx, req) + if err != nil { + return nil, err + } + if resp == nil { // Some stores don't implement this + return &logproto.QueryPatternsResponse{ + Series: []*logproto.PatternSeries{}, + }, nil + } + return resp, nil +} + +func (q *QuerierAPI) SamplesHandler(ctx context.Context, req *logproto.QuerySamplesRequest) (*logproto.QuerySamplesResponse, error) { + resp, err := q.querier.SelectMetricSamples(ctx, req) + if err != nil { + return nil, err + } + if resp == nil { // Some stores don't implement this + return &logproto.QuerySamplesResponse{ + Series: []logproto.Series{}, + }, nil + } + return resp, nil +} + +func (q *QuerierAPI) validateMaxEntriesLimits(ctx context.Context, expr syntax.Expr, limit uint32) error { + tenantIDs, err := tenant.TenantIDs(ctx) + if err != nil { + return httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + } + + // entry limit does not apply to metric queries. 
+ if _, ok := expr.(syntax.SampleExpr); ok { + return nil + } + + maxEntriesCapture := func(id string) int { return q.limits.MaxEntriesLimitPerQuery(ctx, id) } + maxEntriesLimit := util_validation.SmallestPositiveNonZeroIntPerTenant(tenantIDs, maxEntriesCapture) + if int(limit) > maxEntriesLimit && maxEntriesLimit != 0 { + return httpgrpc.Errorf(http.StatusBadRequest, + "max entries limit per query exceeded, limit > max_entries_limit (%d > %d)", limit, maxEntriesLimit) + } + return nil +} + +// DetectedLabelsHandler returns a response for detected labels +func (q *QuerierAPI) DetectedLabelsHandler(ctx context.Context, req *logproto.DetectedLabelsRequest) (*logproto.DetectedLabelsResponse, error) { + resp, err := q.querier.DetectedLabels(ctx, req) + + if err != nil { + return nil, err + } + return resp, nil +} + +// WrapQuerySpanAndTimeout applies a context deadline and a span logger to a query call. +// +// The timeout is based on the per-tenant query timeout configuration. +func WrapQuerySpanAndTimeout(call string, limits Limits) middleware.Interface { + return middleware.Func(func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + sp, ctx := opentracing.StartSpanFromContext(req.Context(), call) + defer sp.Finish() + log := spanlogger.FromContext(req.Context()) + defer log.Finish() + + tenants, err := tenant.TenantIDs(ctx) + if err != nil { + level.Error(log).Log("msg", "couldn't fetch tenantID", "err", err) + serverutil.WriteError(httpgrpc.Errorf(http.StatusBadRequest, err.Error()), w) + return + } + + timeoutCapture := func(id string) time.Duration { return limits.QueryTimeout(ctx, id) } + timeout := util_validation.SmallestPositiveNonZeroDurationPerTenant(tenants, timeoutCapture) + newCtx, cancel := context.WithTimeoutCause(ctx, timeout, errors.New("query timeout reached")) + defer cancel() + + newReq := req.WithContext(newCtx) + next.ServeHTTP(w, newReq) + }) + }) +} diff --git a/pkg/querier-rf1/querier.go b/pkg/querier-rf1/querier.go new file mode 100644 index 0000000000000..d4952ea59745c --- /dev/null +++ b/pkg/querier-rf1/querier.go @@ -0,0 +1,930 @@ +package querierrf1 + +import ( + "context" + "flag" + "fmt" + "net/http" + "sort" + "strconv" + "time" + + "github.com/axiomhq/hyperloglog" + "github.com/dustin/go-humanize" + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/google/uuid" + "github.com/grafana/dskit/httpgrpc" + "github.com/grafana/dskit/tenant" + "github.com/opentracing/opentracing-go" + "github.com/pkg/errors" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" + "golang.org/x/exp/slices" + "golang.org/x/sync/errgroup" + "google.golang.org/grpc/health/grpc_health_v1" + + "github.com/grafana/loki/v3/pkg/compactor/deletion" + "github.com/grafana/loki/v3/pkg/indexgateway" + "github.com/grafana/loki/v3/pkg/iter" + "github.com/grafana/loki/v3/pkg/loghttp" + "github.com/grafana/loki/v3/pkg/logproto" + "github.com/grafana/loki/v3/pkg/logql" + logql_log "github.com/grafana/loki/v3/pkg/logql/log" + "github.com/grafana/loki/v3/pkg/logql/syntax" + "github.com/grafana/loki/v3/pkg/logqlmodel" + "github.com/grafana/loki/v3/pkg/querier" + querier_limits "github.com/grafana/loki/v3/pkg/querier/limits" + "github.com/grafana/loki/v3/pkg/querier/plan" + "github.com/grafana/loki/v3/pkg/storage" + "github.com/grafana/loki/v3/pkg/storage/stores/index" + "github.com/grafana/loki/v3/pkg/storage/stores/index/seriesvolume" + 
"github.com/grafana/loki/v3/pkg/storage/stores/index/stats" + "github.com/grafana/loki/v3/pkg/util/spanlogger" + util_validation "github.com/grafana/loki/v3/pkg/util/validation" +) + +var nowFunc = func() time.Time { return time.Now() } + +type Config struct { + Enabled bool + ExtraQueryDelay time.Duration `yaml:"extra_query_delay,omitempty"` + Engine logql.EngineOpts `yaml:"engine,omitempty"` + MaxConcurrent int `yaml:"max_concurrent"` + PerRequestLimitsEnabled bool `yaml:"per_request_limits_enabled"` +} + +// RegisterFlags register flags. +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + cfg.Engine.RegisterFlagsWithPrefix("querier-rf1", f) + f.BoolVar(&cfg.Enabled, "querier-rf1.enabled", false, "Enable the RF1 querier. If set, replaces the usual querier with a RF-1 querier when using 'ALL' target.") + f.DurationVar(&cfg.ExtraQueryDelay, "querier-rf1.extra-query-delay", 0, "Time to wait before sending more than the minimum successful query requests.") + f.IntVar(&cfg.MaxConcurrent, "querier-rf1.max-concurrent", 4, "The maximum number of queries that can be simultaneously processed by the querier.") + f.BoolVar(&cfg.PerRequestLimitsEnabled, "querier-rf1.per-request-limits-enabled", false, "When true, querier limits sent via a header are enforced.") +} + +var _ Querier = &Rf1Querier{} + +// Querier can select logs and samples and handle query requests. +type Querier interface { + logql.Querier + Label(ctx context.Context, req *logproto.LabelRequest) (*logproto.LabelResponse, error) + Series(ctx context.Context, req *logproto.SeriesRequest) (*logproto.SeriesResponse, error) + Tail(ctx context.Context, req *logproto.TailRequest, categorizedLabels bool) (*querier.Tailer, error) + IndexStats(ctx context.Context, req *loghttp.RangeQuery) (*stats.Stats, error) + IndexShards(ctx context.Context, req *loghttp.RangeQuery, targetBytesPerShard uint64) (*logproto.ShardsResponse, error) + Volume(ctx context.Context, req *logproto.VolumeRequest) (*logproto.VolumeResponse, error) + DetectedFields(ctx context.Context, req *logproto.DetectedFieldsRequest) (*logproto.DetectedFieldsResponse, error) + Patterns(ctx context.Context, req *logproto.QueryPatternsRequest) (*logproto.QueryPatternsResponse, error) + DetectedLabels(ctx context.Context, req *logproto.DetectedLabelsRequest) (*logproto.DetectedLabelsResponse, error) + SelectMetricSamples(ctx context.Context, req *logproto.QuerySamplesRequest) (*logproto.QuerySamplesResponse, error) +} + +type Limits querier_limits.Limits + +// Store is the store interface we need on the querier. +type Store interface { + storage.SelectStore + index.BaseReader + index.StatsReader +} + +// Rf1Querier handles rf1 queries. +type Rf1Querier struct { + cfg Config + store Store + limits Limits + deleteGetter deleteGetter + logger log.Logger + patternQuerier PatterQuerier +} + +type deleteGetter interface { + GetAllDeleteRequestsForUser(ctx context.Context, userID string) ([]deletion.DeleteRequest, error) +} + +// New makes a new Querier for RF1 work. +func New(cfg Config, store Store, limits Limits, d deleteGetter, logger log.Logger) (*Rf1Querier, error) { + return &Rf1Querier{ + cfg: cfg, + store: store, + limits: limits, + deleteGetter: d, + logger: logger, + }, nil +} + +// Select Implements logql.Querier which select logs via matchers and regex filters. 
+func (q *Rf1Querier) SelectLogs(ctx context.Context, params logql.SelectLogParams) (iter.EntryIterator, error) { + var err error + params.Start, params.End, err = q.validateQueryRequest(ctx, params) + if err != nil { + return nil, err + } + + params.QueryRequest.Deletes, err = q.deletesForUser(ctx, params.Start, params.End) + if err != nil { + level.Error(spanlogger.FromContext(ctx)).Log("msg", "failed loading deletes for user", "err", err) + } + + sp := opentracing.SpanFromContext(ctx) + iters := []iter.EntryIterator{} + if sp != nil { + sp.LogKV( + "msg", "querying rf1 store", + "params", params) + } + storeIter, err := q.store.SelectLogs(ctx, params) + if err != nil { + return nil, err + } + + iters = append(iters, storeIter) + if len(iters) == 1 { + return iters[0], nil + } + return iter.NewMergeEntryIterator(ctx, iters, params.Direction), nil +} + +func (q *Rf1Querier) SelectSamples(ctx context.Context, params logql.SelectSampleParams) (iter.SampleIterator, error) { + var err error + params.Start, params.End, err = q.validateQueryRequest(ctx, params) + if err != nil { + return nil, err + } + + params.SampleQueryRequest.Deletes, err = q.deletesForUser(ctx, params.Start, params.End) + if err != nil { + level.Error(spanlogger.FromContext(ctx)).Log("msg", "failed loading deletes for user", "err", err) + } + + sp := opentracing.SpanFromContext(ctx) + if sp != nil { + sp.LogKV( + "msg", "querying rf1 store for samples", + "params", params) + } + storeIter, err := q.store.SelectSamples(ctx, params) + if err != nil { + return nil, err + } + + iters := []iter.SampleIterator{} + iters = append(iters, storeIter) + return iter.NewMergeSampleIterator(ctx, iters), nil +} + +func (q *Rf1Querier) deletesForUser(ctx context.Context, startT, endT time.Time) ([]*logproto.Delete, error) { + userID, err := tenant.TenantID(ctx) + if err != nil { + return nil, err + } + + d, err := q.deleteGetter.GetAllDeleteRequestsForUser(ctx, userID) + if err != nil { + return nil, err + } + + start := startT.UnixNano() + end := endT.UnixNano() + + var deletes []*logproto.Delete + for _, del := range d { + if del.StartTime.UnixNano() <= end && del.EndTime.UnixNano() >= start { + deletes = append(deletes, &logproto.Delete{ + Selector: del.Query, + Start: del.StartTime.UnixNano(), + End: del.EndTime.UnixNano(), + }) + } + } + + return deletes, nil +} + +// Label does the heavy lifting for a Label query. +func (q *Rf1Querier) Label(ctx context.Context, req *logproto.LabelRequest) (*logproto.LabelResponse, error) { + userID, err := tenant.TenantID(ctx) + if err != nil { + return nil, err + } + + if *req.Start, *req.End, err = validateQueryTimeRangeLimits(ctx, userID, q.limits, *req.Start, *req.End); err != nil { + return nil, err + } + + var matchers []*labels.Matcher + if req.Query != "" { + matchers, err = syntax.ParseMatchers(req.Query, true) + if err != nil { + return nil, err + } + } + + // Enforce the query timeout while querying backends + queryTimeout := q.limits.QueryTimeout(ctx, userID) + ctx, cancel := context.WithDeadline(ctx, time.Now().Add(queryTimeout)) + defer cancel() + + g, ctx := errgroup.WithContext(ctx) + + var storeValues []string + g.Go(func() error { + var ( + err error + from = model.TimeFromUnixNano(req.Start.UnixNano()) + through = model.TimeFromUnixNano(req.End.UnixNano()) + ) + + if req.Values { + storeValues, err = q.store.LabelValuesForMetricName(ctx, userID, from, through, "logs", req.Name, matchers...) 
+ } else { + storeValues, err = q.store.LabelNamesForMetricName(ctx, userID, from, through, "logs", matchers...) + } + return err + }) + + if err := g.Wait(); err != nil { + return nil, err + } + + return &logproto.LabelResponse{ + Values: storeValues, + }, nil +} + +// Check implements the grpc healthcheck +func (*Rf1Querier) Check(_ context.Context, _ *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) { + return &grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_SERVING}, nil +} + +// Tail is not implemented for the RF-1 querier. +func (q *Rf1Querier) Tail(_ context.Context, _ *logproto.TailRequest, _ bool) (*querier.Tailer, error) { + return nil, errors.New("not implemented") +} + +// Series fetches any matching series for a list of matcher sets +func (q *Rf1Querier) Series(ctx context.Context, req *logproto.SeriesRequest) (*logproto.SeriesResponse, error) { + userID, err := tenant.TenantID(ctx) + if err != nil { + return nil, err + } + + if req.Start, req.End, err = validateQueryTimeRangeLimits(ctx, userID, q.limits, req.Start, req.End); err != nil { + return nil, err + } + + // Enforce the query timeout while querying backends + queryTimeout := q.limits.QueryTimeout(ctx, userID) + ctx, cancel := context.WithDeadlineCause(ctx, time.Now().Add(queryTimeout), errors.New("query timeout reached")) + defer cancel() + + return q.awaitSeries(ctx, req) +} + +func (q *Rf1Querier) awaitSeries(ctx context.Context, req *logproto.SeriesRequest) (*logproto.SeriesResponse, error) { + // buffer the channels to the # of calls they're expecting + series := make(chan [][]logproto.SeriesIdentifier, 1) + errs := make(chan error, 1) + + go func() { + storeValues, err := q.seriesForMatchers(ctx, req.Start, req.End, req.GetGroups(), req.Shards) + if err != nil { + errs <- err + return + } + series <- [][]logproto.SeriesIdentifier{storeValues} + }() + + var sets [][]logproto.SeriesIdentifier + for i := 0; i < 1; i++ { // expect a single result: only the store goroutine above sends + select { + case err := <-errs: + return nil, err + case s := <-series: + sets = append(sets, s...) + } + } + + response := &logproto.SeriesResponse{ + Series: make([]logproto.SeriesIdentifier, 0), + } + seen := make(map[uint64]struct{}) + b := make([]byte, 0, 1024) + for _, set := range sets { + for _, s := range set { + key := s.Hash(b) + if _, exists := seen[key]; !exists { + seen[key] = struct{}{} + response.Series = append(response.Series, s) + } + } + } + + return response, nil +} + +// seriesForMatchers fetches series from the store for each matcher set +// TODO: make efficient if/when the index supports labels so we don't have to read chunks +func (q *Rf1Querier) seriesForMatchers( + ctx context.Context, + from, through time.Time, + groups []string, + shards []string, +) ([]logproto.SeriesIdentifier, error) { + var results []logproto.SeriesIdentifier + // If no matchers were specified for the series query, + // we send a query with an empty matcher which will match every series. + if len(groups) == 0 { + var err error + results, err = q.seriesForMatcher(ctx, from, through, "", shards) + if err != nil { + return nil, err + } + } else { + for _, group := range groups { + ids, err := q.seriesForMatcher(ctx, from, through, group, shards) + if err != nil { + return nil, err + } + results = append(results, ids...)
+ } + } + return results, nil +} + +// seriesForMatcher fetches series from the store for a given matcher +func (q *Rf1Querier) seriesForMatcher(ctx context.Context, from, through time.Time, matcher string, shards []string) ([]logproto.SeriesIdentifier, error) { + var parsed syntax.Expr + var err error + if matcher != "" { + parsed, err = syntax.ParseExpr(matcher) + if err != nil { + return nil, err + } + } + + ids, err := q.store.SelectSeries(ctx, logql.SelectLogParams{ + QueryRequest: &logproto.QueryRequest{ + Selector: matcher, + Limit: 1, + Start: from, + End: through, + Direction: logproto.FORWARD, + Shards: shards, + Plan: &plan.QueryPlan{ + AST: parsed, + }, + }, + }) + if err != nil { + return nil, err + } + return ids, nil +} + +func (q *Rf1Querier) validateQueryRequest(ctx context.Context, req logql.QueryParams) (time.Time, time.Time, error) { + userID, err := tenant.TenantID(ctx) + if err != nil { + return time.Time{}, time.Time{}, err + } + + selector, err := req.LogSelector() + if err != nil { + return time.Time{}, time.Time{}, err + } + matchers := selector.Matchers() + + maxStreamMatchersPerQuery := q.limits.MaxStreamsMatchersPerQuery(ctx, userID) + if len(matchers) > maxStreamMatchersPerQuery { + return time.Time{}, time.Time{}, httpgrpc.Errorf(http.StatusBadRequest, + "max streams matchers per query exceeded, matchers-count > limit (%d > %d)", len(matchers), maxStreamMatchersPerQuery) + } + + return validateQueryTimeRangeLimits(ctx, userID, q.limits, req.GetStart(), req.GetEnd()) +} + +type TimeRangeLimits querier_limits.TimeRangeLimits + +func validateQueryTimeRangeLimits(ctx context.Context, userID string, limits TimeRangeLimits, from, through time.Time) (time.Time, time.Time, error) { + now := nowFunc() + // Clamp the time range based on the max query lookback. 
+ maxQueryLookback := limits.MaxQueryLookback(ctx, userID) + if maxQueryLookback > 0 && from.Before(now.Add(-maxQueryLookback)) { + origStartTime := from + from = now.Add(-maxQueryLookback) + + level.Debug(spanlogger.FromContext(ctx)).Log( + "msg", "the start time of the query has been manipulated because of the 'max query lookback' setting", + "original", origStartTime, + "updated", from) + + } + maxQueryLength := limits.MaxQueryLength(ctx, userID) + if maxQueryLength > 0 && (through).Sub(from) > maxQueryLength { + return time.Time{}, time.Time{}, httpgrpc.Errorf(http.StatusBadRequest, util_validation.ErrQueryTooLong, (through).Sub(from), model.Duration(maxQueryLength)) + } + if through.Before(from) { + return time.Time{}, time.Time{}, httpgrpc.Errorf(http.StatusBadRequest, util_validation.ErrQueryTooOld, model.Duration(maxQueryLookback)) + } + return from, through, nil +} + +func (q *Rf1Querier) IndexStats(ctx context.Context, req *loghttp.RangeQuery) (*stats.Stats, error) { + userID, err := tenant.TenantID(ctx) + if err != nil { + return nil, err + } + + start, end, err := validateQueryTimeRangeLimits(ctx, userID, q.limits, req.Start, req.End) + if err != nil { + return nil, err + } + + matchers, err := syntax.ParseMatchers(req.Query, true) + if err != nil { + return nil, err + } + + // Enforce the query timeout while querying backends + queryTimeout := q.limits.QueryTimeout(ctx, userID) + ctx, cancel := context.WithDeadline(ctx, time.Now().Add(queryTimeout)) + defer cancel() + + return q.store.Stats( + ctx, + userID, + model.TimeFromUnixNano(start.UnixNano()), + model.TimeFromUnixNano(end.UnixNano()), + matchers..., + ) +} + +func (q *Rf1Querier) IndexShards( + ctx context.Context, + req *loghttp.RangeQuery, + targetBytesPerShard uint64, +) (*logproto.ShardsResponse, error) { + userID, err := tenant.TenantID(ctx) + if err != nil { + return nil, err + } + + start, end, err := validateQueryTimeRangeLimits(ctx, userID, q.limits, req.Start, req.End) + if err != nil { + return nil, err + } + + // Enforce the query timeout while querying backends + queryTimeout := q.limits.QueryTimeout(ctx, userID) + ctx, cancel := context.WithDeadline(ctx, time.Now().Add(queryTimeout)) + defer cancel() + + p, err := indexgateway.ExtractShardRequestMatchersAndAST(req.Query) + if err != nil { + return nil, err + } + + shards, err := q.store.GetShards( + ctx, + userID, + model.TimeFromUnixNano(start.UnixNano()), + model.TimeFromUnixNano(end.UnixNano()), + targetBytesPerShard, + p, + ) + if err != nil { + return nil, err + } + return shards, nil +} + +func (q *Rf1Querier) Volume(ctx context.Context, req *logproto.VolumeRequest) (*logproto.VolumeResponse, error) { + sp, ctx := opentracing.StartSpanFromContext(ctx, "Querier.Volume") + defer sp.Finish() + + userID, err := tenant.TenantID(ctx) + if err != nil { + return nil, err + } + + matchers, err := syntax.ParseMatchers(req.Matchers, true) + if err != nil && req.Matchers != seriesvolume.MatchAny { + return nil, err + } + + // Enforce the query timeout while querying backends + queryTimeout := q.limits.QueryTimeout(ctx, userID) + ctx, cancel := context.WithDeadline(ctx, time.Now().Add(queryTimeout)) + defer cancel() + + sp.LogKV( + "user", userID, + "from", req.From.Time(), + "through", req.Through.Time(), + "matchers", syntax.MatchersString(matchers), + "limit", req.Limit, + "targetLabels", req.TargetLabels, + "aggregateBy", req.AggregateBy, + ) + + numResponses := 1 + responses := make([]*logproto.VolumeResponse, 0, numResponses) + + resp, err := 
q.store.Volume( + ctx, + userID, + model.TimeFromUnix(req.From.Unix()), + model.TimeFromUnix(req.Through.Unix()), + req.Limit, + req.TargetLabels, + req.AggregateBy, + matchers..., + ) + if err != nil { + return nil, err + } + + responses = append(responses, resp) + + return seriesvolume.Merge(responses, req.Limit), nil +} + +// DetectedLabels fetches labels and values from store and ingesters and filters them by relevance criteria as per logs app. +func (q *Rf1Querier) DetectedLabels(ctx context.Context, req *logproto.DetectedLabelsRequest) (*logproto.DetectedLabelsResponse, error) { + userID, err := tenant.TenantID(ctx) + if err != nil { + return nil, err + } + staticLabels := map[string]struct{}{"cluster": {}, "namespace": {}, "instance": {}, "pod": {}} + + // Enforce the query timeout while querying backends + queryTimeout := q.limits.QueryTimeout(ctx, userID) + ctx, cancel := context.WithDeadline(ctx, time.Now().Add(queryTimeout)) + defer cancel() + g, ctx := errgroup.WithContext(ctx) + + if req.Start, req.End, err = validateQueryTimeRangeLimits(ctx, userID, q.limits, req.Start, req.End); err != nil { + return nil, err + } + + // Fetch labels from the store + storeLabelsMap := make(map[string][]string) + var matchers []*labels.Matcher + if req.Query != "" { + matchers, err = syntax.ParseMatchers(req.Query, true) + if err != nil { + return nil, err + } + } + g.Go(func() error { + var err error + start := model.TimeFromUnixNano(req.Start.UnixNano()) + end := model.TimeFromUnixNano(req.End.UnixNano()) + storeLabels, err := q.store.LabelNamesForMetricName(ctx, userID, start, end, "logs") + for _, label := range storeLabels { + values, err := q.store.LabelValuesForMetricName(ctx, userID, start, end, "logs", label, matchers...) + if err != nil { + return err + } + storeLabelsMap[label] = values + } + return err + }) + + if err := g.Wait(); err != nil { + return nil, err + } + + if len(storeLabelsMap) == 0 { + return &logproto.DetectedLabelsResponse{ + DetectedLabels: []*logproto.DetectedLabel{}, + }, nil + } + + return &logproto.DetectedLabelsResponse{ + DetectedLabels: countLabelsAndCardinality(storeLabelsMap, staticLabels), + }, nil +} + +func countLabelsAndCardinality(storeLabelsMap map[string][]string, staticLabels map[string]struct{}) []*logproto.DetectedLabel { + dlMap := make(map[string]*parsedFields) + + for label, values := range storeLabelsMap { + if _, isStatic := staticLabels[label]; isStatic || !containsAllIDTypes(values) { + _, ok := dlMap[label] + if !ok { + dlMap[label] = newParsedLabels() + } + + parsedFields := dlMap[label] + for _, v := range values { + parsedFields.Insert(v) + } + } + } + + var detectedLabels []*logproto.DetectedLabel + for k, v := range dlMap { + sketch, err := v.sketch.MarshalBinary() + if err != nil { + // TODO: add log here + continue + } + detectedLabels = append(detectedLabels, &logproto.DetectedLabel{ + Label: k, + Cardinality: v.Estimate(), + Sketch: sketch, + }) + } + return detectedLabels +} + +// containsAllIDTypes filters out all UUID, GUID and numeric types. 
Returns false if even one value is not of the type +func containsAllIDTypes(values []string) bool { + for _, v := range values { + _, err := strconv.ParseFloat(v, 64) + if err != nil { + _, err = uuid.Parse(v) + if err != nil { + return false + } + } + } + + return true +} + +func (q *Rf1Querier) DetectedFields(ctx context.Context, req *logproto.DetectedFieldsRequest) (*logproto.DetectedFieldsResponse, error) { + expr, err := syntax.ParseLogSelector(req.Query, true) + if err != nil { + return nil, err + } + params := logql.SelectLogParams{ + QueryRequest: &logproto.QueryRequest{ + Start: req.Start, + End: req.End, + Limit: req.LineLimit, + Direction: logproto.BACKWARD, + Selector: expr.String(), + Plan: &plan.QueryPlan{ + AST: expr, + }, + }, + } + + iters, err := q.SelectLogs(ctx, params) + if err != nil { + return nil, err + } + + // TODO(twhitney): converting from a step to a duration should be abstracted and reused, + // doing this in a few places now. + streams, err := streamsForFieldDetection(iters, req.LineLimit) + if err != nil { + return nil, err + } + + detectedFields := parseDetectedFields(ctx, req.FieldLimit, streams) + + fields := make([]*logproto.DetectedField, len(detectedFields)) + fieldCount := 0 + for k, v := range detectedFields { + sketch, err := v.sketch.MarshalBinary() + if err != nil { + level.Warn(q.logger).Log("msg", "failed to marshal hyperloglog sketch", "err", err) + continue + } + + fields[fieldCount] = &logproto.DetectedField{ + Label: k, + Type: v.fieldType, + Cardinality: v.Estimate(), + Sketch: sketch, + Parsers: v.parsers, + } + + fieldCount++ + } + + return &logproto.DetectedFieldsResponse{ + Fields: fields, + FieldLimit: req.GetFieldLimit(), + }, nil +} + +type parsedFields struct { + sketch *hyperloglog.Sketch + fieldType logproto.DetectedFieldType + parsers []string +} + +func newParsedFields(parser *string) *parsedFields { + p := "" + if parser != nil { + p = *parser + } + return &parsedFields{ + sketch: hyperloglog.New(), + fieldType: logproto.DetectedFieldString, + parsers: []string{p}, + } +} + +func newParsedLabels() *parsedFields { + return &parsedFields{ + sketch: hyperloglog.New(), + fieldType: logproto.DetectedFieldString, + } +} + +func (p *parsedFields) Insert(value string) { + p.sketch.Insert([]byte(value)) +} + +func (p *parsedFields) Estimate() uint64 { + return p.sketch.Estimate() +} + +func (p *parsedFields) Marshal() ([]byte, error) { + return p.sketch.MarshalBinary() +} + +func (p *parsedFields) DetermineType(value string) { + p.fieldType = determineType(value) +} + +func determineType(value string) logproto.DetectedFieldType { + if _, err := strconv.ParseInt(value, 10, 64); err == nil { + return logproto.DetectedFieldInt + } + + if _, err := strconv.ParseFloat(value, 64); err == nil { + return logproto.DetectedFieldFloat + } + + if _, err := strconv.ParseBool(value); err == nil { + return logproto.DetectedFieldBoolean + } + + if _, err := time.ParseDuration(value); err == nil { + return logproto.DetectedFieldDuration + } + + if _, err := humanize.ParseBytes(value); err == nil { + return logproto.DetectedFieldBytes + } + + return logproto.DetectedFieldString +} + +func parseDetectedFields(ctx context.Context, limit uint32, streams logqlmodel.Streams) map[string]*parsedFields { + detectedFields := make(map[string]*parsedFields, limit) + fieldCount := uint32(0) + + for _, stream := range streams { + level.Debug(spanlogger.FromContext(ctx)).Log( + "detected_fields", "true", + "msg", fmt.Sprintf("looking for detected fields in stream %d 
with %d lines", stream.Hash, len(stream.Entries))) + + for _, entry := range stream.Entries { + detected, parser := parseLine(entry.Line) + for k, vals := range detected { + df, ok := detectedFields[k] + if !ok && fieldCount < limit { + df = newParsedFields(parser) + detectedFields[k] = df + fieldCount++ + } + + if df == nil { + continue + } + + if !slices.Contains(df.parsers, *parser) { + df.parsers = append(df.parsers, *parser) + } + + detectType := true + for _, v := range vals { + parsedFields := detectedFields[k] + if detectType { + // we don't want to determine the type for every line, so we assume the type in each stream will be the same, and re-detect the type for the next stream + parsedFields.DetermineType(v) + detectType = false + } + + parsedFields.Insert(v) + } + + level.Debug(spanlogger.FromContext(ctx)).Log( + "detected_fields", "true", + "msg", fmt.Sprintf("detected field %s with %d values", k, len(vals))) + } + } + } + + return detectedFields +} + +func parseLine(line string) (map[string][]string, *string) { + parser := "logfmt" + logFmtParser := logql_log.NewLogfmtParser(true, false) + + lbls := logql_log.NewBaseLabelsBuilder().ForLabels(labels.EmptyLabels(), 0) + _, logfmtSuccess := logFmtParser.Process(0, []byte(line), lbls) + if !logfmtSuccess || lbls.HasErr() { + parser = "json" + jsonParser := logql_log.NewJSONParser() + lbls.Reset() + _, jsonSuccess := jsonParser.Process(0, []byte(line), lbls) + if !jsonSuccess || lbls.HasErr() { + return map[string][]string{}, nil + } + } + + parsedLabels := map[string]map[string]struct{}{} + for _, lbl := range lbls.LabelsResult().Labels() { + if values, ok := parsedLabels[lbl.Name]; ok { + values[lbl.Value] = struct{}{} + } else { + parsedLabels[lbl.Name] = map[string]struct{}{lbl.Value: {}} + } + } + + result := make(map[string][]string, len(parsedLabels)) + for lbl, values := range parsedLabels { + vals := make([]string, 0, len(values)) + for v := range values { + vals = append(vals, v) + } + result[lbl] = vals + } + + return result, &parser +} + +// streamsForFieldDetection reads the streams from the iterator and returns them sorted. +// If categorizeLabels is true, the stream labels contains just the stream labels and entries inside each stream have their +// structuredMetadata and parsed fields populated with structured metadata labels plus the parsed labels respectively. +// Otherwise, the stream labels are the whole series labels including the stream labels, structured metadata labels and parsed labels. +func streamsForFieldDetection(i iter.EntryIterator, size uint32) (logqlmodel.Streams, error) { + streams := map[string]*logproto.Stream{} + respSize := uint32(0) + // lastEntry should be a really old time so that the first comparison is always true, we use a negative + // value here because many unit tests start at time.Unix(0,0) + lastEntry := time.Unix(-100, 0) + for respSize < size && i.Next() { + streamLabels, entry := i.Labels(), i.At() + + // Always going backward as the direction for field detection is hard-coded to BACKWARD + shouldOutput := entry.Timestamp.Equal(lastEntry) || entry.Timestamp.Before(lastEntry) + + // If lastEntry.Unix < 0 this is the first pass through the loop and we should output the line. 
+ // Then check to see if the entry is equal to, or past a forward step + if lastEntry.Unix() < 0 || shouldOutput { + stream, ok := streams[streamLabels] + if !ok { + stream = &logproto.Stream{ + Labels: streamLabels, + } + streams[streamLabels] = stream + } + stream.Entries = append(stream.Entries, entry) + lastEntry = i.At().Timestamp + respSize++ + } + } + + result := make(logqlmodel.Streams, 0, len(streams)) + for _, stream := range streams { + result = append(result, *stream) + } + sort.Sort(result) + return result, i.Err() +} + +type PatterQuerier interface { + Patterns(ctx context.Context, req *logproto.QueryPatternsRequest) (*logproto.QueryPatternsResponse, error) + Samples(ctx context.Context, req *logproto.QuerySamplesRequest) (*logproto.QuerySamplesResponse, error) +} + +func (q *Rf1Querier) WithPatternQuerier(pq querier.PatterQuerier) { + q.patternQuerier = pq +} + +func (q *Rf1Querier) Patterns(ctx context.Context, req *logproto.QueryPatternsRequest) (*logproto.QueryPatternsResponse, error) { + if q.patternQuerier == nil { + return nil, httpgrpc.Errorf(http.StatusNotFound, "") + } + res, err := q.patternQuerier.Patterns(ctx, req) + if err != nil { + return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + } + + return res, err +} + +func (q *Rf1Querier) SelectMetricSamples(_ context.Context, _ *logproto.QuerySamplesRequest) (*logproto.QuerySamplesResponse, error) { + return nil, httpgrpc.Errorf(http.StatusBadRequest, "not implemented") +} diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 0ce4982fb3766..961c67345f61c 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -3,7 +3,6 @@ package querier import ( "context" "flag" - "fmt" "net/http" "sort" "strconv" @@ -43,6 +42,8 @@ import ( listutil "github.com/grafana/loki/v3/pkg/util" "github.com/grafana/loki/v3/pkg/util/spanlogger" util_validation "github.com/grafana/loki/v3/pkg/util/validation" + + "github.com/grafana/loki/pkg/push" ) const ( @@ -106,6 +107,7 @@ type Querier interface { Patterns(ctx context.Context, req *logproto.QueryPatternsRequest) (*logproto.QueryPatternsResponse, error) DetectedLabels(ctx context.Context, req *logproto.DetectedLabelsRequest) (*logproto.DetectedLabelsResponse, error) SelectMetricSamples(ctx context.Context, req *logproto.QuerySamplesRequest) (*logproto.QuerySamplesResponse, error) + WithPatternQuerier(patternQuerier PatterQuerier) } type Limits querier_limits.Limits @@ -1116,7 +1118,7 @@ func (q *SingleTenantQuerier) DetectedFields(ctx context.Context, req *logproto.
return nil, err } - detectedFields := parseDetectedFields(ctx, req.FieldLimit, streams) + detectedFields := parseDetectedFields(req.FieldLimit, streams) fields := make([]*logproto.DetectedField, len(detectedFields)) fieldCount := 0 @@ -1209,16 +1211,39 @@ func determineType(value string) logproto.DetectedFieldType { return logproto.DetectedFieldString } -func parseDetectedFields(ctx context.Context, limit uint32, streams logqlmodel.Streams) map[string]*parsedFields { +func parseDetectedFields(limit uint32, streams logqlmodel.Streams) map[string]*parsedFields { detectedFields := make(map[string]*parsedFields, limit) fieldCount := uint32(0) + emptyParser := "" for _, stream := range streams { - level.Debug(spanlogger.FromContext(ctx)).Log( - "detected_fields", "true", - "msg", fmt.Sprintf("looking for detected fields in stream %d with %d lines", stream.Hash, len(stream.Entries))) - for _, entry := range stream.Entries { + structuredMetadata := getStructuredMetadata(entry) + for k, vals := range structuredMetadata { + df, ok := detectedFields[k] + if !ok && fieldCount < limit { + df = newParsedFields(&emptyParser) + detectedFields[k] = df + fieldCount++ + } + + if df == nil { + continue + } + + detectType := true + for _, v := range vals { + parsedFields := detectedFields[k] + if detectType { + // we don't want to determine the type for every line, so we assume the type in each stream will be the same, and re-detect the type for the next stream + parsedFields.DetermineType(v) + detectType = false + } + + parsedFields.Insert(v) + } + } + detected, parser := parseLine(entry.Line) for k, vals := range detected { df, ok := detectedFields[k] @@ -1247,10 +1272,6 @@ func parseDetectedFields(ctx context.Context, limit uint32, streams logqlmodel.S parsedFields.Insert(v) } - - level.Debug(spanlogger.FromContext(ctx)).Log( - "detected_fields", "true", - "msg", fmt.Sprintf("detected field %s with %d values", k, len(vals))) } } } @@ -1258,6 +1279,28 @@ func parseDetectedFields(ctx context.Context, limit uint32, streams logqlmodel.S return detectedFields } +func getStructuredMetadata(entry push.Entry) map[string][]string { + labels := map[string]map[string]struct{}{} + for _, lbl := range entry.StructuredMetadata { + if values, ok := labels[lbl.Name]; ok { + values[lbl.Value] = struct{}{} + } else { + labels[lbl.Name] = map[string]struct{}{lbl.Value: {}} + } + } + + result := make(map[string][]string, len(labels)) + for lbl, values := range labels { + vals := make([]string, 0, len(values)) + for v := range values { + vals = append(vals, v) + } + result[lbl] = vals + } + + return result +} + func parseLine(line string) (map[string][]string, *string) { parser := "logfmt" logFmtParser := logql_log.NewLogfmtParser(true, false) diff --git a/pkg/querier/querier_mock_test.go b/pkg/querier/querier_mock_test.go index 07ff0d7cf1bb4..c016855dd4d96 100644 --- a/pkg/querier/querier_mock_test.go +++ b/pkg/querier/querier_mock_test.go @@ -737,6 +737,8 @@ func (q *querierMock) SelectMetricSamples( return resp.(*logproto.QuerySamplesResponse), err } +func (q *querierMock) WithPatternQuerier(_ PatterQuerier) {} + type engineMock struct { util.ExtendedMock } diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 450e2eb0d67f7..9e98a780561f3 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -1759,6 +1759,52 @@ func TestQuerier_DetectedFields(t *testing.T) { } }) + t.Run("returns detected fields with structured metadata from queried logs", func(t *testing.T) { + store :=
newStoreMock() + store.On("SelectLogs", mock.Anything, mock.Anything). + Return(mockLogfmtStreamIterator(1, 5), nil) + + queryClient := newQueryClientMock() + queryClient.On("Recv"). + Return(mockQueryResponse([]logproto.Stream{mockLogfmtStreamWithStructuredMetadata(1, 5)}), nil) + + ingesterClient := newQuerierClientMock() + ingesterClient.On("Query", mock.Anything, mock.Anything, mock.Anything). + Return(queryClient, nil) + + querier, err := newQuerier( + conf, + mockIngesterClientConfig(), + newIngesterClientMockFactory(ingesterClient), + mockReadRingWithOneActiveIngester(), + &mockDeleteGettter{}, + store, limits) + require.NoError(t, err) + + resp, err := querier.DetectedFields(ctx, &request) + require.NoError(t, err) + + detectedFields := resp.Fields + // log lines come from querier_mock_test.go + // message="line %d" count=%d fake=true bytes=%dMB duration=%dms percent=%f even=%t + assert.Len(t, detectedFields, 9) + expectedCardinality := map[string]uint64{ + "variable": 5, + "constant": 1, + "message": 5, + "count": 5, + "fake": 1, + "bytes": 5, + "duration": 5, + "percent": 5, + "even": 2, + } + for _, d := range detectedFields { + card := expectedCardinality[d.Label] + assert.Equal(t, card, d.Cardinality, "Expected cardinality mismatch for: %s", d.Label) + } + }) + t.Run("correctly identifies different field types", func(t *testing.T) { store := newStoreMock() store.On("SelectLogs", mock.Anything, mock.Anything). @@ -1814,6 +1860,68 @@ func TestQuerier_DetectedFields(t *testing.T) { assert.Equal(t, logproto.DetectedFieldFloat, floatField.Type) assert.Equal(t, logproto.DetectedFieldBoolean, evenField.Type) }) + + t.Run("correctly identifies parser to use with logfmt and structured metadata", func(t *testing.T) { + store := newStoreMock() + store.On("SelectLogs", mock.Anything, mock.Anything). + Return(mockLogfmtStreamIterator(1, 2), nil) + + queryClient := newQueryClientMock() + queryClient.On("Recv"). + Return(mockQueryResponse([]logproto.Stream{mockLogfmtStreamWithStructuredMetadata(1, 2)}), nil) + + ingesterClient := newQuerierClientMock() + ingesterClient.On("Query", mock.Anything, mock.Anything, mock.Anything). 
+ Return(queryClient, nil) + + querier, err := newQuerier( + conf, + mockIngesterClientConfig(), + newIngesterClientMockFactory(ingesterClient), + mockReadRingWithOneActiveIngester(), + &mockDeleteGettter{}, + store, limits) + require.NoError(t, err) + + resp, err := querier.DetectedFields(ctx, &request) + require.NoError(t, err) + + detectedFields := resp.Fields + // log lines come from querier_mock_test.go + // message="line %d" count=%d fake=true bytes=%dMB duration=%dms percent=%f even=%t + assert.Len(t, detectedFields, 9) + + var messageField, countField, bytesField, durationField, floatField, evenField, constantField, variableField *logproto.DetectedField + for _, field := range detectedFields { + switch field.Label { + case "message": + messageField = field + case "count": + countField = field + case "bytes": + bytesField = field + case "duration": + durationField = field + case "percent": + floatField = field + case "even": + evenField = field + case "constant": + constantField = field + case "variable": + variableField = field + } + } + + assert.Equal(t, []string{"logfmt"}, messageField.Parsers) + assert.Equal(t, []string{"logfmt"}, countField.Parsers) + assert.Equal(t, []string{"logfmt"}, bytesField.Parsers) + assert.Equal(t, []string{"logfmt"}, durationField.Parsers) + assert.Equal(t, []string{"logfmt"}, floatField.Parsers) + assert.Equal(t, []string{"logfmt"}, evenField.Parsers) + assert.Equal(t, []string{""}, constantField.Parsers) + assert.Equal(t, []string{""}, variableField.Parsers) + }) } func BenchmarkQuerierDetectedFields(b *testing.B) { diff --git a/pkg/querier/queryrange/queryrangebase/retry.go b/pkg/querier/queryrange/queryrangebase/retry.go index a3f2693b1ba75..691d20fa5b5ec 100644 --- a/pkg/querier/queryrange/queryrangebase/retry.go +++ b/pkg/querier/queryrange/queryrangebase/retry.go @@ -2,6 +2,7 @@ package queryrangebase import ( "context" + "reflect" "time" "github.com/go-kit/log" @@ -10,7 +11,6 @@ import ( "github.com/grafana/dskit/grpcutil" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - "google.golang.org/grpc/codes" "github.com/grafana/loki/v3/pkg/util" util_log "github.com/grafana/loki/v3/pkg/util/log" @@ -81,20 +81,33 @@ func (r retry) Do(ctx context.Context, req Request) (Response, error) { query := req.GetQuery() for ; tries < r.maxRetries; tries++ { + // Make sure the context isn't done before sending the request if ctx.Err() != nil { return nil, ctx.Err() } + resp, err := r.next.Do(ctx, req) if err == nil { return resp, nil } - // Retry if we get a HTTP 500 or an unknown error. - if code := grpcutil.ErrorToStatusCode(err); code == codes.Unknown || code/100 == 5 { + // Make sure the context isn't done before retrying the request + if ctx.Err() != nil { + return nil, ctx.Err() + } + + code := grpcutil.ErrorToStatusCode(err) + // Error handling is tricky... 
There are many places we wrap any error and set an HTTP-style status code, + // but there are also places where we return an existing gRPC object, which will use gRPC status codes. + // If the code is < 100 it's a gRPC status code; currently we retry all of these, even codes.Canceled, + // because when our pools close connections they do so with a cancel and we want to retry those. + // If it's >= 100, it's an HTTP code and we only retry 5xx. + if code < 100 || code/100 == 5 { lastErr = err level.Error(util_log.WithContext(ctx, r.log)).Log( "msg", "error processing request", "try", tries, + "type", logImplementingType(req), "query", query, "query_hash", util.HashedQuery(query), "start", start.Format(time.RFC3339Nano), @@ -103,13 +116,31 @@ func (r retry) Do(ctx context.Context, req Request) (Response, error) { "end_delta", time.Since(end), "length", end.Sub(start), "retry_in", bk.NextDelay(), + "code", code, "err", err, ) bk.Wait() continue + } else { + level.Warn(util_log.WithContext(ctx, r.log)).Log("msg", "received an error but not a retryable code, this is possibly a bug.", "code", code, "err", err) } return nil, err } return nil, lastErr } + +func logImplementingType(i Request) string { + if i == nil { + return "nil" + } + + t := reflect.TypeOf(i) + + // Check if it's a pointer and get the underlying type if so + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + + return t.String() +} diff --git a/pkg/querier/queryrange/querysharding.go b/pkg/querier/queryrange/querysharding.go index 07da7abfb6518..bd5c26079636b 100644 --- a/pkg/querier/queryrange/querysharding.go +++ b/pkg/querier/queryrange/querysharding.go @@ -42,6 +42,7 @@ func NewQueryShardMiddleware( limits Limits, maxShards int, statsHandler queryrangebase.Handler, + retryNextHandler queryrangebase.Handler, shardAggregation []string, ) queryrangebase.Middleware { noshards := !hasShards(confs) @@ -56,7 +57,7 @@ } mapperware := queryrangebase.MiddlewareFunc(func(next queryrangebase.Handler) queryrangebase.Handler { - return newASTMapperware(confs, engineOpts, next, statsHandler, logger, shardingMetrics, limits, maxShards, shardAggregation) + return newASTMapperware(confs, engineOpts, next, retryNextHandler, statsHandler, logger, shardingMetrics, limits, maxShards, shardAggregation) }) return queryrangebase.MiddlewareFunc(func(next queryrangebase.Handler) queryrangebase.Handler { @@ -76,6 +77,7 @@ func newASTMapperware( confs ShardingConfigs, engineOpts logql.EngineOpts, next queryrangebase.Handler, + retryNextHandler queryrangebase.Handler, statsHandler queryrangebase.Handler, logger log.Logger, metrics *logql.MapperMetrics, @@ -88,6 +90,7 @@ logger: log.With(logger, "middleware", "QueryShard.astMapperware"), limits: limits, next: next, + retryNextHandler: retryNextHandler, statsHandler: next, ng: logql.NewDownstreamEngine(engineOpts, DownstreamHandler{next: next, limits: limits}, limits, logger), metrics: metrics, @@ -103,14 +106,15 @@ } type astMapperware struct { - confs ShardingConfigs - logger log.Logger - limits Limits - next queryrangebase.Handler - statsHandler queryrangebase.Handler - ng *logql.DownstreamEngine - metrics *logql.MapperMetrics - maxShards int + confs ShardingConfigs + logger log.Logger + limits Limits + next queryrangebase.Handler + retryNextHandler queryrangebase.Handler + statsHandler queryrangebase.Handler + ng *logql.DownstreamEngine + metrics *logql.MapperMetrics + maxShards int // Feature flag for sharding range and vector aggregations such as // quantile_over_time with probabilistic data structures. @@ -191,6 +195,7 @@ func (ast *astMapperware) Do(ctx context.Context, r queryrangebase.Request) (que ast.maxShards, r, ast.statsHandler, + ast.retryNextHandler, ast.next, ast.limits, ) diff --git a/pkg/querier/queryrange/querysharding_test.go b/pkg/querier/queryrange/querysharding_test.go index b17b38d6932aa..809013ffb0021 100644 --- a/pkg/querier/queryrange/querysharding_test.go +++ b/pkg/querier/queryrange/querysharding_test.go @@ -168,6 +168,7 @@ func Test_astMapper(t *testing.T) { }, testEngineOpts, handler, + handler, nil, log.NewNopLogger(), nilShardingMetrics, @@ -307,6 +308,7 @@ func Test_astMapper_QuerySizeLimits(t *testing.T) { }, testEngineOpts, handler, + handler, nil, log.NewNopLogger(), nilShardingMetrics, @@ -352,6 +354,7 @@ func Test_ShardingByPass(t *testing.T) { }, testEngineOpts, handler, + handler, nil, log.NewNopLogger(), nilShardingMetrics, @@ -439,6 +442,7 @@ func Test_InstantSharding(t *testing.T) { }, 0, nil, + nil, []string{}, ) response, err := sharding.Wrap(queryrangebase.HandlerFunc(func(c context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { @@ -718,6 +722,7 @@ func TestShardingAcrossConfigs_ASTMapper(t *testing.T) { confs, testEngineOpts, handler, + handler, nil, log.NewNopLogger(), nilShardingMetrics, @@ -853,6 +858,7 @@ func Test_ASTMapper_MaxLookBackPeriod(t *testing.T) { testSchemasTSDB, engineOpts, queryHandler, + queryHandler, statsHandler, log.NewNopLogger(), nilShardingMetrics, diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go index 44aa0b0f4a0b5..6d71bf587d5ec 100644 --- a/pkg/querier/queryrange/roundtrip.go +++ b/pkg/querier/queryrange/roundtrip.go @@ -206,7 +206,7 @@ func NewMiddleware( return nil, nil, err } - limitedTripperware, err := NewLimitedTripperware(cfg, log, limits, schema, metrics, codec, iqo, metricsNamespace) + limitedTripperware, err := NewLimitedTripperware(cfg, engineOpts, log, limits, schema, metrics, codec, iqo, indexStatsTripperware, metricsNamespace) if err != nil { return nil, nil, err } @@ -253,12 +253,10 @@ func NewMiddleware( detectedLabelsTripperware, err := NewDetectedLabelsTripperware( cfg, - engineOpts, log, limits, schema, metrics, - indexStatsTripperware, metricsNamespace, codec, limits, iqo) if err != nil { @@ -282,25 +280,17 @@ func NewMiddleware( }), StopperWrapper{resultsCache, statsCache, volumeCache}, nil } -func NewDetectedLabelsTripperware(cfg Config, opts logql.EngineOpts, logger log.Logger, l Limits, schema config.SchemaConfig, metrics *Metrics, mw base.Middleware, namespace string, merger base.Merger, limits Limits, iqo util.IngesterQueryOptions) (base.Middleware, error) { +func NewDetectedLabelsTripperware(cfg Config, logger log.Logger, l Limits, schema config.SchemaConfig, metrics *Metrics, namespace string, merger base.Merger, limits Limits, iqo util.IngesterQueryOptions) (base.Middleware, error) { return base.MiddlewareFunc(func(next base.Handler) base.Handler { - statsHandler := mw.Wrap(next) splitter := newDefaultSplitter(limits, iqo) queryRangeMiddleware := []base.Middleware{ StatsCollectorMiddleware(), NewLimitsMiddleware(l), - NewQuerySizeLimiterMiddleware(schema.Configs, opts, logger, l, statsHandler), base.InstrumentMiddleware("split_by_interval", metrics.InstrumentMiddlewareMetrics), SplitByIntervalMiddleware(schema.Configs, limits, merger, splitter, metrics.SplitByMetrics), } - // The sharding middleware takes care of enforcing this limit for both shardable and non-shardable queries. - // If we are not using sharding, we enforce the limit by adding this middleware after time splitting. - queryRangeMiddleware = append(queryRangeMiddleware, - NewQuerierSizeLimiterMiddleware(schema.Configs, opts, logger, l, statsHandler), - ) - if cfg.MaxRetries > 0 { queryRangeMiddleware = append( queryRangeMiddleware, base.InstrumentMiddleware("retry", metrics.InstrumentMiddlewareMetrics), @@ -553,6 +543,12 @@ func getOperation(path string) string { func NewLogFilterTripperware(cfg Config, engineOpts logql.EngineOpts, log log.Logger, limits Limits, schema config.SchemaConfig, merger base.Merger, iqo util.IngesterQueryOptions, c cache.Cache, metrics *Metrics, indexStatsTripperware base.Middleware, metricsNamespace string) (base.Middleware, error) { return base.MiddlewareFunc(func(next base.Handler) base.Handler { statsHandler := indexStatsTripperware.Wrap(next) + retryNextHandler := next + if cfg.MaxRetries > 0 { + tr := base.InstrumentMiddleware("retry", metrics.InstrumentMiddlewareMetrics) + rm := base.NewRetryMiddleware(log, cfg.MaxRetries, metrics.RetryMiddlewareMetrics, metricsNamespace) + retryNextHandler = queryrangebase.MergeMiddlewares(tr, rm).Wrap(next) + } queryRangeMiddleware := []base.Middleware{ QueryMetricsMiddleware(metrics.QueryMetrics), @@ -592,6 +588,7 @@ func NewLogFilterTripperware(cfg Config, engineOpts logql.EngineOpts, log log.Lo limits, 0, // 0 is unlimited shards statsHandler, + retryNextHandler, cfg.ShardAggregations, ), ) @@ -615,14 +612,42 @@ func NewLogFilterTripperware(cfg Config, engineOpts logql.EngineOpts, log log.Lo } // NewLimitedTripperware creates a new frontend tripperware responsible for handling log requests which are label matcher only, no filter expression. -func NewLimitedTripperware(cfg Config, log log.Logger, limits Limits, schema config.SchemaConfig, metrics *Metrics, merger base.Merger, iqo util.IngesterQueryOptions, metricsNamespace string) (base.Middleware, error) { +func NewLimitedTripperware(cfg Config, engineOpts logql.EngineOpts, log log.Logger, limits Limits, schema config.SchemaConfig, metrics *Metrics, merger base.Merger, iqo util.IngesterQueryOptions, indexStatsTripperware base.Middleware, metricsNamespace string) (base.Middleware, error) { return base.MiddlewareFunc(func(next base.Handler) base.Handler { + statsHandler := indexStatsTripperware.Wrap(next) + retryNextHandler := next + if cfg.MaxRetries > 0 { + tr := base.InstrumentMiddleware("retry", metrics.InstrumentMiddlewareMetrics) + rm := base.NewRetryMiddleware(log, cfg.MaxRetries, metrics.RetryMiddlewareMetrics, metricsNamespace) + retryNextHandler = queryrangebase.MergeMiddlewares(tr, rm).Wrap(next) + } + queryRangeMiddleware := []base.Middleware{ StatsCollectorMiddleware(), NewLimitsMiddleware(limits), base.InstrumentMiddleware("split_by_interval", metrics.InstrumentMiddlewareMetrics), SplitByIntervalMiddleware(schema.Configs, WithMaxParallelism(limits, limitedQuerySplits), merger, newDefaultSplitter(limits, iqo), metrics.SplitByMetrics), } + + if cfg.ShardedQueries { + queryRangeMiddleware = append(queryRangeMiddleware, + NewQueryShardMiddleware( + log, + schema.Configs, + engineOpts, + metrics.InstrumentMiddlewareMetrics, // instrumentation is included in the sharding middleware + metrics.MiddlewareMapperMetrics.shardMapper, + limits, + // Too many shards on limited queries result in slowing down this type of query + // and overwhelming the frontend, so we fix the number of shards to prevent this. + 32, // fixed shard count for limited queries + statsHandler, + retryNextHandler, + cfg.ShardAggregations, + ), + ) + } + if cfg.MaxRetries > 0 { queryRangeMiddleware = append( queryRangeMiddleware, base.InstrumentMiddleware("retry", metrics.InstrumentMiddlewareMetrics), @@ -833,6 +858,12 @@ func NewMetricTripperware(cfg Config, engineOpts logql.EngineOpts, log log.Logge return base.MiddlewareFunc(func(next base.Handler) base.Handler { statsHandler := indexStatsTripperware.Wrap(next) + retryNextHandler := next + if cfg.MaxRetries > 0 { + tr := base.InstrumentMiddleware("retry", metrics.InstrumentMiddlewareMetrics) + rm := base.NewRetryMiddleware(log, cfg.MaxRetries, metrics.RetryMiddlewareMetrics, metricsNamespace) + retryNextHandler = queryrangebase.MergeMiddlewares(tr, rm).Wrap(next) + } queryRangeMiddleware := []base.Middleware{ QueryMetricsMiddleware(metrics.QueryMetrics), @@ -874,6 +905,7 @@ func NewMetricTripperware(cfg Config, engineOpts logql.EngineOpts, log log.Logge limits, 0, // 0 is unlimited shards statsHandler, + retryNextHandler, cfg.ShardAggregations, ), ) @@ -955,6 +987,12 @@ func NewInstantMetricTripperware( return base.MiddlewareFunc(func(next base.Handler) base.Handler { statsHandler := indexStatsTripperware.Wrap(next) + retryNextHandler := next + if cfg.MaxRetries > 0 { + tr := base.InstrumentMiddleware("retry", metrics.InstrumentMiddlewareMetrics) + rm := base.NewRetryMiddleware(log, cfg.MaxRetries, metrics.RetryMiddlewareMetrics, metricsNamespace) + retryNextHandler = queryrangebase.MergeMiddlewares(tr, rm).Wrap(next) + } queryRangeMiddleware := []base.Middleware{ StatsCollectorMiddleware(), @@ -982,6 +1020,7 @@ func NewInstantMetricTripperware( limits, 0, // 0 is unlimited shards statsHandler, + retryNextHandler, cfg.ShardAggregations, ), ) diff --git a/pkg/querier/queryrange/shard_resolver.go b/pkg/querier/queryrange/shard_resolver.go index 33438c3717817..31366d0a0dd76 100644 --- a/pkg/querier/queryrange/shard_resolver.go +++ b/pkg/querier/queryrange/shard_resolver.go @@ -37,21 +37,22 @@ func shardResolverForConf( maxParallelism int, maxShards int, r queryrangebase.Request, - statsHandler, next queryrangebase.Handler, + statsHandler, next, retryNext queryrangebase.Handler, limits Limits, ) (logql.ShardResolver, bool) { if conf.IndexType == types.TSDBType { return &dynamicShardResolver{ - ctx: ctx, - logger: logger, - statsHandler: statsHandler, - next: next, - limits: limits, - from: model.Time(r.GetStart().UnixMilli()), - through: model.Time(r.GetEnd().UnixMilli()), - maxParallelism: maxParallelism, - maxShards: maxShards, - defaultLookback: defaultLookback, + ctx: ctx, + logger: logger, + statsHandler: statsHandler, + retryNextHandler: retryNext, + next: next, + limits: limits, + from: model.Time(r.GetStart().UnixMilli()), + through: model.Time(r.GetEnd().UnixMilli()), + maxParallelism: maxParallelism, + maxShards: maxShards, + defaultLookback: defaultLookback, }, true } if conf.RowShards < 2 { @@ -64,10 +65,11 @@ type dynamicShardResolver struct { ctx context.Context // TODO(owen-d): shouldn't have to fork handlers here -- one should just transparently handle the right logic // depending on the underlying type?
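The tripperwares above all build retryNextHandler the same way: next is wrapped exactly once with an instrumented retry middleware, and that wrapped handler is what the sharding middleware and the dynamicShardResolver (whose fields continue below) use for requests that should survive transient failures. A minimal, self-contained sketch of that decorator pattern, using hypothetical stand-ins for the queryrangebase types:

package main

import (
	"context"
	"errors"
	"fmt"
)

// Handler mirrors the shape of queryrangebase.Handler (hypothetical stand-in).
type Handler interface {
	Do(ctx context.Context, req string) (string, error)
}

// HandlerFunc adapts a plain function to the Handler interface.
type HandlerFunc func(ctx context.Context, req string) (string, error)

func (f HandlerFunc) Do(ctx context.Context, req string) (string, error) { return f(ctx, req) }

// withRetries decorates next so each request is attempted up to maxRetries
// times, giving up early if the context is cancelled.
func withRetries(next Handler, maxRetries int) Handler {
	return HandlerFunc(func(ctx context.Context, req string) (string, error) {
		var lastErr error
		for try := 0; try < maxRetries; try++ {
			if err := ctx.Err(); err != nil {
				return "", err
			}
			resp, err := next.Do(ctx, req)
			if err == nil {
				return resp, nil
			}
			lastErr = err
		}
		return "", lastErr
	})
}

func main() {
	calls := 0
	// next fails twice before succeeding, standing in for a flaky downstream.
	next := HandlerFunc(func(_ context.Context, req string) (string, error) {
		if calls++; calls < 3 {
			return "", errors.New("transient")
		}
		return "shards for " + req, nil
	})
	// Analogous to retryNextHandler in the diff: wrap once, then reuse the
	// wrapped handler wherever retries are wanted (e.g. shard planning).
	retryNext := withRetries(next, 5)
	fmt.Println(retryNext.Do(context.Background(), `{job="app"}`))
}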
- statsHandler queryrangebase.Handler // index stats handler (hooked up to results cache, etc) - next queryrangebase.Handler // next handler in the chain (used for non-stats reqs) - logger log.Logger - limits Limits + statsHandler queryrangebase.Handler // index stats handler (hooked up to results cache, etc) + retryNextHandler queryrangebase.Handler // next handler wrapped with retries + next queryrangebase.Handler // next handler in the chain (used for non-stats reqs) + logger log.Logger + limits Limits from, through model.Time maxParallelism int @@ -251,7 +253,8 @@ func (r *dynamicShardResolver) ShardingRanges(expr syntax.Expr, targetBytesPerSh exprStr := expr.String() // try to get shards for the given expression // if it fails, fallback to linearshards based on stats - resp, err := r.next.Do(r.ctx, &logproto.ShardsRequest{ + // use the retry handler here to retry transient errors + resp, err := r.retryNextHandler.Do(r.ctx, &logproto.ShardsRequest{ From: adjustedFrom, Through: r.through, Query: expr.String(), diff --git a/pkg/storage/bloom/v1/bloom_tokenizer.go b/pkg/storage/bloom/v1/bloom_tokenizer.go index 274f4c37f25dc..e5a71d0aedd4d 100644 --- a/pkg/storage/bloom/v1/bloom_tokenizer.go +++ b/pkg/storage/bloom/v1/bloom_tokenizer.go @@ -7,12 +7,13 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/grafana/loki/pkg/push" "github.com/grafana/loki/v3/pkg/iter" v2iter "github.com/grafana/loki/v3/pkg/iter/v2" "github.com/grafana/loki/v3/pkg/logproto" "github.com/grafana/loki/v3/pkg/storage/bloom/v1/filter" "github.com/grafana/loki/v3/pkg/util/encoding" + + "github.com/grafana/loki/pkg/push" ) /* diff --git a/pkg/storage/bloom/v1/bloom_tokenizer_test.go b/pkg/storage/bloom/v1/bloom_tokenizer_test.go index 8f3e4f473e930..29deada6ab82c 100644 --- a/pkg/storage/bloom/v1/bloom_tokenizer_test.go +++ b/pkg/storage/bloom/v1/bloom_tokenizer_test.go @@ -11,13 +11,14 @@ import ( logger "github.com/go-kit/log" "github.com/grafana/dskit/multierror" - "github.com/grafana/loki/pkg/push" "github.com/grafana/loki/v3/pkg/chunkenc" "github.com/grafana/loki/v3/pkg/iter" v2 "github.com/grafana/loki/v3/pkg/iter/v2" "github.com/grafana/loki/v3/pkg/logproto" "github.com/grafana/loki/v3/pkg/logql/log" + "github.com/grafana/loki/pkg/push" + "github.com/prometheus/common/model" "github.com/stretchr/testify/require" diff --git a/pkg/storage/stores/shipper/bloomshipper/blockscache.go b/pkg/storage/stores/shipper/bloomshipper/blockscache.go index b26a4ed5cbda5..a7992af267c54 100644 --- a/pkg/storage/stores/shipper/bloomshipper/blockscache.go +++ b/pkg/storage/stores/shipper/bloomshipper/blockscache.go @@ -272,6 +272,10 @@ func (c *BlocksCache) put(key string, value BlockDirectory) (*Entry, error) { func (c *BlocksCache) evict(key string, element *list.Element, reason string) { entry := element.Value.(*Entry) + if key != entry.Key { + level.Error(c.logger).Log("msg", "failed to remove entry: entry key and map key do not match", "map_key", key, "entry_key", entry.Key) + return + } err := c.remove(entry) if err != nil { level.Error(c.logger).Log("msg", "failed to remove entry from disk", "err", err) @@ -400,6 +404,7 @@ func (c *BlocksCache) evictLeastRecentlyUsedItems() { ) elem := c.lru.Back() for c.currSizeBytes >= int64(c.cfg.SoftLimit) && elem != nil { + nextElem := elem.Prev() entry := elem.Value.(*Entry) if entry.refCount.Load() == 0 { level.Debug(c.logger).Log( @@ -408,7 +413,7 @@ func (c *BlocksCache) evictLeastRecentlyUsedItems() { ) c.evict(entry.Key, elem, reasonFull) } - elem = 
elem.Prev() + elem = nextElem } } diff --git a/pkg/storage/stores/shipper/bloomshipper/blockscache_test.go b/pkg/storage/stores/shipper/bloomshipper/blockscache_test.go index 1ddc465577fcf..d19833805ef47 100644 --- a/pkg/storage/stores/shipper/bloomshipper/blockscache_test.go +++ b/pkg/storage/stores/shipper/bloomshipper/blockscache_test.go @@ -197,8 +197,8 @@ func TestBlocksCache_TTLEviction(t *testing.T) { func TestBlocksCache_LRUEviction(t *testing.T) { cfg := config.BlocksCacheConfig{ TTL: time.Hour, - SoftLimit: flagext.Bytes(15), - HardLimit: flagext.Bytes(20), + SoftLimit: flagext.Bytes(25), + HardLimit: flagext.Bytes(50), // no need for TTL evictions PurgeInterval: time.Minute, } @@ -216,27 +216,32 @@ func TestBlocksCache_LRUEviction(t *testing.T) { // oldest without refcount - will be evicted err = cache.Put(ctx, "c", CacheValue("c", 4)) require.NoError(t, err) + err = cache.Put(ctx, "d", CacheValue("d", 4)) + require.NoError(t, err) + err = cache.Put(ctx, "e", CacheValue("e", 4)) + require.NoError(t, err) + err = cache.Put(ctx, "f", CacheValue("f", 4)) + require.NoError(t, err) // increase ref counter on "b" _, found := cache.Get(ctx, "b") require.True(t, found) // exceed soft limit - err = cache.Put(ctx, "d", CacheValue("d", 4)) + err = cache.Put(ctx, "g", CacheValue("g", 16)) require.NoError(t, err) time.Sleep(time.Second) l, ok := cache.len() require.True(t, ok) - require.Equal(t, 3, l) - // key "b" was evicted because it was the oldest - // and it had no ref counts - _, found = cache.Get(ctx, "c") - require.False(t, found) - - require.Equal(t, int64(12), cache.currSizeBytes) + // expect 3 items in cache: + // * item a with size 4 + // * item b with size 4 + // * item g with size 16 + require.Equal(t, 3, l) + require.Equal(t, int64(24), cache.currSizeBytes) } func TestBlocksCache_RefCounter(t *testing.T) { diff --git a/pkg/storage/stores/shipper/bloomshipper/cache.go b/pkg/storage/stores/shipper/bloomshipper/cache.go index bf533dac25b4b..203d15684502e 100644 --- a/pkg/storage/stores/shipper/bloomshipper/cache.go +++ b/pkg/storage/stores/shipper/bloomshipper/cache.go @@ -100,9 +100,12 @@ func loadBlockDirectories(root string, logger log.Logger) (keys []string, values } if ok, clean := isBlockDir(path, logger); ok { - keys = append(keys, resolver.Block(ref).Addr()) + // the cache key must not contain the directory prefix + // therefore we use the defaultKeyResolver to resolve the block's address + key := defaultKeyResolver{}.Block(ref).Addr() + keys = append(keys, key) values = append(values, NewBlockDirectory(ref, path)) - level.Debug(logger).Log("msg", "found block directory", "ref", ref, "path", path) + level.Debug(logger).Log("msg", "found block directory", "path", path, "key", key) } else { level.Warn(logger).Log("msg", "skip directory entry", "err", "not a block directory containing blooms and series", "path", path) _ = clean(path) diff --git a/pkg/storage/stores/shipper/bloomshipper/cache_test.go b/pkg/storage/stores/shipper/bloomshipper/cache_test.go index 2ce48d5022ed2..941b7fa29e99a 100644 --- a/pkg/storage/stores/shipper/bloomshipper/cache_test.go +++ b/pkg/storage/stores/shipper/bloomshipper/cache_test.go @@ -99,7 +99,7 @@ func Test_LoadBlocksDirIntoCache(t *testing.T) { require.Equal(t, 1, len(c.entries)) - key := filepath.Join(wd, validDir) + ".tar.gz" + key := validDir + ".tar.gz" // cache key must not contain directory prefix elem, found := c.entries[key] require.True(t, found) blockDir := elem.Value.(*Entry).Value diff --git a/pkg/storage/wal/manager.go 
b/pkg/storage/wal/manager.go index d5d8fff893f03..fd873ca71df03 100644 --- a/pkg/storage/wal/manager.go +++ b/pkg/storage/wal/manager.go @@ -102,38 +102,43 @@ type Manager struct { cfg Config metrics *ManagerMetrics available *list.List - - // pending is a list of segments that are waiting to be flushed. Once - // flushed, the segment is reset and moved to the back of the available - // list to accept writes again. - pending *list.List - - // firstAppend is the time of the first append to the segment at the - // front of the available list. It is used to know when the segment has - // exceeded the maximum age and should be moved to the pending list. - // It is reset each time this happens. - firstAppend time.Time - - closed bool - mu sync.Mutex - + pending *list.List + closed bool + mu sync.Mutex // Used in tests. clock quartz.Clock } -// item is similar to PendingItem, but it is an internal struct used in the -// available and pending lists. It contains a single-use result that is returned -// to callers appending to the WAL and a re-usable segment that is reset after -// each flush. -type item struct { +// segment is similar to PendingSegment, however it is an internal struct used +// in the available and pending lists. It contains a single-use result that is +// returned to callers appending to the WAL and a re-usable segment that is reset +// after each flush. +type segment struct { r *AppendResult w *SegmentWriter + + // firstAppend is the time of the first append to the segment. It is used to + // know when the segment has exceeded the maximum age and should be moved to + // the pending list. + firstAppend time.Time + + // moved is the time the segment was moved to the pending list. It is used + // to calculate the age of the segment. A segment is moved when it has + // exceeded the maximum age or the maximum size. + moved time.Time +} + +// PendingSegment contains a result and the segment to be flushed. +type PendingSegment struct { + Result *AppendResult + Writer *SegmentWriter + FirstAppend time.Time + Moved time.Time } -// PendingItem contains a result and the segment to be flushed. -type PendingItem struct { - Result *AppendResult - Writer *SegmentWriter +// Age returns the age of the segment. +func (p *PendingSegment) Age() time.Duration { + return p.Moved.Sub(p.FirstAppend) } func NewManager(cfg Config, metrics *Metrics) (*Manager, error) { @@ -152,7 +157,7 @@ func NewManager(cfg Config, metrics *Metrics) (*Manager, error) { if err != nil { return nil, err } - m.available.PushBack(&item{ + m.available.PushBack(&segment{ r: &AppendResult{done: make(chan struct{})}, w: w, }) @@ -171,29 +176,29 @@ func (m *Manager) Append(r AppendRequest) (*AppendResult, error) { if el == nil { return nil, ErrFull } - it := el.Value.(*item) - if m.firstAppend.IsZero() { + s := el.Value.(*segment) + if s.firstAppend.IsZero() { // This is the first append to the segment. This time will be used in // know when the segment has exceeded its maximum age and should be // moved to the pending list. - m.firstAppend = m.clock.Now() + s.firstAppend = m.clock.Now() } - it.w.Append(r.TenantID, r.LabelsStr, r.Labels, r.Entries) - // If the segment exceeded the maximum age or the maximum size, move it to + s.w.Append(r.TenantID, r.LabelsStr, r.Labels, r.Entries) + // If the segment exceeded the maximum age or the maximum size, move s to // the closed list to be flushed. 
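The change above moves age tracking from the Manager onto each segment: firstAppend is stamped by the first write and moved is stamped when the segment is queued for flushing, so a PendingSegment can report its own age. A standalone sketch of that bookkeeping, with simplified types rather than the wal package's API:

package main

import (
	"fmt"
	"time"
)

// segment is a simplified stand-in for the WAL's internal segment type.
type segment struct {
	firstAppend time.Time // stamped on the first append only
	moved       time.Time // stamped when queued onto the pending list
}

func (s *segment) append(now time.Time) {
	if s.firstAppend.IsZero() {
		s.firstAppend = now
	}
}

func (s *segment) move(now time.Time) { s.moved = now }

// age mirrors PendingSegment.Age: how long the segment accepted writes
// before it was moved to the pending list.
func (s *segment) age() time.Duration { return s.moved.Sub(s.firstAppend) }

func main() {
	t0 := time.Now()
	var s segment
	s.append(t0)
	s.append(t0.Add(50 * time.Millisecond)) // later appends keep the original stamp
	s.move(t0.Add(100 * time.Millisecond))
	fmt.Println(s.age()) // 100ms
}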
- if m.clock.Since(m.firstAppend) >= m.cfg.MaxAge || it.w.InputSize() >= m.cfg.MaxSegmentSize { - m.move(el, it) + if m.clock.Since(s.firstAppend) >= m.cfg.MaxAge || s.w.InputSize() >= m.cfg.MaxSegmentSize { + m.move(el, s) } - return it.r, nil + return s.r, nil } func (m *Manager) Close() { m.mu.Lock() defer m.mu.Unlock() if el := m.available.Front(); el != nil { - it := el.Value.(*item) - if it.w.InputSize() > 0 { - m.move(el, it) + s := el.Value.(*segment) + if s.w.InputSize() > 0 { + m.move(el, s) } } m.closed = true @@ -203,7 +208,7 @@ func (m *Manager) Close() { // It returns nil if there are no segments waiting to be flushed. If the WAL // is closed it returns all remaining segments from the pending list and then // ErrClosed. -func (m *Manager) NextPending() (*PendingItem, error) { +func (m *Manager) NextPending() (*PendingSegment, error) { m.mu.Lock() defer m.mu.Unlock() if m.pending.Len() == 0 && !m.moveFrontIfExpired() { @@ -213,35 +218,40 @@ func (m *Manager) NextPending() (*PendingItem, error) { return nil, nil } el := m.pending.Front() - it := el.Value.(*item) + s := el.Value.(*segment) m.pending.Remove(el) m.metrics.NumPending.Dec() m.metrics.NumFlushing.Inc() - return &PendingItem{Result: it.r, Writer: it.w}, nil + return &PendingSegment{ + Result: s.r, + Writer: s.w, + FirstAppend: s.firstAppend, + Moved: s.moved, + }, nil } // Put resets the segment and puts it back in the available list to accept -// writes. A PendingItem should not be put back until it has been flushed. -func (m *Manager) Put(it *PendingItem) { - it.Writer.Reset() +// writes. A PendingSegment should not be put back until it has been flushed. +func (m *Manager) Put(s *PendingSegment) { + s.Writer.Reset() m.mu.Lock() defer m.mu.Unlock() m.metrics.NumFlushing.Dec() m.metrics.NumAvailable.Inc() - m.available.PushBack(&item{ + m.available.PushBack(&segment{ r: &AppendResult{done: make(chan struct{})}, - w: it.Writer, + w: s.Writer, }) } // move the element from the available list to the pending list and sets the // relevant metrics. -func (m *Manager) move(el *list.Element, it *item) { - m.pending.PushBack(it) +func (m *Manager) move(el *list.Element, s *segment) { + s.moved = m.clock.Now() + m.pending.PushBack(s) m.metrics.NumPending.Inc() m.available.Remove(el) m.metrics.NumAvailable.Dec() - m.firstAppend = time.Time{} } // moveFrontIfExpired moves the element from the front of the available list to @@ -249,9 +259,9 @@ func (m *Manager) move(el *list.Element, it *item) { // relevant metrics. func (m *Manager) moveFrontIfExpired() bool { if el := m.available.Front(); el != nil { - it := el.Value.(*item) - if !m.firstAppend.IsZero() && m.clock.Since(m.firstAppend) >= m.cfg.MaxAge { - m.move(el, it) + s := el.Value.(*segment) + if !s.firstAppend.IsZero() && m.clock.Since(s.firstAppend) >= m.cfg.MaxAge { + m.move(el, s) return true } } diff --git a/pkg/storage/wal/manager_test.go b/pkg/storage/wal/manager_test.go index 9dc49dce4759f..a69fbee51ab21 100644 --- a/pkg/storage/wal/manager_test.go +++ b/pkg/storage/wal/manager_test.go @@ -255,9 +255,9 @@ func TestManager_NextPending(t *testing.T) { // There should be no segments waiting to be flushed as no data has been // written. - it, err := m.NextPending() + s, err := m.NextPending() require.NoError(t, err) - require.Nil(t, it) + require.Nil(t, s) // Append 1KB of data. lbs := labels.Labels{{Name: "a", Value: "b"}} @@ -271,14 +271,78 @@ func TestManager_NextPending(t *testing.T) { require.NoError(t, err) // There should be a segment waiting to be flushed. 
- it, err = m.NextPending() + s, err = m.NextPending() require.NoError(t, err) - require.NotNil(t, it) + require.NotNil(t, s) // There should be no more segments waiting to be flushed. - it, err = m.NextPending() + s, err = m.NextPending() require.NoError(t, err) - require.Nil(t, it) + require.Nil(t, s) +} + +func TestManager_NextPendingAge(t *testing.T) { + m, err := NewManager(Config{ + MaxAge: 100 * time.Millisecond, + MaxSegments: 1, + MaxSegmentSize: 1024, // 1KB + }, NewMetrics(nil)) + require.NoError(t, err) + + // Create a mock clock. + clock := quartz.NewMock(t) + m.clock = clock + + // Append 1B of data. + lbs := labels.Labels{{Name: "a", Value: "b"}} + entries := []*logproto.Entry{{Timestamp: time.Now(), Line: "c"}} + res, err := m.Append(AppendRequest{ + TenantID: "1", + Labels: lbs, + LabelsStr: lbs.String(), + Entries: entries, + }) + require.NoError(t, err) + require.NotNil(t, res) + + // Wait 100ms. The segment that was just appended to should have reached + // the maximum age. + clock.Advance(100 * time.Millisecond) + s, err := m.NextPending() + require.NoError(t, err) + require.NotNil(t, s) + require.Equal(t, 100*time.Millisecond, s.Age()) + m.Put(s) + + // Append 1KB of data using two separate append requests, 1ms apart. + entries = []*logproto.Entry{{Timestamp: time.Now(), Line: strings.Repeat("c", 512)}} + res, err = m.Append(AppendRequest{ + TenantID: "1", + Labels: lbs, + LabelsStr: lbs.String(), + Entries: entries, + }) + require.NoError(t, err) + require.NotNil(t, res) + + // Wait 1ms and then append the rest of the data. + clock.Advance(time.Millisecond) + entries = []*logproto.Entry{{Timestamp: time.Now(), Line: strings.Repeat("c", 512)}} + res, err = m.Append(AppendRequest{ + TenantID: "1", + Labels: lbs, + LabelsStr: lbs.String(), + Entries: entries, + }) + require.NoError(t, err) + require.NotNil(t, res) + + // The segment that was just appended to should have reached the maximum + // size. + s, err = m.NextPending() + require.NoError(t, err) + require.NotNil(t, s) + require.Equal(t, time.Millisecond, s.Age()) } func TestManager_NextPendingMaxAgeExceeded(t *testing.T) { @@ -307,18 +371,18 @@ func TestManager_NextPendingMaxAgeExceeded(t *testing.T) { // The segment that was just appended to has neither reached the maximum // age nor maximum size to be flushed. - it, err := m.NextPending() + s, err := m.NextPending() require.NoError(t, err) - require.Nil(t, it) + require.Nil(t, s) require.Equal(t, 1, m.available.Len()) require.Equal(t, 0, m.pending.Len()) // Wait 100ms. The segment that was just appended to should have reached // the maximum age. clock.Advance(100 * time.Millisecond) - it, err = m.NextPending() + s, err = m.NextPending() require.NoError(t, err) - require.NotNil(t, it) + require.NotNil(t, s) require.Equal(t, 0, m.available.Len()) require.Equal(t, 0, m.pending.Len()) } @@ -345,24 +409,24 @@ func TestManager_NextPendingWALClosed(t *testing.T) { // There should be no segments waiting to be flushed as neither the maximum // age nor maximum size has been exceeded. - it, err := m.NextPending() + s, err := m.NextPending() require.NoError(t, err) - require.Nil(t, it) + require.Nil(t, s) // Close the WAL. m.Close() // There should be one segment waiting to be flushed. - it, err = m.NextPending() + s, err = m.NextPending() require.NoError(t, err) - require.NotNil(t, it) + require.NotNil(t, s) // There are no more segments waiting to be flushed, and since the WAL is // closed, successive calls should return ErrClosed. 
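The loop below asserts the consumer-side contract: after Close, NextPending hands out any remaining segments and then keeps returning ErrClosed. A sketch of a flusher built on that contract, assuming only the NextPending/Put/ErrClosed names from this diff (the flush function and fake manager are hypothetical):

package main

import (
	"errors"
	"fmt"
)

var errClosed = errors.New("wal: closed") // stand-in for the package's ErrClosed

type pendingSegment struct{ data string }

// manager captures just the NextPending/Put contract used here.
type manager interface {
	NextPending() (*pendingSegment, error)
	Put(*pendingSegment)
}

// drain flushes segments until the manager reports it is closed. A segment is
// only returned with Put after a successful flush, since Put resets it for reuse.
func drain(m manager, flush func(*pendingSegment) error) error {
	for {
		s, err := m.NextPending()
		if errors.Is(err, errClosed) {
			return nil // remaining segments were already handed out before errClosed
		}
		if err != nil {
			return err
		}
		if s == nil {
			return nil // nothing pending right now
		}
		if err := flush(s); err != nil {
			return err
		}
		m.Put(s)
	}
}

// fakeManager hands out queued segments, then reports closed.
type fakeManager struct{ pending []*pendingSegment }

func (f *fakeManager) NextPending() (*pendingSegment, error) {
	if len(f.pending) == 0 {
		return nil, errClosed
	}
	s := f.pending[0]
	f.pending = f.pending[1:]
	return s, nil
}

func (f *fakeManager) Put(*pendingSegment) {}

func main() {
	m := &fakeManager{pending: []*pendingSegment{{"a"}, {"b"}}}
	err := drain(m, func(s *pendingSegment) error {
		fmt.Println("flushed", s.data)
		return nil
	})
	fmt.Println("done:", err)
}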
for i := 0; i < 10; i++ { - it, err = m.NextPending() + s, err = m.NextPending() require.ErrorIs(t, err, ErrClosed) - require.Nil(t, it) + require.Nil(t, s) } } @@ -395,22 +459,22 @@ func TestManager_Put(t *testing.T) { require.Equal(t, 1, m.pending.Len()) // Getting the pending segment should remove it from the list. - it, err := m.NextPending() + s, err := m.NextPending() require.NoError(t, err) - require.NotNil(t, it) + require.NotNil(t, s) require.Equal(t, 0, m.available.Len()) require.Equal(t, 0, m.pending.Len()) // The segment should contain 1KB of data. - require.Equal(t, int64(1024), it.Writer.InputSize()) + require.Equal(t, int64(1024), s.Writer.InputSize()) // Putting it back should add it to the available list. - m.Put(it) + m.Put(s) require.Equal(t, 1, m.available.Len()) require.Equal(t, 0, m.pending.Len()) // The segment should be reset. - require.Equal(t, int64(0), it.Writer.InputSize()) + require.Equal(t, int64(0), s.Writer.InputSize()) } func TestManager_Metrics(t *testing.T) { @@ -465,9 +529,9 @@ wal_segments_pending 1 require.NoError(t, testutil.CollectAndCompare(r, strings.NewReader(expected), metricNames...)) // Get the segment from the pending list. - it, err := m.NextPending() + s, err := m.NextPending() require.NoError(t, err) - require.NotNil(t, it) + require.NotNil(t, s) expected = ` # HELP wal_segments_available The number of WAL segments accepting writes. # TYPE wal_segments_available gauge @@ -482,7 +546,7 @@ wal_segments_pending 0 require.NoError(t, testutil.CollectAndCompare(r, strings.NewReader(expected), metricNames...)) // Reset the segment and put it back in the available list. - m.Put(it) + m.Put(s) expected = ` # HELP wal_segments_available The number of WAL segments accepting writes. # TYPE wal_segments_available gauge diff --git a/pkg/storage/wal/segment.go b/pkg/storage/wal/segment.go index 6a792988bdb47..57ad096fac309 100644 --- a/pkg/storage/wal/segment.go +++ b/pkg/storage/wal/segment.go @@ -15,6 +15,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" + "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore/metastorepb" "github.com/grafana/loki/v3/pkg/logproto" tsdbindex "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/index" "github.com/grafana/loki/v3/pkg/storage/wal/chunks" @@ -29,7 +30,6 @@ var ( streamSegmentPool = sync.Pool{ New: func() interface{} { return &streamSegment{ - lock: &sync.Mutex{}, entries: make([]*logproto.Entry, 0, 4096), } }, @@ -46,17 +46,16 @@ type streamID struct { } type SegmentWriter struct { - metrics *SegmentMetrics - streams map[streamID]*streamSegment - buf1 encoding.Encbuf - outputSize atomic.Int64 - inputSize atomic.Int64 - idxWriter *index.Writer - consistencyMtx *sync.RWMutex + metrics *SegmentMetrics + streams map[streamID]*streamSegment + buf1 encoding.Encbuf + outputSize atomic.Int64 + inputSize atomic.Int64 + idxWriter *index.Writer + indexRef metastorepb.DataRef } type streamSegment struct { - lock *sync.Mutex lbls labels.Labels entries []*logproto.Entry tenantID string @@ -84,24 +83,19 @@ func NewWalSegmentWriter(m *SegmentMetrics) (*SegmentWriter, error) { return nil, err } return &SegmentWriter{ - metrics: m, - streams: make(map[streamID]*streamSegment, 64), - buf1: encoding.EncWith(make([]byte, 0, 4)), - idxWriter: idxWriter, - inputSize: atomic.Int64{}, - consistencyMtx: &sync.RWMutex{}, + metrics: m, + streams: make(map[streamID]*streamSegment, 64), + buf1: encoding.EncWith(make([]byte, 0, 4)), + idxWriter: idxWriter, + 
inputSize: atomic.Int64{}, }, nil } func (b *SegmentWriter) getOrCreateStream(id streamID, lbls labels.Labels) *streamSegment { - b.consistencyMtx.RLock() s, ok := b.streams[id] - b.consistencyMtx.RUnlock() if ok { return s } - b.consistencyMtx.Lock() - defer b.consistencyMtx.Unlock() // Check another thread has not created it s, ok = b.streams[id] if ok { @@ -128,8 +122,6 @@ func (b *SegmentWriter) Append(tenantID, labelsString string, lbls labels.Labels id := streamID{labels: labelsString, tenant: tenantID} s := b.getOrCreateStream(id, lbls) - s.lock.Lock() - defer s.lock.Unlock() for i, e := range entries { if e.Timestamp.UnixNano() >= s.maxt { s.entries = append(s.entries, entries[i]) @@ -147,14 +139,11 @@ func (b *SegmentWriter) Append(tenantID, labelsString string, lbls labels.Labels } } -// Observe updates metrics for the writer. If called before WriteTo then the -// output size histogram will observe 0. -func (b *SegmentWriter) Observe() { - b.consistencyMtx.Lock() - defer b.consistencyMtx.Unlock() - +// ReportMetrics for the writer. If called before WriteTo then the output size +// histogram will observe 0. +func (b *SegmentWriter) ReportMetrics() { b.metrics.streams.Observe(float64(len(b.streams))) - tenants := make(map[string]struct{}, len(b.streams)) + tenants := make(map[string]struct{}, 64) for _, s := range b.streams { tenants[s.tenantID] = struct{}{} } @@ -163,6 +152,55 @@ func (b *SegmentWriter) Observe() { b.metrics.outputSizeBytes.Observe(float64(b.outputSize.Load())) } +func (b *SegmentWriter) Meta(id string) *metastorepb.BlockMeta { + var globalMinT, globalMaxT int64 + + tenants := make(map[string]*metastorepb.TenantStreams, 64) + for _, s := range b.streams { + tenant, ok := tenants[s.tenantID] + if !ok { + tenant = &metastorepb.TenantStreams{ + TenantId: s.tenantID, + } + tenants[s.tenantID] = tenant + } + if len(s.entries) == 0 { + continue + } + streamMinT, streamMaxT := s.entries[0].Timestamp.UnixNano(), s.entries[len(s.entries)-1].Timestamp.UnixNano() + + if globalMinT == 0 || streamMinT < globalMinT { + globalMinT = streamMinT + } + if streamMaxT > globalMaxT { + globalMaxT = streamMaxT + } + if tenant.MinTime == 0 || tenant.MinTime > streamMinT { + tenant.MinTime = streamMinT + } + if tenant.MaxTime < streamMaxT { + tenant.MaxTime = streamMaxT + } + } + result := make([]*metastorepb.TenantStreams, 0, len(tenants)) + for _, tenant := range tenants { + tenant := tenant + result = append(result, tenant) + } + sort.Slice(result, func(i, j int) bool { + return result[i].TenantId < result[j].TenantId + }) + return &metastorepb.BlockMeta{ + Id: id, + FormatVersion: uint64(1), + CompactionLevel: 0, + IndexRef: b.indexRef, + MinTime: globalMinT, + MaxTime: globalMaxT, + TenantStreams: result, + } +} + func (b *SegmentWriter) WriteTo(w io.Writer) (int64, error) { var ( total int64 @@ -256,6 +294,8 @@ func (b *SegmentWriter) WriteTo(w io.Writer) (int64, error) { if n != len(buf) { return total, errors.New("invalid written index len") } + b.indexRef.Offset = total + b.indexRef.Length = int64(n) total += int64(n) // write index len 4b @@ -297,6 +337,8 @@ func (b *SegmentWriter) Reset() { b.streams = make(map[streamID]*streamSegment, 64) b.buf1.Reset() b.inputSize.Store(0) + b.indexRef.Length = 0 + b.indexRef.Offset = 0 } // InputSize returns the total size of the input data written to the writer. 
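The indexRef bookkeeping added to WriteTo and Reset above records where the index section lands inside the encoded segment, which is what lets Meta hand out an IndexRef that callers can slice straight back out of the written bytes (as Test_Meta does below). A simplified, self-contained sketch with illustrative names, not the real wal API:

package main

import (
	"bytes"
	"fmt"
)

// dataRef is a stand-in for metastorepb.DataRef: where a section lives in the
// encoded segment.
type dataRef struct{ Offset, Length int64 }

// writeSegment writes chunk data followed by the index, recording the index's
// offset and length the same way WriteTo fills in b.indexRef.
func writeSegment(w *bytes.Buffer, data, index []byte) dataRef {
	w.Write(data)
	ref := dataRef{Offset: int64(w.Len()), Length: int64(len(index))}
	w.Write(index)
	return ref
}

func main() {
	var buf bytes.Buffer
	ref := writeSegment(&buf, []byte("chunk bytes..."), []byte("INDEX"))
	// Recover the index section using only the recorded ref, the same slicing
	// the test performs with meta.IndexRef.Offset and meta.IndexRef.Length.
	idx := buf.Bytes()[ref.Offset : ref.Offset+ref.Length]
	fmt.Printf("index=%q ref=%+v\n", idx, ref)
}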
diff --git a/pkg/storage/wal/segment_test.go b/pkg/storage/wal/segment_test.go index 51117e3b500f8..34b8d78b2d586 100644 --- a/pkg/storage/wal/segment_test.go +++ b/pkg/storage/wal/segment_test.go @@ -5,7 +5,6 @@ import ( "context" "fmt" "sort" - "sync" "testing" "time" @@ -13,8 +12,10 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/require" + "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore/metastorepb" "github.com/grafana/loki/v3/pkg/logproto" "github.com/grafana/loki/v3/pkg/logql/syntax" + "github.com/grafana/loki/v3/pkg/storage/wal/index" "github.com/grafana/loki/v3/pkg/storage/wal/testdata" "github.com/grafana/loki/pkg/push" @@ -130,163 +131,6 @@ func TestWalSegmentWriter_Append(t *testing.T) { } } -func BenchmarkConcurrentAppends(t *testing.B) { - type appendArgs struct { - tenant string - labels labels.Labels - entries []*push.Entry - } - - lbls := []labels.Labels{ - labels.FromStrings("container", "foo", "namespace", "dev"), - labels.FromStrings("container", "bar", "namespace", "staging"), - labels.FromStrings("container", "bar", "namespace", "prod"), - } - characters := "abcdefghijklmnopqrstuvwxyz" - tenants := []string{} - // 676 unique tenants (26^2) - for i := 0; i < len(characters); i++ { - for j := 0; j < len(characters); j++ { - tenants = append(tenants, string(characters[i])+string(characters[j])) - } - } - - workChan := make(chan *appendArgs) - var wg sync.WaitGroup - var w *SegmentWriter - for i := 0; i < 100; i++ { - wg.Add(1) - go func(i int) { - for args := range workChan { - w.Append(args.tenant, args.labels.String(), args.labels, args.entries) - } - wg.Done() - }(i) - } - - t.ResetTimer() - for i := 0; i < t.N; i++ { - var err error - w, err = NewWalSegmentWriter(NewSegmentMetrics(nil)) - require.NoError(t, err) - - for _, lbl := range lbls { - for _, r := range tenants { - for i := 0; i < 10; i++ { - workChan <- &appendArgs{ - tenant: r, - labels: lbl, - entries: []*push.Entry{ - {Timestamp: time.Unix(0, int64(i)), Line: fmt.Sprintf("log line %d", i)}, - }, - } - } - } - } - } - close(workChan) - wg.Wait() -} - -func TestConcurrentAppends(t *testing.T) { - type appendArgs struct { - tenant string - labels labels.Labels - entries []*push.Entry - } - dst := bytes.NewBuffer(nil) - - w, err := NewWalSegmentWriter(NewSegmentMetrics(nil)) - require.NoError(t, err) - var wg sync.WaitGroup - workChan := make(chan *appendArgs, 100) - for i := 0; i < 100; i++ { - wg.Add(1) - go func(i int) { - for args := range workChan { - w.Append(args.tenant, args.labels.String(), args.labels, args.entries) - } - wg.Done() - }(i) - } - - lbls := []labels.Labels{ - labels.FromStrings("container", "foo", "namespace", "dev"), - labels.FromStrings("container", "bar", "namespace", "staging"), - labels.FromStrings("container", "bar", "namespace", "prod"), - } - characters := "abcdefghijklmnopqrstuvwxyz" - tenants := []string{} - // 676 unique tenants (26^2) - for i := 0; i < len(characters); i++ { - for j := 0; j < len(characters); j++ { - for k := 0; k < len(characters); k++ { - tenants = append(tenants, string(characters[i])+string(characters[j])+string(characters[k])) - } - } - } - - msgsPerSeries := 10 - msgsGenerated := 0 - for _, r := range tenants { - for _, lbl := range lbls { - for i := 0; i < msgsPerSeries; i++ { - msgsGenerated++ - workChan <- &appendArgs{ - tenant: r, - labels: lbl, - entries: []*push.Entry{ - {Timestamp: time.Unix(0, int64(i)), Line: fmt.Sprintf("log line %d", i)}, - }, - } - } - } - } - close(workChan) - wg.Wait() - - 
n, err := w.WriteTo(dst) - require.NoError(t, err) - require.True(t, n > 0) - - r, err := NewReader(dst.Bytes()) - require.NoError(t, err) - - iter, err := r.Series(context.Background()) - require.NoError(t, err) - - var expectedSeries, actualSeries []string - - for _, tenant := range tenants { - for _, lbl := range lbls { - expectedSeries = append(expectedSeries, labels.NewBuilder(lbl).Set(tenantLabel, tenant).Labels().String()) - } - } - - msgsRead := 0 - for iter.Next() { - actualSeries = append(actualSeries, iter.At().String()) - chk, err := iter.ChunkReader(nil) - require.NoError(t, err) - // verify all lines - var i int - for chk.Next() { - ts, line := chk.At() - require.Equal(t, int64(i), ts) - require.Equal(t, fmt.Sprintf("log line %d", i), string(line)) - msgsRead++ - i++ - } - require.NoError(t, chk.Err()) - require.NoError(t, chk.Close()) - require.Equal(t, msgsPerSeries, i) - } - require.NoError(t, iter.Err()) - require.ElementsMatch(t, expectedSeries, actualSeries) - require.Equal(t, msgsGenerated, msgsRead) - t.Logf("Generated %d messages between %d tenants", msgsGenerated, len(tenants)) -} - func TestMultiTenantWrite(t *testing.T) { w, err := NewWalSegmentWriter(NewSegmentMetrics(nil)) require.NoError(t, err) @@ -445,6 +289,54 @@ func TestReset(t *testing.T) { require.Equal(t, dst.Bytes(), copyBuffer.Bytes()) } +func Test_Meta(t *testing.T) { + w, err := NewWalSegmentWriter(NewSegmentMetrics(nil)) + buff := bytes.NewBuffer(nil) + + require.NoError(t, err) + + lbls := labels.FromStrings("container", "foo", "namespace", "dev") + w.Append("tenantb", lbls.String(), lbls, []*push.Entry{ + {Timestamp: time.Unix(1, 0), Line: "Entry 1"}, + {Timestamp: time.Unix(2, 0), Line: "Entry 2"}, + {Timestamp: time.Unix(3, 0), Line: "Entry 3"}, + }) + lbls = labels.FromStrings("container", "bar", "namespace", "dev") + w.Append("tenanta", lbls.String(), lbls, []*push.Entry{ + {Timestamp: time.Unix(2, 0), Line: "Entry 1"}, + {Timestamp: time.Unix(3, 0), Line: "Entry 2"}, + {Timestamp: time.Unix(4, 0), Line: "Entry 3"}, + }) + _, err = w.WriteTo(buff) + require.NoError(t, err) + meta := w.Meta("bar") + indexReader, err := index.NewReader(index.RealByteSlice(buff.Bytes()[meta.IndexRef.Offset : meta.IndexRef.Offset+meta.IndexRef.Length])) + require.NoError(t, err) + + defer indexReader.Close() + + require.Equal(t, &metastorepb.BlockMeta{ + FormatVersion: 1, + Id: "bar", + MinTime: time.Unix(1, 0).UnixNano(), + MaxTime: time.Unix(4, 0).UnixNano(), + CompactionLevel: 0, + IndexRef: meta.IndexRef, + TenantStreams: []*metastorepb.TenantStreams{ + { + TenantId: "tenanta", + MinTime: time.Unix(2, 0).UnixNano(), + MaxTime: time.Unix(4, 0).UnixNano(), + }, + { + TenantId: "tenantb", + MinTime: time.Unix(1, 0).UnixNano(), + MaxTime: time.Unix(3, 0).UnixNano(), + }, + }, + }, meta) +} + func BenchmarkWrites(b *testing.B) { files := testdata.Files() lbls := []labels.Labels{} diff --git a/production/docker/docker-compose.yaml b/production/docker/docker-compose.yaml index a4f74c7bb1182..5b42a434377c4 100644 --- a/production/docker/docker-compose.yaml +++ b/production/docker/docker-compose.yaml @@ -104,7 +104,7 @@ services: - loki loki-gateway: - image: nginx:1.19 + image: nginx:1.27 volumes: - ./config/nginx.conf:/etc/nginx/nginx.conf ports: diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md index 525cf8245ac49..3941821328db5 100644 --- a/production/helm/loki/CHANGELOG.md +++ b/production/helm/loki/CHANGELOG.md @@ -13,6 +13,10 @@ Entries should include a reference to the pull 
request that introduced the change. [//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries below this line.) +## 6.7.2 + +- [BUGFIX] Fix imagePullSecrets for statefulset-results-cache + ## 6.7.1 - [CHANGE] Changed version of Loki to 3.1.0 diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml index eb9a96beae195..168da97136daa 100644 --- a/production/helm/loki/Chart.yaml +++ b/production/helm/loki/Chart.yaml @@ -3,7 +3,7 @@ name: loki description: Helm chart for Grafana Loki and Grafana Enterprise Logs supporting both simple, scalable and distributed modes. type: application appVersion: 3.1.0 -version: 6.7.1 +version: 6.7.2 home: https://grafana.github.io/helm-charts sources: - https://github.com/grafana/loki diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md index acb17fe25f140..a6e7e53df538d 100644 --- a/production/helm/loki/README.md +++ b/production/helm/loki/README.md @@ -1,6 +1,6 @@ # loki -![Version: 6.7.1](https://img.shields.io/badge/Version-6.7.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.1.0](https://img.shields.io/badge/AppVersion-3.1.0-informational?style=flat-square) +![Version: 6.7.2](https://img.shields.io/badge/Version-6.7.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.1.0](https://img.shields.io/badge/AppVersion-3.1.0-informational?style=flat-square) Helm chart for Grafana Loki and Grafana Enterprise Logs supporting both simple, scalable and distributed modes. diff --git a/production/helm/loki/src/helm-test/Dockerfile b/production/helm/loki/src/helm-test/Dockerfile index 48ff7e8a6a275..bb71f28b98edc 100644 --- a/production/helm/loki/src/helm-test/Dockerfile +++ b/production/helm/loki/src/helm-test/Dockerfile @@ -1,4 +1,5 @@ -FROM golang:1.22.2 as build +ARG GO_VERSION=1.22 +FROM golang:${GO_VERSION} as build # build via Makefile target helm-test-image in root # Makefile. Building from this directory will not be @@ -7,7 +8,6 @@ COPY . /src/loki WORKDIR /src/loki RUN make clean && make BUILD_IN_CONTAINER=false helm-test -FROM alpine:3.18.5 -RUN apk add --update --no-cache ca-certificates=20230506-r0 +FROM gcr.io/distroless/base-nossl:debug COPY --from=build /src/loki/production/helm/loki/src/helm-test/helm-test /usr/bin/helm-test ENTRYPOINT [ "/usr/bin/helm-test" ] diff --git a/production/helm/loki/src/helm-test/helm-test b/production/helm/loki/src/helm-test/helm-test new file mode 100755 index 0000000000000..b08a5f43b84cd Binary files /dev/null and b/production/helm/loki/src/helm-test/helm-test differ diff --git a/production/helm/loki/templates/memcached/_memcached-statefulset.tpl b/production/helm/loki/templates/memcached/_memcached-statefulset.tpl index 32fd624502677..8310e49dbc723 100644 --- a/production/helm/loki/templates/memcached/_memcached-statefulset.tpl +++ b/production/helm/loki/templates/memcached/_memcached-statefulset.tpl @@ -72,7 +72,7 @@ spec: terminationGracePeriodSeconds: {{ .terminationGracePeriodSeconds }} {{- if $.ctx.Values.imagePullSecrets }} imagePullSecrets: - {{- range $.ctx.Values.image.pullSecrets }} + {{- range $.ctx.Values.imagePullSecrets }} - name: {{ . 
}} {{- end }} {{- end }} diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml index 9de6584cbeda4..4f27bd1376894 100644 --- a/production/helm/loki/values.yaml +++ b/production/helm/loki/values.yaml @@ -3222,7 +3222,7 @@ monitoring: # -- Labels for the dashboards ConfigMap labels: grafana_dashboard: "1" - # Recording rules for monitoring Loki, required for some dashboards + # -- DEPRECATED Recording rules for monitoring Loki, required for some dashboards rules: # -- If enabled, create PrometheusRule resource with Loki recording rules enabled: false @@ -3252,7 +3252,7 @@ monitoring: # expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, job, route) # - record: node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate # expr: sum(rate(container_cpu_usage_seconds_total[1m])) by (node, namespace, pod, container) - # ServiceMonitor configuration + # -- DEPRECATED ServiceMonitor configuration serviceMonitor: # -- If enabled, ServiceMonitor resources for Prometheus Operator are created enabled: false @@ -3288,7 +3288,7 @@ monitoring: labels: {} # -- If defined a MetricsInstance will be created to remote write metrics. remoteWrite: null - # Self monitoring determines whether Loki should scrape its own logs. + # -- DEPRECATED Self monitoring determines whether Loki should scrape its own logs. # This feature currently relies on the Grafana Agent Operator being installed, # which is installed by default using the grafana-agent-operator sub-chart. # It will create custom resources for GrafanaAgent, LogsInstance, and PodLogs to configure @@ -3304,9 +3304,9 @@ monitoring: # -- Namespace to create additional tenant token secret in. Useful if your Grafana instance # is in a separate namespace. Token will still be created in the canary namespace. secretNamespace: "{{ .Release.Namespace }}" - # Grafana Agent configuration + # -- DEPRECATED Grafana Agent configuration grafanaAgent: - # -- Controls whether to install the Grafana Agent Operator and its CRDs. + # -- DEPRECATED Controls whether to install the Grafana Agent Operator and its CRDs. # Note that helm will not install CRDs if this flag is enabled during an upgrade. 
# In that case install the CRDs manually from https://github.com/grafana/agent/tree/main/production/operator/crds installOperator: false diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-deletion.json b/production/loki-mixin-compiled-ssd/dashboards/loki-deletion.json index 0718507d941fd..e630e9b9f5c0b 100644 --- a/production/loki-mixin-compiled-ssd/dashboards/loki-deletion.json +++ b/production/loki-mixin-compiled-ssd/dashboards/loki-deletion.json @@ -379,7 +379,7 @@ "span": 4, "targets": [ { - "expr": "node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki.*|enterprise-logs)-backend.*\"}", + "expr": "node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{cluster=~\"$cluster\", namespace=~\"$namespace\", pod=~\"(compactor|(loki.*|enterprise-logs)-backend.*|loki-single-binary)\"}", "format": "time_series", "legendFormat": "{{pod}}", "legendLink": null @@ -426,7 +426,7 @@ "span": 4, "targets": [ { - "expr": "go_memstats_heap_inuse_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"compactor\"} / 1024 / 1024 ", + "expr": "go_memstats_heap_inuse_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", pod=~\"(compactor|(loki.*|enterprise-logs)-backend.*|loki-single-binary)\"} / 1024 / 1024 ", "format": "time_series", "legendFormat": " {{pod}} ", "legendLink": null @@ -579,7 +579,7 @@ "span": 6, "targets": [ { - "expr": "sum(rate(loki_compactor_deleted_lines{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki.*|enterprise-logs)-backend.*\"}[$__rate_interval])) by (user)", + "expr": "sum(rate(loki_compactor_deleted_lines{cluster=~\"$cluster\", namespace=~\"$namespace\", pod=~\"(compactor|(loki.*|enterprise-logs)-backend.*|loki-single-binary)\"}[$__rate_interval])) by (user)", "format": "time_series", "legendFormat": "{{user}}", "legendLink": null @@ -606,7 +606,7 @@ "span": 6, "targets": [ { - "expr": "{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki.*|enterprise-logs)-backend.*\"} |~ \"Started processing delete request|delete request for user marked as processed\" | logfmt | line_format \"{{.ts}} user={{.user}} delete_request_id={{.delete_request_id}} msg={{.msg}}\" ", + "expr": "{cluster=~\"$cluster\", namespace=~\"$namespace\", pod=~\"(compactor|(loki.*|enterprise-logs)-backend.*|loki-single-binary)\"} |~ \"Started processing delete request|delete request for user marked as processed\" | logfmt | line_format \"{{.ts}} user={{.user}} delete_request_id={{.delete_request_id}} msg={{.msg}}\" ", "refId": "A" } ], @@ -619,7 +619,7 @@ "span": 6, "targets": [ { - "expr": "{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki.*|enterprise-logs)-backend.*\"} |~ \"delete request for user added\" | logfmt | line_format \"{{.ts}} user={{.user}} query='{{.query}}'\"", + "expr": "{cluster=~\"$cluster\", namespace=~\"$namespace\", pod=~\"(compactor|(loki.*|enterprise-logs)-backend.*|loki-single-binary)\"} |~ \"delete request for user added\" | logfmt | line_format \"{{.ts}} user={{.user}} query='{{.query}}'\"", "refId": "A" } ], diff --git a/production/loki-mixin-compiled/dashboards/loki-deletion.json b/production/loki-mixin-compiled/dashboards/loki-deletion.json index 7b048f729e2b3..e630e9b9f5c0b 100644 --- a/production/loki-mixin-compiled/dashboards/loki-deletion.json +++ b/production/loki-mixin-compiled/dashboards/loki-deletion.json @@ -379,7 +379,7 @@ "span": 4, 
"targets": [ { - "expr": "node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"compactor\"}", + "expr": "node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{cluster=~\"$cluster\", namespace=~\"$namespace\", pod=~\"(compactor|(loki.*|enterprise-logs)-backend.*|loki-single-binary)\"}", "format": "time_series", "legendFormat": "{{pod}}", "legendLink": null @@ -426,7 +426,7 @@ "span": 4, "targets": [ { - "expr": "go_memstats_heap_inuse_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"compactor\"} / 1024 / 1024 ", + "expr": "go_memstats_heap_inuse_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", pod=~\"(compactor|(loki.*|enterprise-logs)-backend.*|loki-single-binary)\"} / 1024 / 1024 ", "format": "time_series", "legendFormat": " {{pod}} ", "legendLink": null @@ -579,7 +579,7 @@ "span": 6, "targets": [ { - "expr": "sum(rate(loki_compactor_deleted_lines{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"compactor\"}[$__rate_interval])) by (user)", + "expr": "sum(rate(loki_compactor_deleted_lines{cluster=~\"$cluster\", namespace=~\"$namespace\", pod=~\"(compactor|(loki.*|enterprise-logs)-backend.*|loki-single-binary)\"}[$__rate_interval])) by (user)", "format": "time_series", "legendFormat": "{{user}}", "legendLink": null @@ -606,7 +606,7 @@ "span": 6, "targets": [ { - "expr": "{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"compactor\"} |~ \"Started processing delete request|delete request for user marked as processed\" | logfmt | line_format \"{{.ts}} user={{.user}} delete_request_id={{.delete_request_id}} msg={{.msg}}\" ", + "expr": "{cluster=~\"$cluster\", namespace=~\"$namespace\", pod=~\"(compactor|(loki.*|enterprise-logs)-backend.*|loki-single-binary)\"} |~ \"Started processing delete request|delete request for user marked as processed\" | logfmt | line_format \"{{.ts}} user={{.user}} delete_request_id={{.delete_request_id}} msg={{.msg}}\" ", "refId": "A" } ], @@ -619,7 +619,7 @@ "span": 6, "targets": [ { - "expr": "{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"compactor\"} |~ \"delete request for user added\" | logfmt | line_format \"{{.ts}} user={{.user}} query='{{.query}}'\"", + "expr": "{cluster=~\"$cluster\", namespace=~\"$namespace\", pod=~\"(compactor|(loki.*|enterprise-logs)-backend.*|loki-single-binary)\"} |~ \"delete request for user added\" | logfmt | line_format \"{{.ts}} user={{.user}} query='{{.query}}'\"", "refId": "A" } ], diff --git a/production/loki-mixin-compiled/dashboards/loki-reads.json b/production/loki-mixin-compiled/dashboards/loki-reads.json index aed1907ab365b..ed65027b88d85 100644 --- a/production/loki-mixin-compiled/dashboards/loki-reads.json +++ b/production/loki-mixin-compiled/dashboards/loki-reads.json @@ -1643,7 +1643,7 @@ "span": 4, "targets": [ { - "expr": "histogram_quantile(0.99, sum by (le,route) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/bloom-gateway\", 
route=~\"(/base.Ruler/Rules|/indexgatewaypb.IndexGateway/GetChunkRef|/indexgatewaypb.IndexGateway/GetSeries|/indexgatewaypb.IndexGateway/GetShards|/indexgatewaypb.IndexGateway/GetStats|/indexgatewaypb.IndexGateway/GetVolume|/indexgatewaypb.IndexGateway/LabelNamesForMetricName|/indexgatewaypb.IndexGateway/LabelValuesForMetricName|/indexgatewaypb.IndexGateway/QueryIndex|/logproto.BloomGateway/FilterChunkRefs|/logproto.Pattern/Query|/logproto.Querier/GetChunkIDs|/logproto.Querier/GetDetectedLabels|/logproto.Querier/GetStats|/logproto.Querier/GetVolume|/logproto.Querier/Label|/logproto.Querier/Query|/logproto.Querier/QuerySample|/logproto.Querier/Series|/logproto.StreamData/GetStreamRates)\"})) * 1e3", + "expr": "histogram_quantile(0.99, sum by (le,route) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/index-gateway\", route=~\"(/base.Ruler/Rules|/indexgatewaypb.IndexGateway/GetChunkRef|/indexgatewaypb.IndexGateway/GetSeries|/indexgatewaypb.IndexGateway/GetShards|/indexgatewaypb.IndexGateway/GetStats|/indexgatewaypb.IndexGateway/GetVolume|/indexgatewaypb.IndexGateway/LabelNamesForMetricName|/indexgatewaypb.IndexGateway/LabelValuesForMetricName|/indexgatewaypb.IndexGateway/QueryIndex|/logproto.BloomGateway/FilterChunkRefs|/logproto.Pattern/Query|/logproto.Querier/GetChunkIDs|/logproto.Querier/GetDetectedLabels|/logproto.Querier/GetStats|/logproto.Querier/GetVolume|/logproto.Querier/Label|/logproto.Querier/Query|/logproto.Querier/QuerySample|/logproto.Querier/Series|/logproto.StreamData/GetStreamRates)\"})) * 1e3", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{ route }} 99th Percentile", @@ -1651,7 +1651,7 @@ "step": 10 }, { - "expr": "histogram_quantile(0.50, sum by (le,route) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/bloom-gateway\", route=~\"(/base.Ruler/Rules|/indexgatewaypb.IndexGateway/GetChunkRef|/indexgatewaypb.IndexGateway/GetSeries|/indexgatewaypb.IndexGateway/GetShards|/indexgatewaypb.IndexGateway/GetStats|/indexgatewaypb.IndexGateway/GetVolume|/indexgatewaypb.IndexGateway/LabelNamesForMetricName|/indexgatewaypb.IndexGateway/LabelValuesForMetricName|/indexgatewaypb.IndexGateway/QueryIndex|/logproto.BloomGateway/FilterChunkRefs|/logproto.Pattern/Query|/logproto.Querier/GetChunkIDs|/logproto.Querier/GetDetectedLabels|/logproto.Querier/GetStats|/logproto.Querier/GetVolume|/logproto.Querier/Label|/logproto.Querier/Query|/logproto.Querier/QuerySample|/logproto.Querier/Series|/logproto.StreamData/GetStreamRates)\"})) * 1e3", + "expr": "histogram_quantile(0.50, sum by (le,route) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/index-gateway\", route=~\"(/base.Ruler/Rules|/indexgatewaypb.IndexGateway/GetChunkRef|/indexgatewaypb.IndexGateway/GetSeries|/indexgatewaypb.IndexGateway/GetShards|/indexgatewaypb.IndexGateway/GetStats|/indexgatewaypb.IndexGateway/GetVolume|/indexgatewaypb.IndexGateway/LabelNamesForMetricName|/indexgatewaypb.IndexGateway/LabelValuesForMetricName|/indexgatewaypb.IndexGateway/QueryIndex|/logproto.BloomGateway/FilterChunkRefs|/logproto.Pattern/Query|/logproto.Querier/GetChunkIDs|/logproto.Querier/GetDetectedLabels|/logproto.Querier/GetStats|/logproto.Querier/GetVolume|/logproto.Querier/Label|/logproto.Querier/Query|/logproto.Querier/QuerySample|/logproto.Querier/Series|/logproto.StreamData/GetStreamRates)\"})) * 1e3", "format": "time_series", "intervalFactor": 2, 
"legendFormat": "{{ route }} 50th Percentile", @@ -1659,7 +1659,7 @@ "step": 10 }, { - "expr": "1e3 * sum(cluster_job_route:loki_request_duration_seconds_sum:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/bloom-gateway\", route=~\"(/base.Ruler/Rules|/indexgatewaypb.IndexGateway/GetChunkRef|/indexgatewaypb.IndexGateway/GetSeries|/indexgatewaypb.IndexGateway/GetShards|/indexgatewaypb.IndexGateway/GetStats|/indexgatewaypb.IndexGateway/GetVolume|/indexgatewaypb.IndexGateway/LabelNamesForMetricName|/indexgatewaypb.IndexGateway/LabelValuesForMetricName|/indexgatewaypb.IndexGateway/QueryIndex|/logproto.BloomGateway/FilterChunkRefs|/logproto.Pattern/Query|/logproto.Querier/GetChunkIDs|/logproto.Querier/GetDetectedLabels|/logproto.Querier/GetStats|/logproto.Querier/GetVolume|/logproto.Querier/Label|/logproto.Querier/Query|/logproto.Querier/QuerySample|/logproto.Querier/Series|/logproto.StreamData/GetStreamRates)\"}) by (route) / sum(cluster_job_route:loki_request_duration_seconds_count:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/bloom-gateway\", route=~\"(/base.Ruler/Rules|/indexgatewaypb.IndexGateway/GetChunkRef|/indexgatewaypb.IndexGateway/GetSeries|/indexgatewaypb.IndexGateway/GetShards|/indexgatewaypb.IndexGateway/GetStats|/indexgatewaypb.IndexGateway/GetVolume|/indexgatewaypb.IndexGateway/LabelNamesForMetricName|/indexgatewaypb.IndexGateway/LabelValuesForMetricName|/indexgatewaypb.IndexGateway/QueryIndex|/logproto.BloomGateway/FilterChunkRefs|/logproto.Pattern/Query|/logproto.Querier/GetChunkIDs|/logproto.Querier/GetDetectedLabels|/logproto.Querier/GetStats|/logproto.Querier/GetVolume|/logproto.Querier/Label|/logproto.Querier/Query|/logproto.Querier/QuerySample|/logproto.Querier/Series|/logproto.StreamData/GetStreamRates)\"}) by (route) ", + "expr": "1e3 * sum(cluster_job_route:loki_request_duration_seconds_sum:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/index-gateway\", route=~\"(/base.Ruler/Rules|/indexgatewaypb.IndexGateway/GetChunkRef|/indexgatewaypb.IndexGateway/GetSeries|/indexgatewaypb.IndexGateway/GetShards|/indexgatewaypb.IndexGateway/GetStats|/indexgatewaypb.IndexGateway/GetVolume|/indexgatewaypb.IndexGateway/LabelNamesForMetricName|/indexgatewaypb.IndexGateway/LabelValuesForMetricName|/indexgatewaypb.IndexGateway/QueryIndex|/logproto.BloomGateway/FilterChunkRefs|/logproto.Pattern/Query|/logproto.Querier/GetChunkIDs|/logproto.Querier/GetDetectedLabels|/logproto.Querier/GetStats|/logproto.Querier/GetVolume|/logproto.Querier/Label|/logproto.Querier/Query|/logproto.Querier/QuerySample|/logproto.Querier/Series|/logproto.StreamData/GetStreamRates)\"}) by (route) / sum(cluster_job_route:loki_request_duration_seconds_count:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/index-gateway\", route=~\"(/base.Ruler/Rules|/indexgatewaypb.IndexGateway/GetChunkRef|/indexgatewaypb.IndexGateway/GetSeries|/indexgatewaypb.IndexGateway/GetShards|/indexgatewaypb.IndexGateway/GetStats|/indexgatewaypb.IndexGateway/GetVolume|/indexgatewaypb.IndexGateway/LabelNamesForMetricName|/indexgatewaypb.IndexGateway/LabelValuesForMetricName|/indexgatewaypb.IndexGateway/QueryIndex|/logproto.BloomGateway/FilterChunkRefs|/logproto.Pattern/Query|/logproto.Querier/GetChunkIDs|/logproto.Querier/GetDetectedLabels|/logproto.Querier/GetStats|/logproto.Querier/GetVolume|/logproto.Querier/Label|/logproto.Querier/Query|/logproto.Querier/QuerySample|/logproto.Querier/Series|/logproto.StreamData/GetStreamRates)\"}) by (route) ", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{ route }} 
Average", diff --git a/production/loki-mixin/dashboards/loki-deletion.libsonnet b/production/loki-mixin/dashboards/loki-deletion.libsonnet index a1c50ecfa6911..a4c4ae779e701 100644 --- a/production/loki-mixin/dashboards/loki-deletion.libsonnet +++ b/production/loki-mixin/dashboards/loki-deletion.libsonnet @@ -2,9 +2,7 @@ local g = import 'grafana-builder/grafana.libsonnet'; local utils = import 'mixin-utils/utils.libsonnet'; (import 'dashboard-utils.libsonnet') { - local compactor_matcher = if $._config.meta_monitoring.enabled - then 'pod=~"(compactor|%s-backend.*|loki-single-binary)"' % $._config.ssd.pod_prefix_matcher - else if $._config.ssd.enabled then 'container="loki", pod=~"%s-backend.*"' % $._config.ssd.pod_prefix_matcher else 'container="compactor"', + local compactor_matcher = 'pod=~"(compactor|%s-backend.*|loki-single-binary)"' % $._config.ssd.pod_prefix_matcher, grafanaDashboards+:: { 'loki-deletion.json': @@ -50,7 +48,7 @@ local utils = import 'mixin-utils/utils.libsonnet'; ) .addPanel( $.newQueryPanel('Compactor memory usage (MiB)') + - g.queryPanel('go_memstats_heap_inuse_bytes{%s, container="compactor"} / 1024 / 1024 ' % $.namespaceMatcher(), ' {{pod}} '), + g.queryPanel('go_memstats_heap_inuse_bytes{%s, %s} / 1024 / 1024 ' % [$.namespaceMatcher(), compactor_matcher], ' {{pod}} '), ) .addPanel( $.newQueryPanel('Compaction run duration (seconds)') + diff --git a/production/loki-mixin/dashboards/loki-reads.libsonnet b/production/loki-mixin/dashboards/loki-reads.libsonnet index ea791408bb7a5..0de9bd801e2ba 100644 --- a/production/loki-mixin/dashboards/loki-reads.libsonnet +++ b/production/loki-mixin/dashboards/loki-reads.libsonnet @@ -249,7 +249,7 @@ local utils = import 'mixin-utils/utils.libsonnet'; $.newQueryPanel('Latency', 'ms') + utils.latencyRecordingRulePanel( 'loki_request_duration_seconds', - dashboards['loki-reads.json'].clusterMatchers + dashboards['loki-reads.json'].matchers.bloomGateway + [utils.selector.re('route', grpc_routes)], + dashboards['loki-reads.json'].clusterMatchers + dashboards['loki-reads.json'].matchers.indexGateway + [utils.selector.re('route', grpc_routes)], sum_by=['route'] ) ) diff --git a/tools/kafka/sasl-plain/docker-compose.yml b/tools/kafka/sasl-plain/docker-compose.yml index f875885a800de..81a824690df07 100644 --- a/tools/kafka/sasl-plain/docker-compose.yml +++ b/tools/kafka/sasl-plain/docker-compose.yml @@ -14,7 +14,7 @@ services: - ./conf:/etc/kafka/secrets kafka: - image: confluentinc/cp-kafka:6.2.1 + image: confluentinc/cp-kafka:6.2.15 depends_on: - zookeeper ports: diff --git a/tools/kafka/sasl-scram/docker-compose.yml b/tools/kafka/sasl-scram/docker-compose.yml index f01ec571d1774..5d255ee210c87 100644 --- a/tools/kafka/sasl-scram/docker-compose.yml +++ b/tools/kafka/sasl-scram/docker-compose.yml @@ -14,7 +14,7 @@ services: - ./conf:/etc/kafka/secrets kafka: - image: confluentinc/cp-kafka:6.2.1 + image: confluentinc/cp-kafka:6.2.15 depends_on: - zookeeper ports: diff --git a/tools/kafka/sasl-ssl/docker-compose.yml b/tools/kafka/sasl-ssl/docker-compose.yml index a3917f7d10a3c..d4bfdc22b563b 100644 --- a/tools/kafka/sasl-ssl/docker-compose.yml +++ b/tools/kafka/sasl-ssl/docker-compose.yml @@ -14,7 +14,7 @@ services: - ./conf:/etc/kafka/secrets kafka: - image: confluentinc/cp-kafka:6.2.1 + image: confluentinc/cp-kafka:6.2.15 depends_on: - zookeeper ports: diff --git a/tools/kafka/ssl/docker-compose.yml b/tools/kafka/ssl/docker-compose.yml index 6df926af85ffe..7a98ff4190fe8 100644 --- a/tools/kafka/ssl/docker-compose.yml +++ 
b/tools/kafka/ssl/docker-compose.yml @@ -9,7 +9,7 @@ services: ZOOKEEPER_CLIENT_PORT: 22181 kafka: - image: confluentinc/cp-kafka:6.2.1 + image: confluentinc/cp-kafka:6.2.15 depends_on: - zookeeper ports: diff --git a/tools/lambda-promtail/lambda-promtail/json_stream.go b/tools/lambda-promtail/lambda-promtail/json_stream.go index bce105cf86fe2..b33d0bc2223e5 100644 --- a/tools/lambda-promtail/lambda-promtail/json_stream.go +++ b/tools/lambda-promtail/lambda-promtail/json_stream.go @@ -28,8 +28,7 @@ func NewJSONStream(recordChan chan Record) Stream { func (s Stream) Start(r io.ReadCloser, tokenCountToTarget int) { defer r.Close() defer close(s.records) - var decoder *json.Decoder - decoder = json.NewDecoder(r) + decoder := json.NewDecoder(r) // Skip the provided count of JSON tokens to get the the target array, ex: "{" "Record" for i := 0; i < tokenCountToTarget; i++ { diff --git a/tools/lambda-promtail/lambda-promtail/kinesis.go b/tools/lambda-promtail/lambda-promtail/kinesis.go index 31b619284b06d..64b57d91fd8a9 100644 --- a/tools/lambda-promtail/lambda-promtail/kinesis.go +++ b/tools/lambda-promtail/lambda-promtail/kinesis.go @@ -4,7 +4,9 @@ import ( "bytes" "compress/gzip" "context" + "encoding/json" "io" + "log" "time" "github.com/aws/aws-lambda-go/events" @@ -13,36 +15,34 @@ import ( "github.com/grafana/loki/pkg/logproto" ) -func parseKinesisEvent(ctx context.Context, b batchIf, ev *events.KinesisEvent) error { +func parseKinesisEvent(ctx context.Context, b *batch, ev *events.KinesisEvent) error { if ev == nil { return nil } - for _, record := range ev.Records { - timestamp := time.Unix(record.Kinesis.ApproximateArrivalTimestamp.Unix(), 0) - - labels := model.LabelSet{ - model.LabelName("__aws_log_type"): model.LabelValue("kinesis"), - model.LabelName("__aws_kinesis_event_source_arn"): model.LabelValue(record.EventSourceArn), - } - - labels = applyLabels(labels) + var data []byte + var recordData events.CloudwatchLogsData + var err error - // Check if the data is gzipped by inspecting the 'data' field + for _, record := range ev.Records { if isGzipped(record.Kinesis.Data) { - uncompressedData, err := ungzipData(record.Kinesis.Data) + data, err = ungzipData(record.Kinesis.Data) if err != nil { - return err + log.Printf("Error decompressing data: %v", err) } - b.add(ctx, entry{labels, logproto.Entry{ - Line: string(uncompressedData), - Timestamp: timestamp, - }}) } else { - b.add(ctx, entry{labels, logproto.Entry{ - Line: string(record.Kinesis.Data), - Timestamp: timestamp, - }}) + data = record.Kinesis.Data + } + + recordData, err = unmarshalData(data) + if err != nil { + log.Printf("Error unmarshalling data: %v", err) + } + + labels := createLabels(record, recordData) + + if err := processLogEvents(ctx, b, recordData.LogEvents, labels); err != nil { + return err } } @@ -79,3 +79,39 @@ func ungzipData(data []byte) ([]byte, error) { return io.ReadAll(reader) } + +func unmarshalData(data []byte) (events.CloudwatchLogsData, error) { + var recordData events.CloudwatchLogsData + err := json.Unmarshal(data, &recordData) + return recordData, err +} + +func createLabels(record events.KinesisEventRecord, recordData events.CloudwatchLogsData) model.LabelSet { + labels := model.LabelSet{ + model.LabelName("__aws_log_type"): model.LabelValue("kinesis"), + model.LabelName("__aws_kinesis_event_source_arn"): model.LabelValue(record.EventSourceArn), + model.LabelName("__aws_cloudwatch_log_group"): model.LabelValue(recordData.LogGroup), + model.LabelName("__aws_cloudwatch_owner"): 
model.LabelValue(recordData.Owner), + } + + if keepStream { + labels[model.LabelName("__aws_cloudwatch_log_stream")] = model.LabelValue(recordData.LogStream) + } + + return applyLabels(labels) +} + +func processLogEvents(ctx context.Context, b *batch, logEvents []events.CloudwatchLogsLogEvent, labels model.LabelSet) error { + for _, logEvent := range logEvents { + timestamp := time.UnixMilli(logEvent.Timestamp) + + if err := b.add(ctx, entry{labels, logproto.Entry{ + Line: logEvent.Message, + Timestamp: timestamp, + }}); err != nil { + return err + } + } + + return nil +} diff --git a/tools/lambda-promtail/lambda-promtail/kinesis_test.go b/tools/lambda-promtail/lambda-promtail/kinesis_test.go index bd29cb6a7b902..9a29b69f05624 100644 --- a/tools/lambda-promtail/lambda-promtail/kinesis_test.go +++ b/tools/lambda-promtail/lambda-promtail/kinesis_test.go @@ -12,28 +12,6 @@ import ( "github.com/grafana/loki/pkg/logproto" ) -type MockBatch struct { - streams map[string]*logproto.Stream - size int -} - -func (b *MockBatch) add(_ context.Context, e entry) error { - b.streams[e.labels.String()] = &logproto.Stream{ - Labels: e.labels.String(), - } - return nil -} - -func (b *MockBatch) flushBatch(_ context.Context) error { - return nil -} -func (b *MockBatch) encode() ([]byte, int, error) { - return nil, 0, nil -} -func (b *MockBatch) createPushRequest() (*logproto.PushRequest, int) { - return nil, 0 -} - func ReadJSONFromFile(t *testing.T, inputFile string) []byte { inputJSON, err := os.ReadFile(inputFile) if err != nil { @@ -45,6 +23,9 @@ func ReadJSONFromFile(t *testing.T, inputFile string) []byte { func TestLambdaPromtail_KinesisParseEvents(t *testing.T) { inputJson, err := os.ReadFile("../testdata/kinesis-event.json") + mockBatch := &batch{ + streams: map[string]*logproto.Stream{}, + } if err != nil { t.Errorf("could not open test file. 
details: %v", err) @@ -56,13 +37,7 @@ func TestLambdaPromtail_KinesisParseEvents(t *testing.T) { } ctx := context.TODO() - b := &MockBatch{ - streams: map[string]*logproto.Stream{}, - } - err = parseKinesisEvent(ctx, b, &testEvent) + err = parseKinesisEvent(ctx, mockBatch, &testEvent) require.Nil(t, err) - - labels_str := "{__aws_kinesis_event_source_arn=\"arn:aws:kinesis:us-east-1:123456789012:stream/simple-stream\", __aws_log_type=\"kinesis\"}" - require.Contains(t, b.streams, labels_str) } diff --git a/tools/lambda-promtail/lambda-promtail/main.go b/tools/lambda-promtail/lambda-promtail/main.go index 0e0df1e880041..0a58480c20743 100644 --- a/tools/lambda-promtail/lambda-promtail/main.go +++ b/tools/lambda-promtail/lambda-promtail/main.go @@ -187,7 +187,7 @@ func checkEventType(ev map[string]interface{}) (interface{}, error) { reader.Seek(0, 0) } - return nil, fmt.Errorf("unknown event type!") + return nil, fmt.Errorf("unknown event type") } func handler(ctx context.Context, ev map[string]interface{}) error { @@ -210,7 +210,7 @@ func handler(ctx context.Context, ev map[string]interface{}) error { event, err := checkEventType(ev) if err != nil { - level.Error(*pClient.log).Log("err", fmt.Errorf("invalid event: %s\n", ev)) + level.Error(*pClient.log).Log("err", fmt.Errorf("invalid event: %s", ev)) return err } diff --git a/tools/lambda-promtail/lambda-promtail/promtail.go b/tools/lambda-promtail/lambda-promtail/promtail.go index c1d01c36b174b..ef5dae14c2a04 100644 --- a/tools/lambda-promtail/lambda-promtail/promtail.go +++ b/tools/lambda-promtail/lambda-promtail/promtail.go @@ -42,13 +42,6 @@ type batch struct { client Client } -type batchIf interface { - add(ctx context.Context, e entry) error - encode() ([]byte, int, error) - createPushRequest() (*logproto.PushRequest, int) - flushBatch(ctx context.Context) error -} - func newBatch(ctx context.Context, pClient Client, entries ...entry) (*batch, error) { b := &batch{ streams: map[string]*logproto.Stream{}, @@ -158,7 +151,7 @@ func (c *promtailClient) sendToPromtail(ctx context.Context, b *batch) error { if status > 0 && status != 429 && status/100 != 5 { break } - level.Error(*c.log).Log("err", fmt.Errorf("error sending batch, will retry, status: %d error: %s\n", status, err)) + level.Error(*c.log).Log("err", fmt.Errorf("error sending batch, will retry, status: %d error: %s", status, err)) backoff.Wait() // Make sure it sends at least once before checking for retry. @@ -168,7 +161,7 @@ func (c *promtailClient) sendToPromtail(ctx context.Context, b *batch) error { } if err != nil { - level.Error(*c.log).Log("err", fmt.Errorf("Failed to send logs! %s\n", err)) + level.Error(*c.log).Log("err", fmt.Errorf("failed to send logs! 
%s", err)) return err } diff --git a/tools/lambda-promtail/lambda-promtail/promtail_client.go b/tools/lambda-promtail/lambda-promtail/promtail_client.go index 470a760b5233b..a322e82452b7a 100644 --- a/tools/lambda-promtail/lambda-promtail/promtail_client.go +++ b/tools/lambda-promtail/lambda-promtail/promtail_client.go @@ -4,7 +4,6 @@ import ( "context" "crypto/tls" "net/http" - "net/url" "time" "github.com/go-kit/log" @@ -25,7 +24,6 @@ type promtailClient struct { type promtailClientConfig struct { backoff *backoff.Config http *httpClientConfig - url *url.URL } type httpClientConfig struct { diff --git a/tools/lambda-promtail/lambda-promtail/s3.go b/tools/lambda-promtail/lambda-promtail/s3.go index 77694ba603432..2d2b7fe090fe9 100644 --- a/tools/lambda-promtail/lambda-promtail/s3.go +++ b/tools/lambda-promtail/lambda-promtail/s3.go @@ -50,6 +50,7 @@ const ( LB_NLB_TYPE string = "net" LB_ALB_TYPE string = "app" WAF_LOG_TYPE string = "WAFLogs" + GUARDDUTY_LOG_TYPE string = "GuardDuty" ) var ( @@ -75,6 +76,10 @@ var ( // source: https://docs.aws.amazon.com/waf/latest/developerguide/logging-s3.html // format: aws-waf-logs-suffix[/prefix]/AWSLogs/aws-account-id/WAFLogs/region/webacl-name/year/month/day/hour/minute/aws-account-id_waflogs_region_webacl-name_timestamp_hash.log.gz // example: aws-waf-logs-test/AWSLogs/11111111111/WAFLogs/us-east-1/TEST-WEBACL/2021/10/28/19/50/11111111111_waflogs_us-east-1_TEST-WEBACL_20211028T1950Z_e0ca43b5.log.gz + // AWS GuardDuty + // source: https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_exportfindings.html + // format: my-bucket/AWSLogs/aws-account-id/GuardDuty/region/year/month/day/random-string.jsonl.gz + // example: my-bucket/AWSLogs/123456789012/GuardDuty/us-east-1/2024/05/30/07a3f2ce-1485-3031-b842-e1f324c4a48d.jsonl.gz defaultFilenameRegex = regexp.MustCompile(`AWSLogs\/(?P\d+)\/(?P[a-zA-Z0-9_\-]+)\/(?P[\w-]+)\/(?P\d+)\/(?P\d+)\/(?P\d+)\/\d+\_(?:elasticloadbalancing|vpcflowlogs)_(?:\w+-\w+-(?:\w+-)?\d)_(?:(?Papp|net)\.*?)?(?P[a-zA-Z0-9\-]+)`) defaultTimestampRegex = regexp.MustCompile(`(?P\d+-\d+-\d+T\d+:\d+:\d+(?:\.\d+Z)?)`) cloudtrailFilenameRegex = regexp.MustCompile(`AWSLogs\/(?Po-[a-z0-9]{10,32})?\/?(?P\d+)\/(?P[a-zA-Z0-9_\-]+)\/(?P[\w-]+)\/(?P\d+)\/(?P\d+)\/(?P\d+)\/\d+\_(?:CloudTrail|CloudTrail-Digest)_(?:\w+-\w+-(?:\w+-)?\d)_(?:(?:app|nlb|net)\.*?)?.+_(?P[a-zA-Z0-9\-]+)`) @@ -82,6 +87,7 @@ var ( cloudfrontTimestampRegex = regexp.MustCompile(`(?P\d+-\d+-\d+\s\d+:\d+:\d+)`) wafFilenameRegex = regexp.MustCompile(`AWSLogs\/(?P\d+)\/(?PWAFLogs)\/(?P[\w-]+)\/(?P[\w-]+)\/(?P\d+)\/(?P\d+)\/(?P\d+)\/(?P\d+)\/(?P\d+)\/\d+\_waflogs\_[\w-]+_[\w-]+_\d+T\d+Z_\w+`) wafTimestampRegex = regexp.MustCompile(`"timestamp":\s*(?P\d+),`) + guarddutyFilenameRegex = regexp.MustCompile(`AWSLogs\/(?P\d+)\/(?PGuardDuty)\/(?P[\w-]+)\/(?P\d+)\/(?P\d+)\/(?P\d+)\/.+`) parsers = map[string]parserConfig{ FLOW_LOG_TYPE: { logTypeLabel: "s3_vpc_flow", @@ -122,6 +128,14 @@ var ( timestampRegex: wafTimestampRegex, timestampType: "unix", }, + GUARDDUTY_LOG_TYPE: { + logTypeLabel: "s3_guardduty", + filenameRegex: guarddutyFilenameRegex, + ownerLabelKey: "account_id", + timestampFormat: time.RFC3339, + timestampRegex: defaultTimestampRegex, + timestampType: "string", + }, } ) @@ -165,7 +179,7 @@ func parseS3Log(ctx context.Context, b *batch, labels map[string]string, obj io. 
ls = applyLabels(ls) // extract the timestamp of the nested event and sends the rest as raw json - if labels["type"] == CLOUDTRAIL_LOG_TYPE { + if labels["type"] == CLOUDTRAIL_LOG_TYPE || labels["type"] == GUARDDUTY_LOG_TYPE { records := make(chan Record) jsonStream := NewJSONStream(records) go jsonStream.Start(gzreader, parser.skipHeaderCount) @@ -187,17 +201,17 @@ func parseS3Log(ctx context.Context, b *batch, labels map[string]string, obj io. var lineCount int for scanner.Scan() { - log_line := scanner.Text() + logLine := scanner.Text() lineCount++ if lineCount <= parser.skipHeaderCount { continue } if printLogLine { - fmt.Println(log_line) + fmt.Println(logLine) } timestamp := time.Now() - match := parser.timestampRegex.FindStringSubmatch(log_line) + match := parser.timestampRegex.FindStringSubmatch(logLine) if len(match) > 0 { if labels["lb_type"] == LB_NLB_TYPE { // NLB logs don't have .SSSSSSZ suffix. RFC3339 requires a TZ specifier, use UTC @@ -222,7 +236,7 @@ func parseS3Log(ctx context.Context, b *batch, labels map[string]string, obj io. } if err := b.add(ctx, entry{ls, logproto.Entry{ - Line: log_line, + Line: logLine, Timestamp: timestamp, }}); err != nil { return err @@ -281,7 +295,7 @@ func processS3Event(ctx context.Context, ev *events.S3Event, pc Client, log *log ExpectedBucketOwner: aws.String(labels["bucketOwner"]), }) if err != nil { - return fmt.Errorf("Failed to get object %s from bucket %s on account %s\n, %s", labels["key"], labels["bucket"], labels["bucketOwner"], err) + return fmt.Errorf("failed to get object %s from bucket %s on account %s, %s", labels["key"], labels["bucket"], labels["bucketOwner"], err) } err = parseS3Log(ctx, batch, labels, obj.Body, log) if err != nil { diff --git a/tools/lambda-promtail/lambda-promtail/s3_test.go b/tools/lambda-promtail/lambda-promtail/s3_test.go index 644ad12f17276..4cbcacc6e7577 100644 --- a/tools/lambda-promtail/lambda-promtail/s3_test.go +++ b/tools/lambda-promtail/lambda-promtail/s3_test.go @@ -93,6 +93,38 @@ func Test_getLabels(t *testing.T) { }, wantErr: false, }, + { + name: "s3_guardduty", + args: args{ + record: events.S3EventRecord{ + AWSRegion: "us-east-1", + S3: events.S3Entity{ + Bucket: events.S3Bucket{ + Name: "s3_guardduty_test", + OwnerIdentity: events.S3UserIdentity{ + PrincipalID: "test", + }, + }, + Object: events.S3Object{ + Key: "AWSLogs/123456789012/GuardDuty/us-east-1/2024/05/30/07a3f2ce-1485-3031-b842-e1f324c4a48d.jsonl.gz", + }, + }, + }, + }, + want: map[string]string{ + "account_id": "123456789012", + "bucket": "s3_guardduty_test", + "bucket_owner": "test", + "bucket_region": "us-east-1", + "day": "30", + "key": "AWSLogs/123456789012/GuardDuty/us-east-1/2024/05/30/07a3f2ce-1485-3031-b842-e1f324c4a48d.jsonl.gz", + "month": "05", + "region": "us-east-1", + "type": GUARDDUTY_LOG_TYPE, + "year": "2024", + }, + wantErr: false, + }, { name: "s3_flow_logs", args: args{ diff --git a/tools/lambda-promtail/main.tf b/tools/lambda-promtail/main.tf index 37f7e9ede7d0e..3c7e74fdc7e5b 100644 --- a/tools/lambda-promtail/main.tf +++ b/tools/lambda-promtail/main.tf @@ -251,7 +251,7 @@ resource "aws_s3_bucket_notification" "this" { lambda_function_arn = aws_lambda_function.this.arn events = ["s3:ObjectCreated:*"] filter_prefix = "AWSLogs/" - filter_suffix = ".log.gz" + filter_suffix = var.filter_suffix } depends_on = [ diff --git a/tools/lambda-promtail/testdata/kinesis-event.json b/tools/lambda-promtail/testdata/kinesis-event.json index c3cb2020f1e83..9250b093bd66b 100644 --- 
a/tools/lambda-promtail/testdata/kinesis-event.json +++ b/tools/lambda-promtail/testdata/kinesis-event.json @@ -1,36 +1,25 @@ { - "Records": [ - { - "kinesis": { - "kinesisSchemaVersion": "1.0", - "partitionKey": "s1", - "sequenceNumber": "49568167373333333333333333333333333333333333333333333333", - "data": "SGVsbG8gV29ybGQ=", - "approximateArrivalTimestamp": 1480641523.477 - }, - "eventSource": "aws:kinesis", - "eventVersion": "1.0", - "eventID": "shardId-000000000000:49568167373333333333333333333333333333333333333333333333", - "eventName": "aws:kinesis:record", - "invokeIdentityArn": "arn:aws:iam::123456789012:role/LambdaRole", - "awsRegion": "us-east-1", - "eventSourceARN": "arn:aws:kinesis:us-east-1:123456789012:stream/simple-stream" - }, - { - "kinesis": { - "kinesisSchemaVersion": "1.0", - "partitionKey": "s1", - "sequenceNumber": "49568167373333333334444444444444444444444444444444444444", - "data": "SGVsbG8gV29ybGQ=", - "approximateArrivalTimestamp": 1480841523.477 - }, - "eventSource": "aws:kinesis", - "eventVersion": "1.0", - "eventID": "shardId-000000000000:49568167373333333334444444444444444444444444444444444444", - "eventName": "aws:kinesis:record", - "invokeIdentityArn": "arn:aws:iam::123456789012:role/LambdaRole", - "awsRegion": "us-east-1", - "eventSourceARN": "arn:aws:kinesis:us-east-1:123456789012:stream/simple-stream" - } - ] -} \ No newline at end of file + "messageType": "DATA_MESSAGE", + "owner": "some_owner", + "logGroup": "test-logroup", + "logStream": "test-logstream", + "subscriptionFilters": ["test-subscription"], + "logEvents": [ + { + "id": "98237509", + "timestamp": 1719922604969, + "message": "some_message" + }, + { + "id": "20396236", + "timestamp": 1719922604969, + "message": "some_message" + }, + { + "id": "23485670", + "timestamp": 1719922604969, + "message": "some_message" + } + ] +} + diff --git a/tools/lambda-promtail/variables.tf b/tools/lambda-promtail/variables.tf index bda956bc855b3..74dceb1a4a199 100644 --- a/tools/lambda-promtail/variables.tf +++ b/tools/lambda-promtail/variables.tf @@ -16,6 +16,12 @@ variable "bucket_names" { default = [] } +variable "filter_suffix" { + type = string + description = "Suffix for S3 bucket notification filter" + default = ".gz" +} + variable "log_group_names" { type = set(string) description = "List of CloudWatch Log Group names to create Subscription Filters for." 
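The loki-deletion dashboard hunks earlier in this diff widen the old `container="compactor"` / `container="loki"` selectors into a single pod regex so one set of panels covers microservices, SSD, and single-binary deployments. Prometheus fully anchors `=~` matchers, which the explicit `^...$` below makes visible; a standalone sketch with illustrative pod names (not taken from this diff):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The matcher the dashboards now use, with the SSD pod prefix already
	// substituted (the mixin fills it in via $._config.ssd.pod_prefix_matcher;
	// "loki.*|enterprise-logs" here). ^...$ mirrors PromQL's implicit anchoring.
	matcher := regexp.MustCompile(`^(compactor|(loki.*|enterprise-logs)-backend.*|loki-single-binary)$`)

	// Hypothetical pod names covering the deployment modes.
	pods := []string{
		"compactor",                 // microservices mode
		"loki-backend-0",            // simple scalable (SSD) mode
		"enterprise-logs-backend-1", // enterprise SSD mode
		"loki-single-binary",        // single binary mode
		"loki-read-0",               // should not match
	}
	for _, p := range pods {
		fmt.Printf("%-28s matches: %v\n", p, matcher.MatchString(p))
	}
}
```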
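json_stream.go (touched above only to tidy the decoder declaration) is also the machinery GuardDuty objects flow through below: Stream.Start skips a fixed number of leading JSON tokens to land inside the target array, then decodes one record at a time instead of buffering the whole object. A compact sketch of that token-skipping technique; the three-token skip and the `Records` payload shape here are illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	body := `{"Records": [{"id": 1}, {"id": 2}, {"id": 3}]}`

	dec := json.NewDecoder(strings.NewReader(body))

	// Skip the leading tokens `{`, `"Records"`, `[` (three tokens),
	// leaving the decoder positioned inside the array.
	for i := 0; i < 3; i++ {
		if _, err := dec.Token(); err != nil {
			panic(err)
		}
	}

	// Decode array elements one by one, keeping memory use flat even
	// for large objects.
	for dec.More() {
		var rec json.RawMessage
		if err := dec.Decode(&rec); err != nil {
			panic(err)
		}
		fmt.Println(string(rec))
	}
}
```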
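parseKinesisEvent now decompresses a record only when it looks gzipped, via the isGzipped/ungzipData helpers in the hunk. Those helpers reduce to a two-byte magic-number check plus compress/gzip; a minimal, self-contained sketch of the same pattern (the bodies are a sketch, not the exact upstream implementations):

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

// isGzipped reports whether data starts with the gzip magic number 0x1f 0x8b.
func isGzipped(data []byte) bool {
	return len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b
}

// ungzipData decompresses a gzip payload into its raw bytes.
func ungzipData(data []byte) ([]byte, error) {
	r, err := gzip.NewReader(bytes.NewReader(data))
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return io.ReadAll(r)
}

func main() {
	// Build a gzipped payload so the round trip is self-contained.
	var buf bytes.Buffer
	w := gzip.NewWriter(&buf)
	w.Write([]byte(`{"messageType":"DATA_MESSAGE"}`))
	w.Close()

	if isGzipped(buf.Bytes()) {
		raw, err := ungzipData(buf.Bytes())
		if err != nil {
			panic(err)
		}
		fmt.Println(string(raw))
	}
}
```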
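The rewrite also changes what a Kinesis record is assumed to contain: instead of forwarding the raw bytes as a single log line, the handler unmarshals a CloudWatch Logs subscription payload (the shape the new kinesis-event.json testdata uses) and emits one entry per log event, using time.UnixMilli because CloudWatch event timestamps are Unix milliseconds. A sketch of just that decode step, with the same aws-lambda-go types the hunk imports:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/aws/aws-lambda-go/events"
)

func main() {
	// Trimmed-down version of the new testdata payload, after any gzip step.
	payload := []byte(`{
		"messageType": "DATA_MESSAGE",
		"owner": "some_owner",
		"logGroup": "test-logroup",
		"logStream": "test-logstream",
		"logEvents": [
			{"id": "98237509", "timestamp": 1719922604969, "message": "some_message"}
		]
	}`)

	var data events.CloudwatchLogsData
	if err := json.Unmarshal(payload, &data); err != nil {
		panic(err)
	}

	for _, ev := range data.LogEvents {
		// CloudWatch log event timestamps are Unix milliseconds,
		// hence time.UnixMilli rather than time.Unix.
		ts := time.UnixMilli(ev.Timestamp)
		fmt.Printf("%s %s (group=%s stream=%s)\n",
			ts.UTC().Format(time.RFC3339), ev.Message, data.LogGroup, data.LogStream)
	}
}
```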
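Several hunks above only reword errors: "unknown event type!" loses its exclamation mark, and the logged errors drop capitalization and trailing newlines. That matches the usual Go convention (enforced by linters such as staticcheck's ST1005) that error strings are lowercase and unpunctuated, because they get wrapped into longer messages:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	// Capitalized, punctuated, newline-terminated: reads badly once wrapped.
	bad := errors.New("Failed to send logs!\n")
	fmt.Println(fmt.Errorf("handler: %w", bad))

	// Lowercase, no punctuation: composes cleanly.
	good := errors.New("failed to send logs")
	fmt.Println(fmt.Errorf("handler: %w", good))
}
```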
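For context on the sendToPromtail loop touched above: its break condition `status > 0 && status != 429 && status/100 != 5` means a push is retried only on transport errors (no status at all), 429 rate limits, and 5xx responses. Restated as a small predicate (the `retryable` name is mine, not from the diff):

```go
package main

import "fmt"

// retryable mirrors the sendToPromtail loop: a push is retried only when
// there was no HTTP response at all (transport error), the server rate
// limited us (429), or it failed with a 5xx.
func retryable(status int) bool {
	return status <= 0 || status == 429 || status/100 == 5
}

func main() {
	for _, s := range []int{0, 200, 400, 429, 500, 503} {
		fmt.Printf("status %3d -> retry: %v\n", s, retryable(s))
	}
}
```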
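The new GuardDuty support in s3.go hangs off guarddutyFilenameRegex, whose named capture groups become stream labels; the s3_test.go case above pins exactly account_id, type, region, year, month, and day for a `.jsonl.gz` findings key. A standalone sketch of that extraction; the regex is copied from the hunk, and the SubexpNames loop approximates what getLabels does:

```go
package main

import (
	"fmt"
	"regexp"
)

var guarddutyFilenameRegex = regexp.MustCompile(`AWSLogs\/(?P<account_id>\d+)\/(?P<type>GuardDuty)\/(?P<region>[\w-]+)\/(?P<year>\d+)\/(?P<month>\d+)\/(?P<day>\d+)\/.+`)

func main() {
	key := "AWSLogs/123456789012/GuardDuty/us-east-1/2024/05/30/07a3f2ce-1485-3031-b842-e1f324c4a48d.jsonl.gz"

	match := guarddutyFilenameRegex.FindStringSubmatch(key)
	if match == nil {
		panic("object key did not match")
	}

	// Map named capture groups to label values, roughly as getLabels does.
	labels := map[string]string{}
	for i, name := range guarddutyFilenameRegex.SubexpNames() {
		if i > 0 && name != "" {
			labels[name] = match[i]
		}
	}
	fmt.Println(labels)
	// map[account_id:123456789012 day:30 month:05 region:us-east-1 type:GuardDuty year:2024]
}
```

This is also why main.tf's notification suffix becomes the `filter_suffix` variable (default ".gz"): GuardDuty exports end in `.jsonl.gz`, which the previous hard-coded `.log.gz` filter would have skipped.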
diff --git a/vendor/github.com/Microsoft/go-winio/.golangci.yml b/vendor/github.com/Microsoft/go-winio/.golangci.yml index 7b503d26a36f6..faedfe937a7a2 100644 --- a/vendor/github.com/Microsoft/go-winio/.golangci.yml +++ b/vendor/github.com/Microsoft/go-winio/.golangci.yml @@ -1,7 +1,3 @@ -run: - skip-dirs: - - pkg/etw/sample - linters: enable: # style @@ -20,9 +16,13 @@ linters: - gofmt # files are gofmt'ed - gosec # security - nilerr # returns nil even with non-nil error + - thelper # test helpers without t.Helper() - unparam # unused function params issues: + exclude-dirs: + - pkg/etw/sample + exclude-rules: # err is very often shadowed in nested scopes - linters: @@ -69,9 +69,7 @@ linters-settings: # struct order is often for Win32 compat # also, ignore pointer bytes/GC issues for now until performance becomes an issue - fieldalignment - check-shadowing: true nolintlint: - allow-leading-space: false require-explanation: true require-specific: true revive: diff --git a/vendor/github.com/Microsoft/go-winio/backup.go b/vendor/github.com/Microsoft/go-winio/backup.go index 09621c88463a5..b54341daacb74 100644 --- a/vendor/github.com/Microsoft/go-winio/backup.go +++ b/vendor/github.com/Microsoft/go-winio/backup.go @@ -10,14 +10,14 @@ import ( "io" "os" "runtime" - "syscall" "unicode/utf16" + "github.com/Microsoft/go-winio/internal/fs" "golang.org/x/sys/windows" ) -//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead -//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite +//sys backupRead(h windows.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead +//sys backupWrite(h windows.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite const ( BackupData = uint32(iota + 1) @@ -104,7 +104,7 @@ func (r *BackupStreamReader) Next() (*BackupHeader, error) { if err := binary.Read(r.r, binary.LittleEndian, name); err != nil { return nil, err } - hdr.Name = syscall.UTF16ToString(name) + hdr.Name = windows.UTF16ToString(name) } if wsi.StreamID == BackupSparseBlock { if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil { @@ -205,7 +205,7 @@ func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader { // Read reads a backup stream from the file by calling the Win32 API BackupRead(). func (r *BackupFileReader) Read(b []byte) (int, error) { var bytesRead uint32 - err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx) + err := backupRead(windows.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx) if err != nil { return 0, &os.PathError{Op: "BackupRead", Path: r.f.Name(), Err: err} } @@ -220,7 +220,7 @@ func (r *BackupFileReader) Read(b []byte) (int, error) { // the underlying file. func (r *BackupFileReader) Close() error { if r.ctx != 0 { - _ = backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) + _ = backupRead(windows.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) runtime.KeepAlive(r.f) r.ctx = 0 } @@ -244,7 +244,7 @@ func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter { // Write restores a portion of the file using the provided backup stream. 
func (w *BackupFileWriter) Write(b []byte) (int, error) { var bytesWritten uint32 - err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx) + err := backupWrite(windows.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx) if err != nil { return 0, &os.PathError{Op: "BackupWrite", Path: w.f.Name(), Err: err} } @@ -259,7 +259,7 @@ func (w *BackupFileWriter) Write(b []byte) (int, error) { // close the underlying file. func (w *BackupFileWriter) Close() error { if w.ctx != 0 { - _ = backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) + _ = backupWrite(windows.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) runtime.KeepAlive(w.f) w.ctx = 0 } @@ -271,17 +271,14 @@ func (w *BackupFileWriter) Close() error { // // If the file opened was a directory, it cannot be used with Readdir(). func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) { - winPath, err := syscall.UTF16FromString(path) - if err != nil { - return nil, err - } - h, err := syscall.CreateFile(&winPath[0], - access, - share, + h, err := fs.CreateFile(path, + fs.AccessMask(access), + fs.FileShareMode(share), nil, - createmode, - syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, - 0) + fs.FileCreationDisposition(createmode), + fs.FILE_FLAG_BACKUP_SEMANTICS|fs.FILE_FLAG_OPEN_REPARSE_POINT, + 0, + ) if err != nil { err = &os.PathError{Op: "open", Path: path, Err: err} return nil, err diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go index 175a99d3f429f..fe82a180dbddb 100644 --- a/vendor/github.com/Microsoft/go-winio/file.go +++ b/vendor/github.com/Microsoft/go-winio/file.go @@ -15,26 +15,11 @@ import ( "golang.org/x/sys/windows" ) -//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx -//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort -//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus -//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes -//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult - -type atomicBool int32 - -func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } -func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) } -func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } - -//revive:disable-next-line:predeclared Keep "new" to maintain consistency with "atomic" pkg -func (b *atomicBool) swap(new bool) bool { - var newInt int32 - if new { - newInt = 1 - } - return atomic.SwapInt32((*int32)(b), newInt) == 1 -} +//sys cancelIoEx(file windows.Handle, o *windows.Overlapped) (err error) = CancelIoEx +//sys createIoCompletionPort(file windows.Handle, port windows.Handle, key uintptr, threadCount uint32) (newport windows.Handle, err error) = CreateIoCompletionPort +//sys getQueuedCompletionStatus(port windows.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus +//sys setFileCompletionNotificationModes(h windows.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes +//sys wsaGetOverlappedResult(h 
windows.Handle, o *windows.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult var ( ErrFileClosed = errors.New("file has already been closed") @@ -50,7 +35,7 @@ func (*timeoutError) Temporary() bool { return true } type timeoutChan chan struct{} var ioInitOnce sync.Once -var ioCompletionPort syscall.Handle +var ioCompletionPort windows.Handle // ioResult contains the result of an asynchronous IO operation. type ioResult struct { @@ -60,12 +45,12 @@ type ioResult struct { // ioOperation represents an outstanding asynchronous Win32 IO. type ioOperation struct { - o syscall.Overlapped + o windows.Overlapped ch chan ioResult } func initIO() { - h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff) + h, err := createIoCompletionPort(windows.InvalidHandle, 0, 0, 0xffffffff) if err != nil { panic(err) } @@ -76,10 +61,10 @@ func initIO() { // win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall. // It takes ownership of this handle and will close it if it is garbage collected. type win32File struct { - handle syscall.Handle + handle windows.Handle wg sync.WaitGroup wgLock sync.RWMutex - closing atomicBool + closing atomic.Bool socket bool readDeadline deadlineHandler writeDeadline deadlineHandler @@ -90,11 +75,11 @@ type deadlineHandler struct { channel timeoutChan channelLock sync.RWMutex timer *time.Timer - timedout atomicBool + timedout atomic.Bool } // makeWin32File makes a new win32File from an existing file handle. -func makeWin32File(h syscall.Handle) (*win32File, error) { +func makeWin32File(h windows.Handle) (*win32File, error) { f := &win32File{handle: h} ioInitOnce.Do(initIO) _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff) @@ -110,7 +95,12 @@ func makeWin32File(h syscall.Handle) (*win32File, error) { return f, nil } +// Deprecated: use NewOpenFile instead. func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { + return NewOpenFile(windows.Handle(h)) +} + +func NewOpenFile(h windows.Handle) (io.ReadWriteCloser, error) { // If we return the result of makeWin32File directly, it can result in an // interface-wrapped nil, rather than a nil interface value. f, err := makeWin32File(h) @@ -124,13 +114,13 @@ func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { func (f *win32File) closeHandle() { f.wgLock.Lock() // Atomically set that we are closing, releasing the resources only once. - if !f.closing.swap(true) { + if !f.closing.Swap(true) { f.wgLock.Unlock() // cancel all IO and wait for it to complete _ = cancelIoEx(f.handle, nil) f.wg.Wait() // at this point, no new IO can start - syscall.Close(f.handle) + windows.Close(f.handle) f.handle = 0 } else { f.wgLock.Unlock() @@ -145,14 +135,14 @@ func (f *win32File) Close() error { // IsClosed checks if the file has been closed. func (f *win32File) IsClosed() bool { - return f.closing.isSet() + return f.closing.Load() } // prepareIO prepares for a new IO operation. // The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. func (f *win32File) prepareIO() (*ioOperation, error) { f.wgLock.RLock() - if f.closing.isSet() { + if f.closing.Load() { f.wgLock.RUnlock() return nil, ErrFileClosed } @@ -164,12 +154,12 @@ func (f *win32File) prepareIO() (*ioOperation, error) { } // ioCompletionProcessor processes completed async IOs forever. 
-func ioCompletionProcessor(h syscall.Handle) { +func ioCompletionProcessor(h windows.Handle) { for { var bytes uint32 var key uintptr var op *ioOperation - err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE) + err := getQueuedCompletionStatus(h, &bytes, &key, &op, windows.INFINITE) if op == nil { panic(err) } @@ -182,11 +172,11 @@ func ioCompletionProcessor(h syscall.Handle) { // asyncIO processes the return value from ReadFile or WriteFile, blocking until // the operation has actually completed. func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { - if err != syscall.ERROR_IO_PENDING { //nolint:errorlint // err is Errno + if err != windows.ERROR_IO_PENDING { //nolint:errorlint // err is Errno return int(bytes), err } - if f.closing.isSet() { + if f.closing.Load() { _ = cancelIoEx(f.handle, &c.o) } @@ -201,8 +191,8 @@ func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, er select { case r = <-c.ch: err = r.err - if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno - if f.closing.isSet() { + if err == windows.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno + if f.closing.Load() { err = ErrFileClosed } } else if err != nil && f.socket { @@ -214,7 +204,7 @@ func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, er _ = cancelIoEx(f.handle, &c.o) r = <-c.ch err = r.err - if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno + if err == windows.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno err = ErrTimeout } } @@ -235,23 +225,22 @@ func (f *win32File) Read(b []byte) (int, error) { } defer f.wg.Done() - if f.readDeadline.timedout.isSet() { + if f.readDeadline.timedout.Load() { return 0, ErrTimeout } var bytes uint32 - err = syscall.ReadFile(f.handle, b, &bytes, &c.o) + err = windows.ReadFile(f.handle, b, &bytes, &c.o) n, err := f.asyncIO(c, &f.readDeadline, bytes, err) runtime.KeepAlive(b) // Handle EOF conditions. if err == nil && n == 0 && len(b) != 0 { return 0, io.EOF - } else if err == syscall.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno + } else if err == windows.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno return 0, io.EOF - } else { - return n, err } + return n, err } // Write writes to a file handle. 
@@ -262,12 +251,12 @@ func (f *win32File) Write(b []byte) (int, error) { } defer f.wg.Done() - if f.writeDeadline.timedout.isSet() { + if f.writeDeadline.timedout.Load() { return 0, ErrTimeout } var bytes uint32 - err = syscall.WriteFile(f.handle, b, &bytes, &c.o) + err = windows.WriteFile(f.handle, b, &bytes, &c.o) n, err := f.asyncIO(c, &f.writeDeadline, bytes, err) runtime.KeepAlive(b) return n, err @@ -282,7 +271,7 @@ func (f *win32File) SetWriteDeadline(deadline time.Time) error { } func (f *win32File) Flush() error { - return syscall.FlushFileBuffers(f.handle) + return windows.FlushFileBuffers(f.handle) } func (f *win32File) Fd() uintptr { @@ -299,7 +288,7 @@ func (d *deadlineHandler) set(deadline time.Time) error { } d.timer = nil } - d.timedout.setFalse() + d.timedout.Store(false) select { case <-d.channel: @@ -314,7 +303,7 @@ func (d *deadlineHandler) set(deadline time.Time) error { } timeoutIO := func() { - d.timedout.setTrue() + d.timedout.Store(true) close(d.channel) } diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go index 702950e72a49c..c860eb9917a5c 100644 --- a/vendor/github.com/Microsoft/go-winio/fileinfo.go +++ b/vendor/github.com/Microsoft/go-winio/fileinfo.go @@ -18,9 +18,18 @@ type FileBasicInfo struct { _ uint32 // padding } +// alignedFileBasicInfo is a FileBasicInfo, but aligned to uint64 by containing +// uint64 rather than windows.Filetime. Filetime contains two uint32s. uint64 +// alignment is necessary to pass this as FILE_BASIC_INFO. +type alignedFileBasicInfo struct { + CreationTime, LastAccessTime, LastWriteTime, ChangeTime uint64 + FileAttributes uint32 + _ uint32 // padding +} + // GetFileBasicInfo retrieves times and attributes for a file. func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { - bi := &FileBasicInfo{} + bi := &alignedFileBasicInfo{} if err := windows.GetFileInformationByHandleEx( windows.Handle(f.Fd()), windows.FileBasicInfo, @@ -30,16 +39,21 @@ func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} } runtime.KeepAlive(f) - return bi, nil + // Reinterpret the alignedFileBasicInfo as a FileBasicInfo so it matches the + // public API of this module. The data may be unnecessarily aligned. + return (*FileBasicInfo)(unsafe.Pointer(bi)), nil } // SetFileBasicInfo sets times and attributes for a file. func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error { + // Create an alignedFileBasicInfo based on a FileBasicInfo. The copy is + // suitable to pass to GetFileInformationByHandleEx. 
+ biAligned := *(*alignedFileBasicInfo)(unsafe.Pointer(bi)) if err := windows.SetFileInformationByHandle( windows.Handle(f.Fd()), windows.FileBasicInfo, - (*byte)(unsafe.Pointer(bi)), - uint32(unsafe.Sizeof(*bi)), + (*byte)(unsafe.Pointer(&biAligned)), + uint32(unsafe.Sizeof(biAligned)), ); err != nil { return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err} } diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go index c881916583e41..c4fdd9d4aec23 100644 --- a/vendor/github.com/Microsoft/go-winio/hvsock.go +++ b/vendor/github.com/Microsoft/go-winio/hvsock.go @@ -10,7 +10,6 @@ import ( "io" "net" "os" - "syscall" "time" "unsafe" @@ -181,13 +180,13 @@ type HvsockConn struct { var _ net.Conn = &HvsockConn{} func newHVSocket() (*win32File, error) { - fd, err := syscall.Socket(afHVSock, syscall.SOCK_STREAM, 1) + fd, err := windows.Socket(afHVSock, windows.SOCK_STREAM, 1) if err != nil { return nil, os.NewSyscallError("socket", err) } f, err := makeWin32File(fd) if err != nil { - syscall.Close(fd) + windows.Close(fd) return nil, err } f.socket = true @@ -197,16 +196,24 @@ func newHVSocket() (*win32File, error) { // ListenHvsock listens for connections on the specified hvsock address. func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) { l := &HvsockListener{addr: *addr} - sock, err := newHVSocket() + + var sock *win32File + sock, err = newHVSocket() if err != nil { return nil, l.opErr("listen", err) } + defer func() { + if err != nil { + _ = sock.Close() + } + }() + sa := addr.raw() - err = socket.Bind(windows.Handle(sock.handle), &sa) + err = socket.Bind(sock.handle, &sa) if err != nil { return nil, l.opErr("listen", os.NewSyscallError("socket", err)) } - err = syscall.Listen(sock.handle, 16) + err = windows.Listen(sock.handle, 16) if err != nil { return nil, l.opErr("listen", os.NewSyscallError("listen", err)) } @@ -246,7 +253,7 @@ func (l *HvsockListener) Accept() (_ net.Conn, err error) { var addrbuf [addrlen * 2]byte var bytes uint32 - err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /* rxdatalen */, addrlen, addrlen, &bytes, &c.o) + err = windows.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /* rxdatalen */, addrlen, addrlen, &bytes, &c.o) if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil { return nil, l.opErr("accept", os.NewSyscallError("acceptex", err)) } @@ -263,7 +270,7 @@ func (l *HvsockListener) Accept() (_ net.Conn, err error) { conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen]))) // initialize the accepted socket and update its properties with those of the listening socket - if err = windows.Setsockopt(windows.Handle(sock.handle), + if err = windows.Setsockopt(sock.handle, windows.SOL_SOCKET, windows.SO_UPDATE_ACCEPT_CONTEXT, (*byte)(unsafe.Pointer(&l.sock.handle)), int32(unsafe.Sizeof(l.sock.handle))); err != nil { return nil, conn.opErr("accept", os.NewSyscallError("setsockopt", err)) @@ -334,7 +341,7 @@ func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *Hvsock }() sa := addr.raw() - err = socket.Bind(windows.Handle(sock.handle), &sa) + err = socket.Bind(sock.handle, &sa) if err != nil { return nil, conn.opErr(op, os.NewSyscallError("bind", err)) } @@ -347,7 +354,7 @@ func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *Hvsock var bytes uint32 for i := uint(0); i <= d.Retries; i++ { err = socket.ConnectEx( - windows.Handle(sock.handle), + sock.handle, &sa, nil, // sendBuf 0, // sendDataLen 
@@ -367,7 +374,7 @@ func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *Hvsock // update the connection properties, so shutdown can be used if err = windows.Setsockopt( - windows.Handle(sock.handle), + sock.handle, windows.SOL_SOCKET, windows.SO_UPDATE_CONNECT_CONTEXT, nil, // optvalue @@ -378,7 +385,7 @@ func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *Hvsock // get the local name var sal rawHvsockAddr - err = socket.GetSockName(windows.Handle(sock.handle), &sal) + err = socket.GetSockName(sock.handle, &sal) if err != nil { return nil, conn.opErr(op, os.NewSyscallError("getsockname", err)) } @@ -421,7 +428,7 @@ func (d *HvsockDialer) redialWait(ctx context.Context) (err error) { return ctx.Err() } -// assumes error is a plain, unwrapped syscall.Errno provided by direct syscall. +// assumes error is a plain, unwrapped windows.Errno provided by direct syscall. func canRedial(err error) bool { //nolint:errorlint // guaranteed to be an Errno switch err { @@ -447,9 +454,9 @@ func (conn *HvsockConn) Read(b []byte) (int, error) { return 0, conn.opErr("read", err) } defer conn.sock.wg.Done() - buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} + buf := windows.WSABuf{Buf: &b[0], Len: uint32(len(b))} var flags, bytes uint32 - err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) + err = windows.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) n, err := conn.sock.asyncIO(c, &conn.sock.readDeadline, bytes, err) if err != nil { var eno windows.Errno @@ -482,9 +489,9 @@ func (conn *HvsockConn) write(b []byte) (int, error) { return 0, conn.opErr("write", err) } defer conn.sock.wg.Done() - buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} + buf := windows.WSABuf{Buf: &b[0], Len: uint32(len(b))} var bytes uint32 - err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil) + err = windows.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil) n, err := conn.sock.asyncIO(c, &conn.sock.writeDeadline, bytes, err) if err != nil { var eno windows.Errno @@ -511,7 +518,7 @@ func (conn *HvsockConn) shutdown(how int) error { return socket.ErrSocketClosed } - err := syscall.Shutdown(conn.sock.handle, how) + err := windows.Shutdown(conn.sock.handle, how) if err != nil { // If the connection was closed, shutdowns fail with "not connected" if errors.Is(err, windows.WSAENOTCONN) || @@ -525,7 +532,7 @@ func (conn *HvsockConn) shutdown(how int) error { // CloseRead shuts down the read end of the socket, preventing future read operations. func (conn *HvsockConn) CloseRead() error { - err := conn.shutdown(syscall.SHUT_RD) + err := conn.shutdown(windows.SHUT_RD) if err != nil { return conn.opErr("closeread", err) } @@ -535,7 +542,7 @@ func (conn *HvsockConn) CloseRead() error { // CloseWrite shuts down the write end of the socket, preventing future write operations and // notifying the other endpoint that no more data will be written. 
func (conn *HvsockConn) CloseWrite() error { - err := conn.shutdown(syscall.SHUT_WR) + err := conn.shutdown(windows.SHUT_WR) if err != nil { return conn.opErr("closewrite", err) } diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go b/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go index 509b3ec64107a..0cd9621df785f 100644 --- a/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go @@ -11,12 +11,14 @@ import ( //go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go fs.go // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew -//sys CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateFileW +//sys CreateFile(name string, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateFileW const NullHandle windows.Handle = 0 // AccessMask defines standard, specific, and generic rights. // +// Used with CreateFile and NtCreateFile (and co.). +// // Bitmask: // 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 // 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 @@ -47,6 +49,12 @@ const ( // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew#parameters FILE_ANY_ACCESS AccessMask = 0 + GENERIC_READ AccessMask = 0x8000_0000 + GENERIC_WRITE AccessMask = 0x4000_0000 + GENERIC_EXECUTE AccessMask = 0x2000_0000 + GENERIC_ALL AccessMask = 0x1000_0000 + ACCESS_SYSTEM_SECURITY AccessMask = 0x0100_0000 + // Specific Object Access // from ntioapi.h @@ -124,14 +132,32 @@ const ( TRUNCATE_EXISTING FileCreationDisposition = 0x05 ) +// Create disposition values for NtCreate* +type NTFileCreationDisposition uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + // From ntioapi.h + + FILE_SUPERSEDE NTFileCreationDisposition = 0x00 + FILE_OPEN NTFileCreationDisposition = 0x01 + FILE_CREATE NTFileCreationDisposition = 0x02 + FILE_OPEN_IF NTFileCreationDisposition = 0x03 + FILE_OVERWRITE NTFileCreationDisposition = 0x04 + FILE_OVERWRITE_IF NTFileCreationDisposition = 0x05 + FILE_MAXIMUM_DISPOSITION NTFileCreationDisposition = 0x05 +) + // CreateFile and co. take flags or attributes together as one parameter. // Define alias until we can use generics to allow both - +// // https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants type FileFlagOrAttribute uint32 //nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. -const ( // from winnt.h +const ( + // from winnt.h + FILE_FLAG_WRITE_THROUGH FileFlagOrAttribute = 0x8000_0000 FILE_FLAG_OVERLAPPED FileFlagOrAttribute = 0x4000_0000 FILE_FLAG_NO_BUFFERING FileFlagOrAttribute = 0x2000_0000 @@ -145,17 +171,51 @@ const ( // from winnt.h FILE_FLAG_FIRST_PIPE_INSTANCE FileFlagOrAttribute = 0x0008_0000 ) +// NtCreate* functions take a dedicated CreateOptions parameter. 
+// +// https://learn.microsoft.com/en-us/windows/win32/api/Winternl/nf-winternl-ntcreatefile +// +// https://learn.microsoft.com/en-us/windows/win32/devnotes/nt-create-named-pipe-file +type NTCreateOptions uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + // From ntioapi.h + + FILE_DIRECTORY_FILE NTCreateOptions = 0x0000_0001 + FILE_WRITE_THROUGH NTCreateOptions = 0x0000_0002 + FILE_SEQUENTIAL_ONLY NTCreateOptions = 0x0000_0004 + FILE_NO_INTERMEDIATE_BUFFERING NTCreateOptions = 0x0000_0008 + + FILE_SYNCHRONOUS_IO_ALERT NTCreateOptions = 0x0000_0010 + FILE_SYNCHRONOUS_IO_NONALERT NTCreateOptions = 0x0000_0020 + FILE_NON_DIRECTORY_FILE NTCreateOptions = 0x0000_0040 + FILE_CREATE_TREE_CONNECTION NTCreateOptions = 0x0000_0080 + + FILE_COMPLETE_IF_OPLOCKED NTCreateOptions = 0x0000_0100 + FILE_NO_EA_KNOWLEDGE NTCreateOptions = 0x0000_0200 + FILE_DISABLE_TUNNELING NTCreateOptions = 0x0000_0400 + FILE_RANDOM_ACCESS NTCreateOptions = 0x0000_0800 + + FILE_DELETE_ON_CLOSE NTCreateOptions = 0x0000_1000 + FILE_OPEN_BY_FILE_ID NTCreateOptions = 0x0000_2000 + FILE_OPEN_FOR_BACKUP_INTENT NTCreateOptions = 0x0000_4000 + FILE_NO_COMPRESSION NTCreateOptions = 0x0000_8000 +) + type FileSQSFlag = FileFlagOrAttribute //nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. -const ( // from winbase.h +const ( + // from winbase.h + SECURITY_ANONYMOUS FileSQSFlag = FileSQSFlag(SecurityAnonymous << 16) SECURITY_IDENTIFICATION FileSQSFlag = FileSQSFlag(SecurityIdentification << 16) SECURITY_IMPERSONATION FileSQSFlag = FileSQSFlag(SecurityImpersonation << 16) SECURITY_DELEGATION FileSQSFlag = FileSQSFlag(SecurityDelegation << 16) - SECURITY_SQOS_PRESENT FileSQSFlag = 0x00100000 - SECURITY_VALID_SQOS_FLAGS FileSQSFlag = 0x001F0000 + SECURITY_SQOS_PRESENT FileSQSFlag = 0x0010_0000 + SECURITY_VALID_SQOS_FLAGS FileSQSFlag = 0x001F_0000 ) // GetFinalPathNameByHandle flags diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go index e2f7bb24e5f71..a94e234c706b6 100644 --- a/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go @@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error { case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) return e } @@ -45,7 +42,7 @@ var ( procCreateFileW = modkernel32.NewProc("CreateFileW") ) -func CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { +func CreateFile(name string, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { var _p0 *uint16 _p0, err = syscall.UTF16PtrFromString(name) if err != nil { @@ -54,8 +51,8 @@ func CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall. 
return _CreateFile(_p0, access, mode, sa, createmode, attrs, templatefile) } -func _CreateFile(name *uint16, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) +func _CreateFile(name *uint16, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { + r0, _, e1 := syscall.SyscallN(procCreateFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile)) handle = windows.Handle(r0) if handle == windows.InvalidHandle { err = errnoErr(e1) diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go index aeb7b7250f587..88580d974ecb2 100644 --- a/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go +++ b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go @@ -156,9 +156,7 @@ func connectEx( bytesSent *uint32, overlapped *windows.Overlapped, ) (err error) { - // todo: after upgrading to 1.18, switch from syscall.Syscall9 to syscall.SyscallN - r1, _, e1 := syscall.Syscall9(connectExFunc.addr, - 7, + r1, _, e1 := syscall.SyscallN(connectExFunc.addr, uintptr(s), uintptr(name), uintptr(namelen), @@ -166,8 +164,8 @@ func connectEx( uintptr(sendDataLen), uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), - 0, - 0) + ) + if r1 == 0 { if e1 != 0 { err = error(e1) diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go index 6d2e1a9e44389..e1504126aa6e8 100644 --- a/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go @@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error { case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
return e } @@ -48,7 +45,7 @@ var ( ) func bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + r1, _, e1 := syscall.SyscallN(procbind.Addr(), uintptr(s), uintptr(name), uintptr(namelen)) if r1 == socketError { err = errnoErr(e1) } @@ -56,7 +53,7 @@ func bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) { } func getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) + r1, _, e1 := syscall.SyscallN(procgetpeername.Addr(), uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) if r1 == socketError { err = errnoErr(e1) } @@ -64,7 +61,7 @@ func getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err err } func getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) + r1, _, e1 := syscall.SyscallN(procgetsockname.Addr(), uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) if r1 == socketError { err = errnoErr(e1) } diff --git a/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go b/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go index 7ad505702405d..42ebc019fcb81 100644 --- a/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go +++ b/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go @@ -62,7 +62,7 @@ func (b *WString) Free() { // ResizeTo grows the buffer to at least c and returns the new capacity, freeing the // previous buffer back into pool. 
func (b *WString) ResizeTo(c uint32) uint32 { - // allready sufficient (or n is 0) + // already sufficient (or n is 0) if c <= b.Cap() { return b.Cap() } diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go index 25cc811031bba..a2da6639d00d0 100644 --- a/vendor/github.com/Microsoft/go-winio/pipe.go +++ b/vendor/github.com/Microsoft/go-winio/pipe.go @@ -11,7 +11,6 @@ import ( "net" "os" "runtime" - "syscall" "time" "unsafe" @@ -20,20 +19,44 @@ import ( "github.com/Microsoft/go-winio/internal/fs" ) -//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe -//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW -//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo -//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW -//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc -//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) = ntdll.NtCreateNamedPipeFile +//sys connectNamedPipe(pipe windows.Handle, o *windows.Overlapped) (err error) = ConnectNamedPipe +//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateNamedPipeW +//sys disconnectNamedPipe(pipe windows.Handle) (err error) = DisconnectNamedPipe +//sys getNamedPipeInfo(pipe windows.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo +//sys getNamedPipeHandleState(pipe windows.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW +//sys ntCreateNamedPipeFile(pipe *windows.Handle, access ntAccessMask, oa *objectAttributes, iosb *ioStatusBlock, share ntFileShareMode, disposition ntFileCreationDisposition, options ntFileOptions, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) = ntdll.NtCreateNamedPipeFile //sys rtlNtStatusToDosError(status ntStatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb //sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) = ntdll.RtlDosPathNameToNtPathName_U //sys rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) = ntdll.RtlDefaultNpAcl +type PipeConn interface { + net.Conn + Disconnect() error + Flush() error +} + +// type aliases for mkwinsyscall code +type ( + ntAccessMask = fs.AccessMask + ntFileShareMode = fs.FileShareMode + ntFileCreationDisposition = fs.NTFileCreationDisposition + ntFileOptions = fs.NTCreateOptions +) + type ioStatusBlock struct { 
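+	// Status receives the operation's completion status; Information receives
+	// request-dependent data, such as the number of bytes transferred.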
Status, Information uintptr } +// typedef struct _OBJECT_ATTRIBUTES { +// ULONG Length; +// HANDLE RootDirectory; +// PUNICODE_STRING ObjectName; +// ULONG Attributes; +// PVOID SecurityDescriptor; +// PVOID SecurityQualityOfService; +// } OBJECT_ATTRIBUTES; +// +// https://learn.microsoft.com/en-us/windows/win32/api/ntdef/ns-ntdef-_object_attributes type objectAttributes struct { Length uintptr RootDirectory uintptr @@ -49,6 +72,17 @@ type unicodeString struct { Buffer uintptr } +// typedef struct _SECURITY_DESCRIPTOR { +// BYTE Revision; +// BYTE Sbz1; +// SECURITY_DESCRIPTOR_CONTROL Control; +// PSID Owner; +// PSID Group; +// PACL Sacl; +// PACL Dacl; +// } SECURITY_DESCRIPTOR, *PISECURITY_DESCRIPTOR; +// +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-security_descriptor type securityDescriptor struct { Revision byte Sbz1 byte @@ -80,6 +114,8 @@ type win32Pipe struct { path string } +var _ PipeConn = (*win32Pipe)(nil) + type win32MessageBytePipe struct { win32Pipe writeClosed bool @@ -103,6 +139,10 @@ func (f *win32Pipe) SetDeadline(t time.Time) error { return f.SetWriteDeadline(t) } +func (f *win32Pipe) Disconnect() error { + return disconnectNamedPipe(f.win32File.handle) +} + // CloseWrite closes the write side of a message pipe in byte mode. func (f *win32MessageBytePipe) CloseWrite() error { if f.writeClosed { @@ -146,7 +186,7 @@ func (f *win32MessageBytePipe) Read(b []byte) (int, error) { // zero-byte message, ensure that all future Read() calls // also return EOF. f.readEOF = true - } else if err == syscall.ERROR_MORE_DATA { //nolint:errorlint // err is Errno + } else if err == windows.ERROR_MORE_DATA { //nolint:errorlint // err is Errno // ERROR_MORE_DATA indicates that the pipe's read mode is message mode // and the message still has more bytes. Treat this as a success, since // this package presents all named pipes as byte streams. @@ -164,21 +204,20 @@ func (s pipeAddress) String() string { } // tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout. -func tryDialPipe(ctx context.Context, path *string, access fs.AccessMask) (syscall.Handle, error) { +func tryDialPipe(ctx context.Context, path *string, access fs.AccessMask, impLevel PipeImpLevel) (windows.Handle, error) { for { select { case <-ctx.Done(): - return syscall.Handle(0), ctx.Err() + return windows.Handle(0), ctx.Err() default: - wh, err := fs.CreateFile(*path, + h, err := fs.CreateFile(*path, access, 0, // mode nil, // security attributes fs.OPEN_EXISTING, - fs.FILE_FLAG_OVERLAPPED|fs.SECURITY_SQOS_PRESENT|fs.SECURITY_ANONYMOUS, + fs.FILE_FLAG_OVERLAPPED|fs.SECURITY_SQOS_PRESENT|fs.FileSQSFlag(impLevel), 0, // template file handle ) - h := syscall.Handle(wh) if err == nil { return h, nil } @@ -214,15 +253,33 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { // DialPipeContext attempts to connect to a named pipe by `path` until `ctx` // cancellation or timeout. func DialPipeContext(ctx context.Context, path string) (net.Conn, error) { - return DialPipeAccess(ctx, path, syscall.GENERIC_READ|syscall.GENERIC_WRITE) + return DialPipeAccess(ctx, path, uint32(fs.GENERIC_READ|fs.GENERIC_WRITE)) } +// PipeImpLevel is an enumeration of impersonation levels that may be set +// when calling DialPipeAccessImpLevel.
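+// The chosen level is passed to CreateFile as a security quality-of-service
+// flag (fs.FileSQSFlag) together with fs.SECURITY_SQOS_PRESENT; see tryDialPipe.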
+type PipeImpLevel uint32 + +const ( + PipeImpLevelAnonymous = PipeImpLevel(fs.SECURITY_ANONYMOUS) + PipeImpLevelIdentification = PipeImpLevel(fs.SECURITY_IDENTIFICATION) + PipeImpLevelImpersonation = PipeImpLevel(fs.SECURITY_IMPERSONATION) + PipeImpLevelDelegation = PipeImpLevel(fs.SECURITY_DELEGATION) +) + // DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx` // cancellation or timeout. func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) { + return DialPipeAccessImpLevel(ctx, path, access, PipeImpLevelAnonymous) +} + +// DialPipeAccessImpLevel attempts to connect to a named pipe by `path` with +// `access` at `impLevel` until `ctx` cancellation or timeout. The other +// DialPipe* implementations use PipeImpLevelAnonymous. +func DialPipeAccessImpLevel(ctx context.Context, path string, access uint32, impLevel PipeImpLevel) (net.Conn, error) { var err error - var h syscall.Handle - h, err = tryDialPipe(ctx, &path, fs.AccessMask(access)) + var h windows.Handle + h, err = tryDialPipe(ctx, &path, fs.AccessMask(access), impLevel) if err != nil { return nil, err } @@ -235,7 +292,7 @@ func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, f, err := makeWin32File(h) if err != nil { - syscall.Close(h) + windows.Close(h) return nil, err } @@ -255,7 +312,7 @@ type acceptResponse struct { } type win32PipeListener struct { - firstHandle syscall.Handle + firstHandle windows.Handle path string config PipeConfig acceptCh chan (chan acceptResponse) @@ -263,8 +320,8 @@ type win32PipeListener struct { doneCh chan int } -func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (syscall.Handle, error) { - path16, err := syscall.UTF16FromString(path) +func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (windows.Handle, error) { + path16, err := windows.UTF16FromString(path) if err != nil { return 0, &os.PathError{Op: "open", Path: path, Err: err} } @@ -280,16 +337,20 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy ).Err(); err != nil { return 0, &os.PathError{Op: "open", Path: path, Err: err} } - defer localFree(ntPath.Buffer) + defer windows.LocalFree(windows.Handle(ntPath.Buffer)) //nolint:errcheck oa.ObjectName = &ntPath oa.Attributes = windows.OBJ_CASE_INSENSITIVE // The security descriptor is only needed for the first pipe. if first { if sd != nil { + //todo: does `sdb` need to be allocated on the heap, or can go allocate it? 
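+			// Copy the caller-supplied security descriptor into a buffer allocated
+			// with LocalAlloc and point oa.SecurityDescriptor at it.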
l := uint32(len(sd)) - sdb := localAlloc(0, l) - defer localFree(sdb) + sdb, err := windows.LocalAlloc(0, l) + if err != nil { + return 0, fmt.Errorf("LocalAlloc for security descriptor of length %d: %w", l, err) + } + defer windows.LocalFree(windows.Handle(sdb)) //nolint:errcheck copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd) oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb)) } else { @@ -298,7 +359,7 @@ if err := rtlDefaultNpAcl(&dacl).Err(); err != nil { return 0, fmt.Errorf("getting default named pipe ACL: %w", err) } - defer localFree(dacl) + defer windows.LocalFree(windows.Handle(dacl)) //nolint:errcheck sdb := &securityDescriptor{ Revision: 1, @@ -314,27 +375,27 @@ typ |= windows.FILE_PIPE_MESSAGE_TYPE } - disposition := uint32(windows.FILE_OPEN) - access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE) + disposition := fs.FILE_OPEN + access := fs.GENERIC_READ | fs.GENERIC_WRITE | fs.SYNCHRONIZE if first { - disposition = windows.FILE_CREATE + disposition = fs.FILE_CREATE // By not asking for read or write access, the named pipe file system // will put this pipe into an initially disconnected state, blocking // client connections until the next call with first == false. - access = syscall.SYNCHRONIZE + access = fs.SYNCHRONIZE } timeout := int64(-50 * 10000) // 50ms var ( - h syscall.Handle + h windows.Handle iosb ioStatusBlock ) err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, + fs.FILE_SHARE_READ|fs.FILE_SHARE_WRITE, disposition, 0, typ, @@ -359,7 +420,7 @@ func (l *win32PipeListener) makeServerPipe() (*win32File, error) { } f, err := makeWin32File(h) if err != nil { - syscall.Close(h) + windows.Close(h) return nil, err } return f, nil @@ -418,7 +479,7 @@ func (l *win32PipeListener) listenerRoutine() { closed = err == ErrPipeListenerClosed //nolint:errorlint // err is Errno } } - syscall.Close(l.firstHandle) + windows.Close(l.firstHandle) l.firstHandle = 0 // Notify Close() and Accept() callers that the handle has been closed.
close(l.doneCh) diff --git a/vendor/github.com/Microsoft/go-winio/privilege.go b/vendor/github.com/Microsoft/go-winio/privilege.go index 0ff9dac906d3c..d9b90b6e8614c 100644 --- a/vendor/github.com/Microsoft/go-winio/privilege.go +++ b/vendor/github.com/Microsoft/go-winio/privilege.go @@ -9,7 +9,6 @@ import ( "fmt" "runtime" "sync" - "syscall" "unicode/utf16" "golang.org/x/sys/windows" @@ -18,8 +17,8 @@ import ( //sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges //sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf //sys revertToSelf() (err error) = advapi32.RevertToSelf -//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken -//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread +//sys openThreadToken(thread windows.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken +//sys getCurrentThread() (h windows.Handle) = GetCurrentThread //sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW //sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW //sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW @@ -29,7 +28,7 @@ const ( SE_PRIVILEGE_ENABLED = windows.SE_PRIVILEGE_ENABLED //revive:disable-next-line:var-naming ALL_CAPS - ERROR_NOT_ALL_ASSIGNED syscall.Errno = windows.ERROR_NOT_ALL_ASSIGNED + ERROR_NOT_ALL_ASSIGNED windows.Errno = windows.ERROR_NOT_ALL_ASSIGNED SeBackupPrivilege = "SeBackupPrivilege" SeRestorePrivilege = "SeRestorePrivilege" @@ -177,7 +176,7 @@ func newThreadToken() (windows.Token, error) { } var token windows.Token - err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token) + err = openThreadToken(getCurrentThread(), windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, false, &token) if err != nil { rerr := revertToSelf() if rerr != nil { diff --git a/vendor/github.com/Microsoft/go-winio/sd.go b/vendor/github.com/Microsoft/go-winio/sd.go index 5550ef6b61ef2..c3685e98e14d4 100644 --- a/vendor/github.com/Microsoft/go-winio/sd.go +++ b/vendor/github.com/Microsoft/go-winio/sd.go @@ -5,7 +5,7 @@ package winio import ( "errors" - "syscall" + "fmt" "unsafe" "golang.org/x/sys/windows" @@ -15,10 +15,6 @@ import ( //sys lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountSidW //sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW //sys convertStringSidToSid(str *uint16, sid **byte) (err error) = advapi32.ConvertStringSidToSidW -//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW -//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW -//sys localFree(mem uintptr) = LocalFree -//sys getSecurityDescriptorLength(sd uintptr) 
(len uint32) = advapi32.GetSecurityDescriptorLength type AccountLookupError struct { Name string @@ -64,7 +60,7 @@ func LookupSidByName(name string) (sid string, err error) { var sidSize, sidNameUse, refDomainSize uint32 err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse) - if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno + if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno return "", &AccountLookupError{name, err} } sidBuffer := make([]byte, sidSize) @@ -78,8 +74,8 @@ func LookupSidByName(name string) (sid string, err error) { if err != nil { return "", &AccountLookupError{name, err} } - sid = syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:]) - localFree(uintptr(unsafe.Pointer(strBuffer))) + sid = windows.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:]) + _, _ = windows.LocalFree(windows.Handle(unsafe.Pointer(strBuffer))) return sid, nil } @@ -100,7 +96,7 @@ func LookupNameBySid(sid string) (name string, err error) { if err = convertStringSidToSid(sidBuffer, &sidPtr); err != nil { return "", &AccountLookupError{sid, err} } - defer localFree(uintptr(unsafe.Pointer(sidPtr))) + defer windows.LocalFree(windows.Handle(unsafe.Pointer(sidPtr))) //nolint:errcheck var nameSize, refDomainSize, sidNameUse uint32 err = lookupAccountSid(nil, sidPtr, nil, &nameSize, nil, &refDomainSize, &sidNameUse) @@ -120,25 +116,18 @@ func LookupNameBySid(sid string) (name string, err error) { } func SddlToSecurityDescriptor(sddl string) ([]byte, error) { - var sdBuffer uintptr - err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil) + sd, err := windows.SecurityDescriptorFromString(sddl) if err != nil { - return nil, &SddlConversionError{sddl, err} + return nil, &SddlConversionError{Sddl: sddl, Err: err} } - defer localFree(sdBuffer) - sd := make([]byte, getSecurityDescriptorLength(sdBuffer)) - copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)]) - return sd, nil + b := unsafe.Slice((*byte)(unsafe.Pointer(sd)), sd.Length()) + return b, nil } func SecurityDescriptorToSddl(sd []byte) (string, error) { - var sddl *uint16 - // The returned string length seems to include an arbitrary number of terminating NULs. - // Don't use it. 
- err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil) - if err != nil { - return "", err + if l := int(unsafe.Sizeof(windows.SECURITY_DESCRIPTOR{})); len(sd) < l { + return "", fmt.Errorf("SecurityDescriptor (%d) smaller than expected (%d): %w", len(sd), l, windows.ERROR_INCORRECT_SIZE) } - defer localFree(uintptr(unsafe.Pointer(sddl))) - return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil + s := (*windows.SECURITY_DESCRIPTOR)(unsafe.Pointer(&sd[0])) + return s.String(), nil } diff --git a/vendor/github.com/Microsoft/go-winio/tools.go b/vendor/github.com/Microsoft/go-winio/tools.go deleted file mode 100644 index 2aa045843ea81..0000000000000 --- a/vendor/github.com/Microsoft/go-winio/tools.go +++ /dev/null @@ -1,5 +0,0 @@ -//go:build tools - -package winio - -import _ "golang.org/x/tools/cmd/stringer" diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go index 469b16f6398dd..89b66eda8ccf1 100644 --- a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go @@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error { case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) return e } @@ -45,38 +42,34 @@ var ( modntdll = windows.NewLazySystemDLL("ntdll.dll") modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") - procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") - procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") - procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") - procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") - procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") - procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") - procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") - procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") - procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") - procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") - procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") - procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") - procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") - procRevertToSelf = modadvapi32.NewProc("RevertToSelf") - procBackupRead = modkernel32.NewProc("BackupRead") - procBackupWrite = modkernel32.NewProc("BackupWrite") - procCancelIoEx = modkernel32.NewProc("CancelIoEx") - procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") - procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") - procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") - procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") - procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") - procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") - procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") - procLocalAlloc = modkernel32.NewProc("LocalAlloc") - procLocalFree = modkernel32.NewProc("LocalFree") - procSetFileCompletionNotificationModes = 
modkernel32.NewProc("SetFileCompletionNotificationModes") - procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") - procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") - procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") - procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") - procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") + procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") + procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") + procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") + procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") + procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") + procBackupRead = modkernel32.NewProc("BackupRead") + procBackupWrite = modkernel32.NewProc("BackupWrite") + procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") + procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") + procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") + procDisconnectNamedPipe = modkernel32.NewProc("DisconnectNamedPipe") + procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") + procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") + procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") + procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") + procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") + procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") + procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") ) func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { @@ -84,7 +77,7 @@ func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, ou if releaseAll { _p0 = 1 } - r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) + r0, _, e1 := syscall.SyscallN(procAdjustTokenPrivileges.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) success = r0 != 0 if true { err = errnoErr(e1) @@ -92,33 +85,8 @@ func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, ou return } -func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) { - r1, _, e1 := 
syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - func convertSidToStringSid(sid *byte, str **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(str) - if err != nil { - return - } - return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) -} - -func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procConvertSidToStringSidW.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str))) if r1 == 0 { err = errnoErr(e1) } @@ -126,21 +94,15 @@ func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision } func convertStringSidToSid(str *uint16, sid **byte) (err error) { - r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(sid)), 0) + r1, _, e1 := syscall.SyscallN(procConvertStringSidToSidW.Addr(), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(sid))) if r1 == 0 { err = errnoErr(e1) } return } -func getSecurityDescriptorLength(sd uintptr) (len uint32) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) - len = uint32(r0) - return -} - func impersonateSelf(level uint32) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0) + r1, _, e1 := syscall.SyscallN(procImpersonateSelf.Addr(), uintptr(level)) if r1 == 0 { err = errnoErr(e1) } @@ -157,7 +119,7 @@ func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSiz } func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) + r1, _, e1 := syscall.SyscallN(procLookupAccountNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse))) if r1 == 0 { err = errnoErr(e1) } @@ -165,7 +127,7 @@ func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidS } func lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, 
uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) + r1, _, e1 := syscall.SyscallN(procLookupAccountSidW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse))) if r1 == 0 { err = errnoErr(e1) } @@ -182,7 +144,7 @@ func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, } func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0) + r1, _, e1 := syscall.SyscallN(procLookupPrivilegeDisplayNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId))) if r1 == 0 { err = errnoErr(e1) } @@ -199,7 +161,7 @@ func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size * } func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procLookupPrivilegeNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size))) if r1 == 0 { err = errnoErr(e1) } @@ -221,19 +183,19 @@ func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err err } func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { - r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + r1, _, e1 := syscall.SyscallN(procLookupPrivilegeValueW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) if r1 == 0 { err = errnoErr(e1) } return } -func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) { +func openThreadToken(thread windows.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) { var _p0 uint32 if openAsSelf { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + r1, _, e1 := syscall.SyscallN(procOpenThreadToken.Addr(), uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token))) if r1 == 0 { err = errnoErr(e1) } @@ -241,14 +203,14 @@ func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, } func revertToSelf() (err error) { - r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + r1, _, e1 := syscall.SyscallN(procRevertToSelf.Addr()) if r1 == 0 { err = errnoErr(e1) } return } -func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, 
processSecurity bool, context *uintptr) (err error) { +func backupRead(h windows.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { var _p0 *byte if len(b) > 0 { _p0 = &b[0] @@ -261,14 +223,14 @@ func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, proce if processSecurity { _p2 = 1 } - r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + r1, _, e1 := syscall.SyscallN(procBackupRead.Addr(), uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context))) if r1 == 0 { err = errnoErr(e1) } return } -func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { +func backupWrite(h windows.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { var _p0 *byte if len(b) > 0 { _p0 = &b[0] @@ -281,39 +243,39 @@ func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, p if processSecurity { _p2 = 1 } - r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + r1, _, e1 := syscall.SyscallN(procBackupWrite.Addr(), uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context))) if r1 == 0 { err = errnoErr(e1) } return } -func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0) +func cancelIoEx(file windows.Handle, o *windows.Overlapped) (err error) { + r1, _, e1 := syscall.SyscallN(procCancelIoEx.Addr(), uintptr(file), uintptr(unsafe.Pointer(o))) if r1 == 0 { err = errnoErr(e1) } return } -func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) +func connectNamedPipe(pipe windows.Handle, o *windows.Overlapped) (err error) { + r1, _, e1 := syscall.SyscallN(procConnectNamedPipe.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(o))) if r1 == 0 { err = errnoErr(e1) } return } -func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) - newport = syscall.Handle(r0) +func createIoCompletionPort(file windows.Handle, port windows.Handle, key uintptr, threadCount uint32) (newport windows.Handle, err error) { + r0, _, e1 := syscall.SyscallN(procCreateIoCompletionPort.Addr(), uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount)) + newport = windows.Handle(r0) if newport == 0 { err = errnoErr(e1) } return } -func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { +func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize 
uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) { var _p0 *uint16 _p0, err = syscall.UTF16PtrFromString(name) if err != nil { @@ -322,96 +284,93 @@ func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances ui return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) } -func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { +func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) { + r0, _, e1 := syscall.SyscallN(procCreateNamedPipeW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa))) + handle = windows.Handle(r0) + if handle == windows.InvalidHandle { err = errnoErr(e1) } return } -func getCurrentThread() (h syscall.Handle) { - r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) - h = syscall.Handle(r0) - return -} - -func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) +func disconnectNamedPipe(pipe windows.Handle) (err error) { + r1, _, e1 := syscall.SyscallN(procDisconnectNamedPipe.Addr(), uintptr(pipe)) if r1 == 0 { err = errnoErr(e1) } return } -func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) - if r1 == 0 { - err = errnoErr(e1) - } +func getCurrentThread() (h windows.Handle) { + r0, _, _ := syscall.SyscallN(procGetCurrentThread.Addr()) + h = windows.Handle(r0) return } -func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0) +func getNamedPipeHandleState(pipe windows.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procGetNamedPipeHandleStateW.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), 
uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize)) if r1 == 0 { err = errnoErr(e1) } return } -func localAlloc(uFlags uint32, length uint32) (ptr uintptr) { - r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0) - ptr = uintptr(r0) +func getNamedPipeInfo(pipe windows.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procGetNamedPipeInfo.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances))) + if r1 == 0 { + err = errnoErr(e1) + } return } -func localFree(mem uintptr) { - syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0) +func getQueuedCompletionStatus(port windows.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procGetQueuedCompletionStatus.Addr(), uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout)) + if r1 == 0 { + err = errnoErr(e1) + } return } -func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0) +func setFileCompletionNotificationModes(h windows.Handle, flags uint8) (err error) { + r1, _, e1 := syscall.SyscallN(procSetFileCompletionNotificationModes.Addr(), uintptr(h), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } return } -func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) { - r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) +func ntCreateNamedPipeFile(pipe *windows.Handle, access ntAccessMask, oa *objectAttributes, iosb *ioStatusBlock, share ntFileShareMode, disposition ntFileCreationDisposition, options ntFileOptions, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) { + r0, _, _ := syscall.SyscallN(procNtCreateNamedPipeFile.Addr(), uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout))) status = ntStatus(r0) return } func rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) { - r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlDefaultNpAcl.Addr(), uintptr(unsafe.Pointer(dacl))) status = ntStatus(r0) return } func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) { - r0, _, _ := 
syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlDosPathNameToNtPathName_U.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved)) status = ntStatus(r0) return } func rtlNtStatusToDosError(status ntStatus) (winerr error) { - r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosErrorNoTeb.Addr(), uintptr(status)) if r0 != 0 { winerr = syscall.Errno(r0) } return } -func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { +func wsaGetOverlappedResult(h windows.Handle, o *windows.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { var _p0 uint32 if wait { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) + r1, _, e1 := syscall.SyscallN(procWSAGetOverlappedResult.Addr(), uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags))) if r1 == 0 { err = errnoErr(e1) } diff --git a/vendor/github.com/benbjohnson/immutable/LICENSE b/vendor/github.com/benbjohnson/immutable/LICENSE new file mode 100644 index 0000000000000..428ce7de28be2 --- /dev/null +++ b/vendor/github.com/benbjohnson/immutable/LICENSE @@ -0,0 +1,19 @@ +Copyright 2019 Ben Johnson + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/benbjohnson/immutable/README.md b/vendor/github.com/benbjohnson/immutable/README.md new file mode 100644 index 0000000000000..f66ef6281249e --- /dev/null +++ b/vendor/github.com/benbjohnson/immutable/README.md @@ -0,0 +1,301 @@ +Immutable ![release](https://img.shields.io/github/release/benbjohnson/immutable.svg) ![test](https://github.com/benbjohnson/immutable/workflows/test/badge.svg) ![coverage](https://img.shields.io/codecov/c/github/benbjohnson/immutable/master.svg) ![license](https://img.shields.io/github/license/benbjohnson/immutable.svg) +========= + +This repository contains *generic* immutable collection types for Go. It includes +`List`, `Map`, and `SortedMap` implementations. Immutable collections can +provide efficient, lock free sharing of data by requiring that edits to the +collections return new collections. 
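+
+As a minimal sketch of that sharing model (the `List` type is documented
+below), a writer can derive a new list while readers keep their snapshot,
+with no mutex involved:
+
+```go
+l := immutable.NewList[int]().Append(1)
+
+go func(snapshot *immutable.List[int]) {
+	fmt.Println(snapshot.Len()) // always 1; the snapshot never changes
+}(l)
+
+l2 := l.Append(2) // returns a new list; l itself is unchanged
+fmt.Println(l.Len(), l2.Len()) // 1 2
+```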
+ + The collection types in this library are meant to mimic Go built-in collections + such as `slice` and `map`. The primary usage difference between Go collections + and `immutable` collections is that `immutable` collections always return a new + collection on mutation so you will need to save the new reference. + +Immutable collections are not for every situation, however, as they can incur +additional CPU and memory overhead. Please evaluate the cost/benefit for your +particular project. + +Special thanks to the [Immutable.js](https://immutable-js.github.io/immutable-js/) +team as the `List` & `Map` implementations are loose ports from that project. + + +## List + +The `List` type represents an ordered, indexed collection of values and operates +similarly to a Go slice. It supports efficient append, prepend, update, and +slice operations. + + +### Adding list elements + +Elements can be added to the end of the list with the `Append()` method or added +to the beginning of the list with the `Prepend()` method. Unlike Go slices, +prepending is as efficient as appending. + +```go +// Create a list with 3 elements. +l := immutable.NewList[string]() +l = l.Append("foo") +l = l.Append("bar") +l = l.Prepend("baz") + +fmt.Println(l.Len()) // 3 +fmt.Println(l.Get(0)) // "baz" +fmt.Println(l.Get(1)) // "foo" +fmt.Println(l.Get(2)) // "bar" +``` + +Note that each change to the list results in a new list being created. These +lists are all snapshots at that point in time and cannot be changed, so they +are safe to share between multiple goroutines. + +### Updating list elements + +You can also overwrite existing elements by using the `Set()` method. In the +following example, we'll update the second element in our list and assign the +new list to a new variable. You can see that our old `l` variable retains a +snapshot of the original value. + +```go +l := immutable.NewList[string]() +l = l.Append("foo") +l = l.Append("bar") +newList := l.Set(1, "baz") + +fmt.Println(l.Get(1)) // "bar" +fmt.Println(newList.Get(1)) // "baz" +``` + +### Deriving sublists + +You can create a sublist by using the `Slice()` method. This method works with +the same rules as subslicing a Go slice: + +```go +l = l.Slice(0, 2) + +fmt.Println(l.Len()) // 2 +fmt.Println(l.Get(0)) // "baz" +fmt.Println(l.Get(1)) // "foo" +``` + +Please note that since `List` follows the same rules as slices, it will panic if +you try to `Get()`, `Set()`, or `Slice()` with indexes that are outside of +the range of the `List`. + + + +### Iterating lists + +Iterators provide a clean, simple way to iterate over the elements of the list +in order. This is more efficient than simply calling `Get()` for each index. + +Below is an example of iterating over all elements of our list from above: + +```go +itr := l.Iterator() +for !itr.Done() { + index, value, _ := itr.Next() + fmt.Printf("Index %d equals %v\n", index, value) +} + +// Index 0 equals baz +// Index 1 equals foo +``` + +By default, iterators start from index zero; however, the `Seek()` method can be +used to jump to a given index. + + +### Efficiently building lists + +If you are building large lists, it is significantly more efficient to use the +`ListBuilder`. It uses nearly the same API as `List` except that it updates +a list in-place until you are ready to use it. This can improve bulk list +building by 10x or more.
+ +```go +b := immutable.NewListBuilder[string]() +b.Append("foo") +b.Append("bar") +b.Set(1, "baz") + +l := b.List() +fmt.Println(l.Get(0)) // "foo" +fmt.Println(l.Get(1)) // "baz" +``` + +Builders are invalid after the call to `List()`. + + +## Map + +The `Map` represents an associative array that maps unique keys to values. It +acts similarly to the built-in Go `map` type and is implemented +as a [Hash-Array Mapped Trie](https://lampwww.epfl.ch/papers/idealhashtrees.pdf). + +Maps require a `Hasher` to hash keys and check for equality. There are built-in +hasher implementations for most primitive types such as `int`, `uint`, `string`, +and `[]byte` keys. You may pass in a `nil` hasher to `NewMap()` if you are using +one of these key types. + + +### Setting map key/value pairs + +You can add a key/value pair to the map by using the `Set()` method. It will +add the key if it does not exist or it will overwrite the value for the key if +it does exist. + +Values may be fetched for a key using the `Get()` method. This method returns +the value as well as a flag indicating if the key existed. The flag is useful +for distinguishing a zero value that was explicitly set for a key from a key +that does not exist. + +```go +m := immutable.NewMap[string,int](nil) +m = m.Set("jane", 100) +m = m.Set("susy", 200) +m = m.Set("jane", 300) // overwrite + +fmt.Println(m.Len()) // 2 + +v, ok := m.Get("jane") +fmt.Println(v, ok) // 300 true + +v, ok = m.Get("susy") +fmt.Println(v, ok) // 200 true + +v, ok = m.Get("john") +fmt.Println(v, ok) // 0 false +``` + + +### Removing map keys + +Keys may be removed from the map by using the `Delete()` method. If the key does +not exist then the original map is returned instead of a new one. + +```go +m := immutable.NewMap[string,int](nil) +m = m.Set("jane", 100) +m = m.Delete("jane") + +fmt.Println(m.Len()) // 0 + +v, ok := m.Get("jane") +fmt.Println(v, ok) // 0 false +``` + + +### Iterating maps + +Maps are unsorted; however, iterators can be used to loop over all key/value +pairs in the collection. Unlike Go maps, iterators are deterministic when +iterating over key/value pairs. + +```go +m := immutable.NewMap[string,int](nil) +m = m.Set("jane", 100) +m = m.Set("susy", 200) + +itr := m.Iterator() +for !itr.Done() { + k, v := itr.Next() + fmt.Println(k, v) +} + +// susy 200 +// jane 100 +``` + +Note that you should not rely on two maps with the same key/value pairs to +iterate in the same order. Ordering can be insertion-order dependent when two +keys generate the same hash. + + +### Efficiently building maps + +If you are executing multiple mutations on a map, it can be much more efficient +to use the `MapBuilder`. It uses nearly the same API as `Map` except that it +updates a map in-place until you are ready to use it. + +```go +b := immutable.NewMapBuilder[string,int](nil) +b.Set("foo", 100) +b.Set("bar", 200) +b.Set("foo", 300) + +m := b.Map() +fmt.Println(m.Get("foo")) // 300 true +fmt.Println(m.Get("bar")) // 200 true +``` + +Builders are invalid after the call to `Map()`. + + +### Implementing a custom Hasher + +If you need to use a key type besides `int`, `uint`, `string`, or `[]byte` then +you'll need to create a custom `Hasher` implementation and pass it to `NewMap()` +on creation. + +Hashers are fairly simple. They only need to generate hashes for a given key +and check equality given two keys.
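+
+The two-method interface itself is shown below. As a rough sketch, a
+hypothetical case-insensitive hasher for string keys could look like this,
+and would be passed to the constructor as `immutable.NewMap[string,int](foldHasher{})`:
+
+```go
+type foldHasher struct{}
+
+// Hash returns an FNV-1a style hash of the lower-cased key.
+func (foldHasher) Hash(key string) uint32 {
+	h := uint32(2166136261)
+	for _, r := range strings.ToLower(key) {
+		h ^= uint32(r)
+		h *= 16777619
+	}
+	return h
+}
+
+// Equal lower-cases both keys so that equality agrees with Hash.
+func (foldHasher) Equal(a, b string) bool {
+	return strings.ToLower(a) == strings.ToLower(b)
+}
+```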
+ +```go +type Hasher[K constraints.Ordered] interface { + Hash(key K) uint32 + Equal(a, b K) bool +} +``` + +Please see the internal `intHasher`, `uintHasher`, `stringHasher`, and +`byteSliceHasher` for examples. + + +## Sorted Map + +The `SortedMap` represents an associative array that maps unique keys to values. +Unlike the `Map`, however, keys can be iterated over in-order. It is implemented +as a B+tree. + +Sorted maps require a `Comparer` to sort keys and check for equality. There are +built-in comparer implementations for `int`, `uint`, `string`, and `[]byte` keys. +You may pass a `nil` comparer to `NewSortedMap()` if you are using one of these +key types. + +The API is identical to the `Map` implementation. The sorted map also has a +companion `SortedMapBuilder` for more efficiently building maps. + + +### Implementing a custom Comparer + +If you need to use a key type besides `int`, `uint`, `string`, or `[]byte` +then you'll need to create a custom `Comparer` implementation and pass it to +`NewSortedMap()` on creation. + +Comparers have only one method, `Compare()`. It works the same as the +`strings.Compare()` function. It returns `-1` if `a` is less than `b`, returns +`1` if `a` is greater than `b`, and returns `0` if `a` is equal to `b`. + +```go +type Comparer[K constraints.Ordered] interface { + Compare(a, b K) int +} +``` + +Please see the internal `intComparer`, `uintComparer`, `stringComparer`, and +`byteSliceComparer` for examples. + + + +## Contributing + +The goal of `immutable` is to provide a stable, reasonably performant, immutable +collections library for Go that has a simple, idiomatic API. As such, additional +features and minor performance improvements will generally not be accepted. If +you have a suggestion for a clearer API or substantial performance improvement, +_please_ open an issue first to discuss. All pull requests without a related +issue will be closed immediately. + +Please submit issues relating to bugs & documentation improvements. + diff --git a/vendor/github.com/benbjohnson/immutable/immutable.go b/vendor/github.com/benbjohnson/immutable/immutable.go new file mode 100644 index 0000000000000..b189612363117 --- /dev/null +++ b/vendor/github.com/benbjohnson/immutable/immutable.go @@ -0,0 +1,2400 @@ +// Package immutable provides immutable collection types. +// +// Introduction +// +// Immutable collections provide an efficient, safe way to share collections +// of data while minimizing locks. The collections in this package provide +// List, Map, and SortedMap implementations. These act similarly to slices +// and maps, respectively, except that altering a collection returns a new +// copy of the collection with that change. +// +// Because collections are unable to change, they are safe for multiple +// goroutines to read from at the same time without a mutex. However, these +// types of collections come with increased CPU & memory usage as compared +// with Go's built-in collection types, so please evaluate for your specific +// use. +// +// Collection Types +// +// The List type provides an API similar to Go slices. It allows appending, +// prepending, and updating of elements. Elements can also be fetched by index +// or iterated over using a ListIterator. +// +// The Map & SortedMap types provide an API similar to Go maps. They allow +// values to be assigned to unique keys and allow for the deletion of keys. +// Values can be fetched by key and key/value pairs can be iterated over using +// the appropriate iterator type.
+// Both map types provide the same API. The SortedMap, however, provides
+// iteration over sorted keys while the Map provides iteration over unsorted
+// keys. Maps provide improved performance and memory usage as compared to
+// SortedMaps.
+//
+// Hashing and Sorting
+//
+// Map types require the use of a Hasher implementation to calculate hashes for
+// their keys and check for key equality. SortedMaps require the use of a
+// Comparer implementation to sort keys in the map.
+//
+// These collection types automatically provide built-in hashers and comparers
+// for int, string, and byte slice keys. If you are using one of these key types
+// then simply pass a nil into the constructor. Otherwise you will need to
+// implement a custom Hasher or Comparer type. Please see the provided
+// implementations for reference.
+package immutable
+
+import (
+	"fmt"
+	"math/bits"
+	"reflect"
+	"sort"
+	"strings"
+
+	"golang.org/x/exp/constraints"
+)
+
+// List is a dense, ordered, indexed collection. It is analogous to a slice
+// in Go. It can be updated by appending to the end of the list, prepending
+// values to the beginning of the list, or updating existing indexes in the
+// list.
+type List[T comparable] struct {
+	root   listNode[T] // root node
+	origin int         // offset to zero index element
+	size   int         // total number of elements in use
+}
+
+// NewList returns a new empty instance of List.
+func NewList[T comparable]() *List[T] {
+	return &List[T]{
+		root: &listLeafNode[T]{},
+	}
+}
+
+// clone returns a copy of the list.
+func (l *List[T]) clone() *List[T] {
+	other := *l
+	return &other
+}
+
+// Len returns the number of elements in the list.
+func (l *List[T]) Len() int {
+	return l.size
+}
+
+// cap returns the total number of possible elements for the current depth.
+func (l *List[T]) cap() int {
+	return 1 << (l.root.depth() * listNodeBits)
+}
+
+// Get returns the value at the given index. Similar to slices, this method will
+// panic if index is below zero or is greater than or equal to the list size.
+func (l *List[T]) Get(index int) T {
+	if index < 0 || index >= l.size {
+		panic(fmt.Sprintf("immutable.List.Get: index %d out of bounds", index))
+	}
+	return l.root.get(l.origin + index)
+}
+
+// Set returns a new list with value set at index. Similar to slices, this
+// method will panic if index is below zero or if the index is greater than
+// or equal to the list size.
+func (l *List[T]) Set(index int, value T) *List[T] {
+	return l.set(index, value, false)
+}
+
+func (l *List[T]) set(index int, value T, mutable bool) *List[T] {
+	if index < 0 || index >= l.size {
+		panic(fmt.Sprintf("immutable.List.Set: index %d out of bounds", index))
+	}
+	other := l
+	if !mutable {
+		other = l.clone()
+	}
+	other.root = other.root.set(l.origin+index, value, mutable)
+	return other
+}
+
+// Append returns a new list with value added to the end of the list.
+func (l *List[T]) Append(value T) *List[T] {
+	return l.append(value, false)
+}
+
+func (l *List[T]) append(value T, mutable bool) *List[T] {
+	other := l
+	if !mutable {
+		other = l.clone()
+	}
+
+	// Expand list to the right if no slots remain.
+	if other.size+other.origin >= l.cap() {
+		newRoot := &listBranchNode[T]{d: other.root.depth() + 1}
+		newRoot.children[0] = other.root
+		other.root = newRoot
+	}
+
+	// Increase size and set the last element to the new value.
+	other.size++
+	other.root = other.root.set(other.origin+other.size-1, value, mutable)
+	return other
+}
+
+// Prepend returns a new list with value added to the beginning of the list.
+func (l *List[T]) Prepend(value T) *List[T] {
+	return l.prepend(value, false)
+}
+
+func (l *List[T]) prepend(value T, mutable bool) *List[T] {
+	other := l
+	if !mutable {
+		other = l.clone()
+	}
+
+	// Expand list to the left if no slots remain.
+	if other.origin == 0 {
+		newRoot := &listBranchNode[T]{d: other.root.depth() + 1}
+		newRoot.children[listNodeSize-1] = other.root
+		other.root = newRoot
+		other.origin += (listNodeSize - 1) << (other.root.depth() * listNodeBits)
+	}
+
+	// Increase size and move origin back. Update first element to value.
+	other.size++
+	other.origin--
+	other.root = other.root.set(other.origin, value, mutable)
+	return other
+}
+
+// Slice returns a new list of elements between start index and end index.
+// Similar to slices, this method will panic if start or end are below zero or
+// greater than the list size. A panic will also occur if start is greater than
+// end.
+//
+// Unlike Go slices, references to inaccessible elements will be automatically
+// removed so they can be garbage collected.
+func (l *List[T]) Slice(start, end int) *List[T] {
+	return l.slice(start, end, false)
+}
+
+func (l *List[T]) slice(start, end int, mutable bool) *List[T] {
+	// Panics similar to Go slices.
+	if start < 0 || start > l.size {
+		panic(fmt.Sprintf("immutable.List.Slice: start index %d out of bounds", start))
+	} else if end < 0 || end > l.size {
+		panic(fmt.Sprintf("immutable.List.Slice: end index %d out of bounds", end))
+	} else if start > end {
+		panic(fmt.Sprintf("immutable.List.Slice: invalid slice index: [%d:%d]", start, end))
+	}
+
+	// Return the same list if the start and end are the entire range.
+	if start == 0 && end == l.size {
+		return l
+	}
+
+	// Create copy, if immutable.
+	other := l
+	if !mutable {
+		other = l.clone()
+	}
+
+	// Update origin/size.
+	other.origin = l.origin + start
+	other.size = end - start
+
+	// Contract tree while the start & end are in the same child node.
+	for other.root.depth() > 1 {
+		i := (other.origin >> (other.root.depth() * listNodeBits)) & listNodeMask
+		j := ((other.origin + other.size - 1) >> (other.root.depth() * listNodeBits)) & listNodeMask
+		if i != j {
+			break // branch contains at least two nodes, exit
+		}
+
+		// Replace the current root with the single child & update origin offset.
+		other.origin -= i << (other.root.depth() * listNodeBits)
+		other.root = other.root.(*listBranchNode[T]).children[i]
+	}
+
+	// Ensure all references are removed before start & after end.
+	other.root = other.root.deleteBefore(other.origin, mutable)
+	other.root = other.root.deleteAfter(other.origin+other.size-1, mutable)
+
+	return other
+}
+
+// Iterator returns a new iterator for this list positioned at the first index.
+func (l *List[T]) Iterator() *ListIterator[T] {
+	itr := &ListIterator[T]{list: l}
+	itr.First()
+	return itr
+}
+
+// ListBuilder represents an efficient builder for creating new Lists.
+type ListBuilder[T comparable] struct {
+	list *List[T] // current state
+}
+
+// NewListBuilder returns a new instance of ListBuilder.
+func NewListBuilder[T comparable]() *ListBuilder[T] {
+	return &ListBuilder[T]{list: NewList[T]()}
+}
+
+// List returns the current copy of the list.
+// The builder should not be used again after this call.
+func (b *ListBuilder[T]) List() *List[T] { + assert(b.list != nil, "immutable.ListBuilder.List(): duplicate call to fetch list") + list := b.list + b.list = nil + return list +} + +// Len returns the number of elements in the underlying list. +func (b *ListBuilder[T]) Len() int { + assert(b.list != nil, "immutable.ListBuilder: builder invalid after List() invocation") + return b.list.Len() +} + +// Get returns the value at the given index. Similar to slices, this method will +// panic if index is below zero or is greater than or equal to the list size. +func (b *ListBuilder[T]) Get(index int) T { + assert(b.list != nil, "immutable.ListBuilder: builder invalid after List() invocation") + return b.list.Get(index) +} + +// Set updates the value at the given index. Similar to slices, this method will +// panic if index is below zero or if the index is greater than or equal to the +// list size. +func (b *ListBuilder[T]) Set(index int, value T) { + assert(b.list != nil, "immutable.ListBuilder: builder invalid after List() invocation") + b.list = b.list.set(index, value, true) +} + +// Append adds value to the end of the list. +func (b *ListBuilder[T]) Append(value T) { + assert(b.list != nil, "immutable.ListBuilder: builder invalid after List() invocation") + b.list = b.list.append(value, true) +} + +// Prepend adds value to the beginning of the list. +func (b *ListBuilder[T]) Prepend(value T) { + assert(b.list != nil, "immutable.ListBuilder: builder invalid after List() invocation") + b.list = b.list.prepend(value, true) +} + +// Slice updates the list with a sublist of elements between start and end index. +// See List.Slice() for more details. +func (b *ListBuilder[T]) Slice(start, end int) { + assert(b.list != nil, "immutable.ListBuilder: builder invalid after List() invocation") + b.list = b.list.slice(start, end, true) +} + +// Iterator returns a new iterator for the underlying list. +func (b *ListBuilder[T]) Iterator() *ListIterator[T] { + assert(b.list != nil, "immutable.ListBuilder: builder invalid after List() invocation") + return b.list.Iterator() +} + +// Constants for bit shifts used for levels in the List trie. +const ( + listNodeBits = 5 + listNodeSize = 1 << listNodeBits + listNodeMask = listNodeSize - 1 +) + +// listNode represents either a branch or leaf node in a List. +type listNode[T comparable] interface { + depth() uint + get(index int) T + set(index int, v T, mutable bool) listNode[T] + + containsBefore(index int) bool + containsAfter(index int) bool + + deleteBefore(index int, mutable bool) listNode[T] + deleteAfter(index int, mutable bool) listNode[T] +} + +// newListNode returns a leaf node for depth zero, otherwise returns a branch node. +func newListNode[T comparable](depth uint) listNode[T] { + if depth == 0 { + return &listLeafNode[T]{} + } + return &listBranchNode[T]{d: depth} +} + +// listBranchNode represents a branch of a List tree at a given depth. +type listBranchNode[T comparable] struct { + d uint // depth + children [listNodeSize]listNode[T] +} + +// depth returns the depth of this branch node from the leaf. +func (n *listBranchNode[T]) depth() uint { return n.d } + +// get returns the child node at the segment of the index for this depth. +func (n *listBranchNode[T]) get(index int) T { + idx := (index >> (n.d * listNodeBits)) & listNodeMask + return n.children[idx].get(index) +} + +// set recursively updates the value at index for each lower depth from the node. 
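+// For example, with listNodeBits = 5 a node at depth d selects its child slot
+// from bits [5d, 5d+5) of the index, i.e. (index >> (5*d)) & 31.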
+func (n *listBranchNode[T]) set(index int, v T, mutable bool) listNode[T] {
+	idx := (index >> (n.d * listNodeBits)) & listNodeMask
+
+	// Find child for the given value in the branch. Create new if it doesn't exist.
+	child := n.children[idx]
+	if child == nil {
+		child = newListNode[T](n.depth() - 1)
+	}
+
+	// Return a copy of this branch with the new child.
+	var other *listBranchNode[T]
+	if mutable {
+		other = n
+	} else {
+		tmp := *n
+		other = &tmp
+	}
+	other.children[idx] = child.set(index, v, mutable)
+	return other
+}
+
+// containsBefore returns true if non-nil values exist between [0,index).
+func (n *listBranchNode[T]) containsBefore(index int) bool {
+	idx := (index >> (n.d * listNodeBits)) & listNodeMask
+
+	// Quickly check if any direct children exist before this segment of the index.
+	for i := 0; i < idx; i++ {
+		if n.children[i] != nil {
+			return true
+		}
+	}
+
+	// Recursively check for children directly at the given index at this segment.
+	if n.children[idx] != nil && n.children[idx].containsBefore(index) {
+		return true
+	}
+	return false
+}
+
+// containsAfter returns true if non-nil values exist between (index,listNodeSize).
+func (n *listBranchNode[T]) containsAfter(index int) bool {
+	idx := (index >> (n.d * listNodeBits)) & listNodeMask
+
+	// Quickly check if any direct children exist after this segment of the index.
+	for i := idx + 1; i < len(n.children); i++ {
+		if n.children[i] != nil {
+			return true
+		}
+	}
+
+	// Recursively check for children directly at the given index at this segment.
+	if n.children[idx] != nil && n.children[idx].containsAfter(index) {
+		return true
+	}
+	return false
+}
+
+// deleteBefore returns a new node with all elements before index removed.
+func (n *listBranchNode[T]) deleteBefore(index int, mutable bool) listNode[T] {
+	// Ignore if no nodes exist before the given index.
+	if !n.containsBefore(index) {
+		return n
+	}
+
+	// Return a copy with any nodes prior to the index removed.
+	idx := (index >> (n.d * listNodeBits)) & listNodeMask
+
+	var other *listBranchNode[T]
+	if mutable {
+		other = n
+		for i := 0; i < idx; i++ {
+			n.children[i] = nil
+		}
+	} else {
+		other = &listBranchNode[T]{d: n.d}
+		copy(other.children[idx:][:], n.children[idx:][:])
+	}
+
+	if other.children[idx] != nil {
+		other.children[idx] = other.children[idx].deleteBefore(index, mutable)
+	}
+	return other
+}
+
+// deleteAfter returns a new node with all elements after index removed.
+func (n *listBranchNode[T]) deleteAfter(index int, mutable bool) listNode[T] {
+	// Ignore if no nodes exist after the given index.
+	if !n.containsAfter(index) {
+		return n
+	}
+
+	// Return a copy with any nodes after the index removed.
+	idx := (index >> (n.d * listNodeBits)) & listNodeMask
+
+	var other *listBranchNode[T]
+	if mutable {
+		other = n
+		for i := idx + 1; i < len(n.children); i++ {
+			n.children[i] = nil
+		}
+	} else {
+		other = &listBranchNode[T]{d: n.d}
+		copy(other.children[:idx+1], n.children[:idx+1])
+	}
+
+	if other.children[idx] != nil {
+		other.children[idx] = other.children[idx].deleteAfter(index, mutable)
+	}
+	return other
+}
+
+// listLeafNode represents a leaf node in a List.
+type listLeafNode[T comparable] struct {
+	children [listNodeSize]T
+}
+
+// depth always returns 0 for leaf nodes.
+func (n *listLeafNode[T]) depth() uint { return 0 }
+
+// get returns the value at the given index.
+func (n *listLeafNode[T]) get(index int) T {
+	return n.children[index&listNodeMask]
+}
+
+// set returns a copy of the node with the value at the index updated to v.
+func (n *listLeafNode[T]) set(index int, v T, mutable bool) listNode[T] {
+	idx := index & listNodeMask
+	var other *listLeafNode[T]
+	if mutable {
+		other = n
+	} else {
+		tmp := *n
+		other = &tmp
+	}
+	other.children[idx] = v
+	return other
+}
+
+// containsBefore returns true if non-zero values exist between [0,index).
+func (n *listLeafNode[T]) containsBefore(index int) bool {
+	idx := index & listNodeMask
+	var empty T
+	for i := 0; i < idx; i++ {
+		if n.children[i] != empty {
+			return true
+		}
+	}
+	return false
+}
+
+// containsAfter returns true if non-zero values exist between (index,listNodeSize).
+func (n *listLeafNode[T]) containsAfter(index int) bool {
+	idx := index & listNodeMask
+	var empty T
+	for i := idx + 1; i < len(n.children); i++ {
+		if n.children[i] != empty {
+			return true
+		}
+	}
+	return false
+}
+
+// deleteBefore returns a new node with all elements before index removed.
+func (n *listLeafNode[T]) deleteBefore(index int, mutable bool) listNode[T] {
+	if !n.containsBefore(index) {
+		return n
+	}
+
+	idx := index & listNodeMask
+	var other *listLeafNode[T]
+	if mutable {
+		other = n
+		var empty T
+		for i := 0; i < idx; i++ {
+			other.children[i] = empty
+		}
+	} else {
+		other = &listLeafNode[T]{}
+		copy(other.children[idx:][:], n.children[idx:][:])
+	}
+	return other
+}
+
+// deleteAfter returns a new node with all elements after index removed.
+func (n *listLeafNode[T]) deleteAfter(index int, mutable bool) listNode[T] {
+	if !n.containsAfter(index) {
+		return n
+	}
+
+	idx := index & listNodeMask
+	var other *listLeafNode[T]
+	if mutable {
+		other = n
+		var empty T
+		for i := idx + 1; i < len(n.children); i++ {
+			other.children[i] = empty
+		}
+	} else {
+		other = &listLeafNode[T]{}
+		copy(other.children[:idx+1][:], n.children[:idx+1][:])
+	}
+	return other
+}
+
+// ListIterator represents an ordered iterator over a list.
+type ListIterator[T comparable] struct {
+	list  *List[T] // source list
+	index int      // current index position
+
+	stack [32]listIteratorElem[T] // search stack
+	depth int                     // stack depth
+}
+
+// Done returns true if no more elements remain in the iterator.
+func (itr *ListIterator[T]) Done() bool {
+	return itr.index < 0 || itr.index >= itr.list.Len()
+}
+
+// First positions the iterator on the first index.
+// If source list is empty then no change is made.
+func (itr *ListIterator[T]) First() {
+	if itr.list.Len() != 0 {
+		itr.Seek(0)
+	}
+}
+
+// Last positions the iterator on the last index.
+// If source list is empty then no change is made.
+func (itr *ListIterator[T]) Last() {
+	if n := itr.list.Len(); n != 0 {
+		itr.Seek(n - 1)
+	}
+}
+
+// Seek moves the iterator position to the given index in the list.
+// Similar to Go slices, this method will panic if index is below zero or if
+// the index is greater than or equal to the list size.
+func (itr *ListIterator[T]) Seek(index int) {
+	// Panic similar to Go slices.
+	if index < 0 || index >= itr.list.Len() {
+		panic(fmt.Sprintf("immutable.ListIterator.Seek: index %d out of bounds", index))
+	}
+	itr.index = index
+
+	// Reset to the bottom of the stack and seek to the correct position.
+	itr.stack[0] = listIteratorElem[T]{node: itr.list.root}
+	itr.depth = 0
+	itr.seek(index)
+}
+
+// Next returns the current index and its value & moves the iterator forward.
+// Returns an index of -1 if there are no more elements to return.
+func (itr *ListIterator[T]) Next() (index int, value T) {
+	// Exit immediately if there are no elements remaining.
+	var empty T
+	if itr.Done() {
+		return -1, empty
+	}
+
+	// Retrieve current index & value.
+	elem := &itr.stack[itr.depth]
+	index, value = itr.index, elem.node.(*listLeafNode[T]).children[elem.index]
+
+	// Increase index. If index is at the end then return immediately.
+	itr.index++
+	if itr.Done() {
+		return index, value
+	}
+
+	// Move up stack until we find a node that has remaining position ahead.
+	for ; itr.depth > 0 && itr.stack[itr.depth].index >= listNodeSize-1; itr.depth-- {
+	}
+
+	// Seek to correct position from current depth.
+	itr.seek(itr.index)
+
+	return index, value
+}
+
+// Prev returns the current index and value and moves the iterator backward.
+// Returns an index of -1 if there are no more elements to return.
+func (itr *ListIterator[T]) Prev() (index int, value T) {
+	// Exit immediately if there are no elements remaining.
+	var empty T
+	if itr.Done() {
+		return -1, empty
+	}
+
+	// Retrieve current index & value.
+	elem := &itr.stack[itr.depth]
+	index, value = itr.index, elem.node.(*listLeafNode[T]).children[elem.index]
+
+	// Decrease index. If index is past the beginning then return immediately.
+	itr.index--
+	if itr.Done() {
+		return index, value
+	}
+
+	// Move up stack until we find a node that has remaining position behind.
+	for ; itr.depth > 0 && itr.stack[itr.depth].index == 0; itr.depth-- {
+	}
+
+	// Seek to correct position from current depth.
+	itr.seek(itr.index)
+
+	return index, value
+}
+
+// seek positions the stack to the given index from the current depth.
+// Elements and indexes below the current depth are assumed to be correct.
+func (itr *ListIterator[T]) seek(index int) {
+	// Iterate over each level until we reach a leaf node.
+	for {
+		elem := &itr.stack[itr.depth]
+		elem.index = ((itr.list.origin + index) >> (elem.node.depth() * listNodeBits)) & listNodeMask
+
+		switch node := elem.node.(type) {
+		case *listBranchNode[T]:
+			child := node.children[elem.index]
+			itr.stack[itr.depth+1] = listIteratorElem[T]{node: child}
+			itr.depth++
+		case *listLeafNode[T]:
+			return
+		}
+	}
+}
+
+// listIteratorElem represents the node and its child index within the stack.
+type listIteratorElem[T comparable] struct {
+	node  listNode[T]
+	index int
+}
+
+// Size thresholds for each type of branch node.
+const (
+	maxArrayMapSize      = 8
+	maxBitmapIndexedSize = 16
+)
+
+// Segment bit shifts within the map tree.
+const (
+	mapNodeBits = 5
+	mapNodeSize = 1 << mapNodeBits
+	mapNodeMask = mapNodeSize - 1
+)
+
+// Map represents an immutable hash map implementation. The map uses a Hasher
+// to generate hashes and check for equality of key values.
+//
+// It is implemented as a Hash Array Mapped Trie.
+type Map[K constraints.Ordered, V any] struct {
+	size   int           // total number of key/value pairs
+	root   mapNode[K, V] // root node of trie
+	hasher Hasher[K]     // hasher implementation
+}
+
+// NewMap returns a new instance of Map. If hasher is nil, a default hasher
+// implementation will automatically be chosen based on the first key added.
+// Default hasher implementations only exist for int, string, and byte slice types.
+func NewMap[K constraints.Ordered, V any](hasher Hasher[K]) *Map[K, V] {
+	return &Map[K, V]{
+		hasher: hasher,
+	}
+}
+
+// Len returns the number of elements in the map.
+func (m *Map[K, V]) Len() int {
+	return m.size
+}
+
+// clone returns a shallow copy of m.
+func (m *Map[K, V]) clone() *Map[K, V] {
+	other := *m
+	return &other
+}
+
+// Get returns the value for a given key and a flag indicating whether the
+// key exists. This flag distinguishes a zero value set on a key from a
+// non-existent key in the map.
+func (m *Map[K, V]) Get(key K) (value V, ok bool) {
+	var empty V
+	if m.root == nil {
+		return empty, false
+	}
+	keyHash := m.hasher.Hash(key)
+	return m.root.get(key, 0, keyHash, m.hasher)
+}
+
+// Set returns a map with the key set to the new value. A nil value is allowed.
+//
+// This function will return a new map even if the updated value is the same as
+// the existing value because Map does not track value equality.
+func (m *Map[K, V]) Set(key K, value V) *Map[K, V] {
+	return m.set(key, value, false)
+}
+
+func (m *Map[K, V]) set(key K, value V, mutable bool) *Map[K, V] {
+	// Set a hasher on the first value if one does not already exist.
+	hasher := m.hasher
+	if hasher == nil {
+		hasher = NewHasher(key)
+	}
+
+	// Generate copy if necessary.
+	other := m
+	if !mutable {
+		other = m.clone()
+	}
+	other.hasher = hasher
+
+	// If the map is empty, initialize with a simple array node.
+	if m.root == nil {
+		other.size = 1
+		other.root = &mapArrayNode[K, V]{entries: []mapEntry[K, V]{{key: key, value: value}}}
+		return other
+	}
+
+	// Otherwise copy the map and delegate insertion to the root.
+	// Resized will return true if the key does not currently exist.
+	var resized bool
+	other.root = m.root.set(key, value, 0, hasher.Hash(key), hasher, mutable, &resized)
+	if resized {
+		other.size++
+	}
+	return other
+}
+
+// Delete returns a map with the given key removed.
+// Removing a non-existent key will cause this method to return the same map.
+func (m *Map[K, V]) Delete(key K) *Map[K, V] {
+	return m.delete(key, false)
+}
+
+func (m *Map[K, V]) delete(key K, mutable bool) *Map[K, V] {
+	// Return original map if no keys exist.
+	if m.root == nil {
+		return m
+	}
+
+	// If the delete did not change the node then return the original map.
+	var resized bool
+	newRoot := m.root.delete(key, 0, m.hasher.Hash(key), m.hasher, mutable, &resized)
+	if !resized {
+		return m
+	}
+
+	// Generate copy if necessary.
+	other := m
+	if !mutable {
+		other = m.clone()
+	}
+
+	// Return copy of map with new root and decreased size.
+	other.size = m.size - 1
+	other.root = newRoot
+	return other
+}
+
+// Iterator returns a new iterator for the map.
+func (m *Map[K, V]) Iterator() *MapIterator[K, V] {
+	itr := &MapIterator[K, V]{m: m}
+	itr.First()
+	return itr
+}
+
+// MapBuilder represents an efficient builder for creating Maps.
+type MapBuilder[K constraints.Ordered, V any] struct {
+	m *Map[K, V] // current state
+}
+
+// NewMapBuilder returns a new instance of MapBuilder.
+func NewMapBuilder[K constraints.Ordered, V any](hasher Hasher[K]) *MapBuilder[K, V] {
+	return &MapBuilder[K, V]{m: NewMap[K, V](hasher)}
+}
+
+// Map returns the underlying map. The builder is invalid after this call and
+// will panic on a second invocation.
+func (b *MapBuilder[K, V]) Map() *Map[K, V] {
+	assert(b.m != nil, "immutable.MapBuilder.Map(): duplicate call to fetch map")
+	m := b.m
+	b.m = nil
+	return m
+}
+
+// Len returns the number of elements in the underlying map.
+func (b *MapBuilder[K, V]) Len() int { + assert(b.m != nil, "immutable.MapBuilder: builder invalid after Map() invocation") + return b.m.Len() +} + +// Get returns the value for the given key. +func (b *MapBuilder[K, V]) Get(key K) (value V, ok bool) { + assert(b.m != nil, "immutable.MapBuilder: builder invalid after Map() invocation") + return b.m.Get(key) +} + +// Set sets the value of the given key. See Map.Set() for additional details. +func (b *MapBuilder[K, V]) Set(key K, value V) { + assert(b.m != nil, "immutable.MapBuilder: builder invalid after Map() invocation") + b.m = b.m.set(key, value, true) +} + +// Delete removes the given key. See Map.Delete() for additional details. +func (b *MapBuilder[K, V]) Delete(key K) { + assert(b.m != nil, "immutable.MapBuilder: builder invalid after Map() invocation") + b.m = b.m.delete(key, true) +} + +// Iterator returns a new iterator for the underlying map. +func (b *MapBuilder[K, V]) Iterator() *MapIterator[K, V] { + assert(b.m != nil, "immutable.MapBuilder: builder invalid after Map() invocation") + return b.m.Iterator() +} + +// mapNode represents any node in the map tree. +type mapNode[K constraints.Ordered, V any] interface { + get(key K, shift uint, keyHash uint32, h Hasher[K]) (value V, ok bool) + set(key K, value V, shift uint, keyHash uint32, h Hasher[K], mutable bool, resized *bool) mapNode[K, V] + delete(key K, shift uint, keyHash uint32, h Hasher[K], mutable bool, resized *bool) mapNode[K, V] +} + +var _ mapNode[string, any] = (*mapArrayNode[string, any])(nil) +var _ mapNode[string, any] = (*mapBitmapIndexedNode[string, any])(nil) +var _ mapNode[string, any] = (*mapHashArrayNode[string, any])(nil) +var _ mapNode[string, any] = (*mapValueNode[string, any])(nil) +var _ mapNode[string, any] = (*mapHashCollisionNode[string, any])(nil) + +// mapLeafNode represents a node that stores a single key hash at the leaf of the map tree. +type mapLeafNode[K constraints.Ordered, V any] interface { + mapNode[K, V] + keyHashValue() uint32 +} + +var _ mapLeafNode[string, any] = (*mapValueNode[string, any])(nil) +var _ mapLeafNode[string, any] = (*mapHashCollisionNode[string, any])(nil) + +// mapArrayNode is a map node that stores key/value pairs in a slice. +// Entries are stored in insertion order. An array node expands into a bitmap +// indexed node once a given threshold size is crossed. +type mapArrayNode[K constraints.Ordered, V any] struct { + entries []mapEntry[K, V] +} + +// indexOf returns the entry index of the given key. Returns -1 if key not found. +func (n *mapArrayNode[K, V]) indexOf(key K, h Hasher[K]) int { + for i := range n.entries { + if h.Equal(n.entries[i].key, key) { + return i + } + } + return -1 +} + +// get returns the value for the given key. +func (n *mapArrayNode[K, V]) get(key K, shift uint, keyHash uint32, h Hasher[K]) (value V, ok bool) { + i := n.indexOf(key, h) + if i == -1 { + return value, false + } + return n.entries[i].value, true +} + +// set inserts or updates the value for a given key. If the key is inserted and +// the new size crosses the max size threshold, a bitmap indexed node is returned. +func (n *mapArrayNode[K, V]) set(key K, value V, shift uint, keyHash uint32, h Hasher[K], mutable bool, resized *bool) mapNode[K, V] { + idx := n.indexOf(key, h) + + // Mark as resized if the key doesn't exist. + if idx == -1 { + *resized = true + } + + // If we are adding and it crosses the max size threshold, expand the node. + // We do this by continually setting the entries to a value node and expanding. 
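+	// With maxArrayMapSize = 8, the ninth distinct key is the first one to
+	// trigger this expansion.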
+ if idx == -1 && len(n.entries) >= maxArrayMapSize { + var node mapNode[K, V] = newMapValueNode(h.Hash(key), key, value) + for _, entry := range n.entries { + node = node.set(entry.key, entry.value, 0, h.Hash(entry.key), h, false, resized) + } + return node + } + + // Update in-place if mutable. + if mutable { + if idx != -1 { + n.entries[idx] = mapEntry[K, V]{key, value} + } else { + n.entries = append(n.entries, mapEntry[K, V]{key, value}) + } + return n + } + + // Update existing entry if a match is found. + // Otherwise append to the end of the element list if it doesn't exist. + var other mapArrayNode[K, V] + if idx != -1 { + other.entries = make([]mapEntry[K, V], len(n.entries)) + copy(other.entries, n.entries) + other.entries[idx] = mapEntry[K, V]{key, value} + } else { + other.entries = make([]mapEntry[K, V], len(n.entries)+1) + copy(other.entries, n.entries) + other.entries[len(other.entries)-1] = mapEntry[K, V]{key, value} + } + return &other +} + +// delete removes the given key from the node. Returns the same node if key does +// not exist. Returns a nil node when removing the last entry. +func (n *mapArrayNode[K, V]) delete(key K, shift uint, keyHash uint32, h Hasher[K], mutable bool, resized *bool) mapNode[K, V] { + idx := n.indexOf(key, h) + + // Return original node if key does not exist. + if idx == -1 { + return n + } + *resized = true + + // Return nil if this node will contain no nodes. + if len(n.entries) == 1 { + return nil + } + + // Update in-place, if mutable. + if mutable { + copy(n.entries[idx:], n.entries[idx+1:]) + n.entries[len(n.entries)-1] = mapEntry[K, V]{} + n.entries = n.entries[:len(n.entries)-1] + return n + } + + // Otherwise create a copy with the given entry removed. + other := &mapArrayNode[K, V]{entries: make([]mapEntry[K, V], len(n.entries)-1)} + copy(other.entries[:idx], n.entries[:idx]) + copy(other.entries[idx:], n.entries[idx+1:]) + return other +} + +// mapBitmapIndexedNode represents a map branch node with a variable number of +// node slots and indexed using a bitmap. Indexes for the node slots are +// calculated by counting the number of set bits before the target bit using popcount. +type mapBitmapIndexedNode[K constraints.Ordered, V any] struct { + bitmap uint32 + nodes []mapNode[K, V] +} + +// get returns the value for the given key. +func (n *mapBitmapIndexedNode[K, V]) get(key K, shift uint, keyHash uint32, h Hasher[K]) (value V, ok bool) { + bit := uint32(1) << ((keyHash >> shift) & mapNodeMask) + if (n.bitmap & bit) == 0 { + return value, false + } + child := n.nodes[bits.OnesCount32(n.bitmap&(bit-1))] + return child.get(key, shift+mapNodeBits, keyHash, h) +} + +// set inserts or updates the value for the given key. If a new key is inserted +// and the size crosses the max size threshold then a hash array node is returned. +func (n *mapBitmapIndexedNode[K, V]) set(key K, value V, shift uint, keyHash uint32, h Hasher[K], mutable bool, resized *bool) mapNode[K, V] { + // Extract the index for the bit segment of the key hash. + keyHashFrag := (keyHash >> shift) & mapNodeMask + + // Determine the bit based on the hash index. + bit := uint32(1) << keyHashFrag + exists := (n.bitmap & bit) != 0 + + // Mark as resized if the key doesn't exist. + if !exists { + *resized = true + } + + // Find index of node based on popcount of bits before it. + idx := bits.OnesCount32(n.bitmap & (bit - 1)) + + // If the node already exists, delegate set operation to it. + // If the node doesn't exist then create a simple value leaf node. 
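+	// Below, if the key is new and the node already holds more than
+	// maxBitmapIndexedSize (16) children, the branch is converted to a
+	// mapHashArrayNode.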
+	var newNode mapNode[K, V]
+	if exists {
+		newNode = n.nodes[idx].set(key, value, shift+mapNodeBits, keyHash, h, mutable, resized)
+	} else {
+		newNode = newMapValueNode[K, V](keyHash, key, value)
+	}
+
+	// Convert to a hash-array node once we exceed the max bitmap size.
+	// Copy each node based on their bit position within the bitmap.
+	if !exists && len(n.nodes) > maxBitmapIndexedSize {
+		var other mapHashArrayNode[K, V]
+		for i := uint(0); i < uint(len(other.nodes)); i++ {
+			if n.bitmap&(uint32(1)<<i) != 0 {
+				other.nodes[i] = n.nodes[other.count]
+				other.count++
+			}
+		}
+		other.nodes[keyHashFrag] = newNode
+		other.count++
+		return &other
+	}
+
+	// Update in-place if mutable.
+	if mutable {
+		if exists {
+			n.nodes[idx] = newNode
+		} else {
+			n.bitmap |= bit
+			n.nodes = append(n.nodes, nil)
+			copy(n.nodes[idx+1:], n.nodes[idx:])
+			n.nodes[idx] = newNode
+		}
+		return n
+	}
+
+	// If node exists at given slot then overwrite it with new node.
+	// Otherwise expand the node list and insert new node into appropriate position.
+	other := &mapBitmapIndexedNode[K, V]{bitmap: n.bitmap | bit}
+	if exists {
+		other.nodes = make([]mapNode[K, V], len(n.nodes))
+		copy(other.nodes, n.nodes)
+		other.nodes[idx] = newNode
+	} else {
+		other.nodes = make([]mapNode[K, V], len(n.nodes)+1)
+		copy(other.nodes, n.nodes[:idx])
+		other.nodes[idx] = newNode
+		copy(other.nodes[idx+1:], n.nodes[idx:])
+	}
+	return other
+}
+
+// delete removes the key from the node. Returns the same node if the key does
+// not exist. Returns nil if all child nodes are removed.
+func (n *mapBitmapIndexedNode[K, V]) delete(key K, shift uint, keyHash uint32, h Hasher[K], mutable bool, resized *bool) mapNode[K, V] {
+	bit := uint32(1) << ((keyHash >> shift) & mapNodeMask)
+
+	// Return original node if key does not exist.
+	if (n.bitmap & bit) == 0 {
+		return n
+	}
+
+	// Find index of node based on popcount of bits before it.
+	idx := bits.OnesCount32(n.bitmap & (bit - 1))
+
+	// Delegate delete to child node.
+	child := n.nodes[idx]
+	newChild := child.delete(key, shift+mapNodeBits, keyHash, h, mutable, resized)
+
+	// Return original node if key doesn't exist in child.
+	if !*resized {
+		return n
+	}
+
+	// Remove if returned child has been deleted.
+	if newChild == nil {
+		// If we won't have any children then return nil.
+		if len(n.nodes) == 1 {
+			return nil
+		}
+
+		// Update in-place if mutable.
+		if mutable {
+			n.bitmap ^= bit
+			copy(n.nodes[idx:], n.nodes[idx+1:])
+			n.nodes[len(n.nodes)-1] = nil
+			n.nodes = n.nodes[:len(n.nodes)-1]
+			return n
+		}
+
+		// Return copy with bit removed from bitmap and node removed from node list.
+		other := &mapBitmapIndexedNode[K, V]{bitmap: n.bitmap ^ bit, nodes: make([]mapNode[K, V], len(n.nodes)-1)}
+		copy(other.nodes[:idx], n.nodes[:idx])
+		copy(other.nodes[idx:], n.nodes[idx+1:])
+		return other
+	}
+
+	// Generate copy, if necessary.
+	other := n
+	if !mutable {
+		other = &mapBitmapIndexedNode[K, V]{bitmap: n.bitmap, nodes: make([]mapNode[K, V], len(n.nodes))}
+		copy(other.nodes, n.nodes)
+	}
+
+	// Update child.
+	other.nodes[idx] = newChild
+	return other
+}
+
+// mapHashArrayNode is a map branch node that stores nodes in a fixed length
+// array. Child nodes are indexed by their index bit segment for the current depth.
+type mapHashArrayNode[K constraints.Ordered, V any] struct {
+	count uint                       // number of set nodes
+	nodes [mapNodeSize]mapNode[K, V] // child node slots, may contain empties
+}
+
+// clone returns a shallow copy of n.
+func (n *mapHashArrayNode[K, V]) clone() *mapHashArrayNode[K, V] {
+	other := *n
+	return &other
+}
+
+// get returns the value for the given key.
+func (n *mapHashArrayNode[K, V]) get(key K, shift uint, keyHash uint32, h Hasher[K]) (value V, ok bool) {
+	node := n.nodes[(keyHash>>shift)&mapNodeMask]
+	if node == nil {
+		return value, false
+	}
+	return node.get(key, shift+mapNodeBits, keyHash, h)
+}
+
+// set returns a node with the value set for the given key.
+func (n *mapHashArrayNode[K, V]) set(key K, value V, shift uint, keyHash uint32, h Hasher[K], mutable bool, resized *bool) mapNode[K, V] {
+	idx := (keyHash >> shift) & mapNodeMask
+	node := n.nodes[idx]
+
+	// If node at index doesn't exist, create a simple value leaf node.
+	// Otherwise delegate set to child node.
+	var newNode mapNode[K, V]
+	if node == nil {
+		*resized = true
+		newNode = newMapValueNode(keyHash, key, value)
+	} else {
+		newNode = node.set(key, value, shift+mapNodeBits, keyHash, h, mutable, resized)
+	}
+
+	// Generate copy, if necessary.
+	other := n
+	if !mutable {
+		other = n.clone()
+	}
+
+	// Update child node (and update size, if new).
+ if node == nil { + other.count++ + } + other.nodes[idx] = newNode + return other +} + +// delete returns a node with the given key removed. Returns the same node if +// the key does not exist. If node shrinks to within bitmap-indexed size then +// converts to a bitmap-indexed node. +func (n *mapHashArrayNode[K, V]) delete(key K, shift uint, keyHash uint32, h Hasher[K], mutable bool, resized *bool) mapNode[K, V] { + idx := (keyHash >> shift) & mapNodeMask + node := n.nodes[idx] + + // Return original node if child is not found. + if node == nil { + return n + } + + // Return original node if child is unchanged. + newNode := node.delete(key, shift+mapNodeBits, keyHash, h, mutable, resized) + if !*resized { + return n + } + + // If we remove a node and drop below a threshold, convert back to bitmap indexed node. + if newNode == nil && n.count <= maxBitmapIndexedSize { + other := &mapBitmapIndexedNode[K, V]{nodes: make([]mapNode[K, V], 0, n.count-1)} + for i, child := range n.nodes { + if child != nil && uint32(i) != idx { + other.bitmap |= 1 << uint(i) + other.nodes = append(other.nodes, child) + } + } + return other + } + + // Generate copy, if necessary. + other := n + if !mutable { + other = n.clone() + } + + // Return copy of node with child updated. + other.nodes[idx] = newNode + if newNode == nil { + other.count-- + } + return other +} + +// mapValueNode represents a leaf node with a single key/value pair. +// A value node can be converted to a hash collision leaf node if a different +// key with the same keyHash is inserted. +type mapValueNode[K constraints.Ordered, V any] struct { + keyHash uint32 + key K + value V +} + +// newMapValueNode returns a new instance of mapValueNode. +func newMapValueNode[K constraints.Ordered, V any](keyHash uint32, key K, value V) *mapValueNode[K, V] { + return &mapValueNode[K, V]{ + keyHash: keyHash, + key: key, + value: value, + } +} + +// keyHashValue returns the key hash for this node. +func (n *mapValueNode[K, V]) keyHashValue() uint32 { + return n.keyHash +} + +// get returns the value for the given key. +func (n *mapValueNode[K, V]) get(key K, shift uint, keyHash uint32, h Hasher[K]) (value V, ok bool) { + if !h.Equal(n.key, key) { + return value, false + } + return n.value, true +} + +// set returns a new node with the new value set for the key. If the key equals +// the node's key then a new value node is returned. If key is not equal to the +// node's key but has the same hash then a hash collision node is returned. +// Otherwise the nodes are merged into a branch node. +func (n *mapValueNode[K, V]) set(key K, value V, shift uint, keyHash uint32, h Hasher[K], mutable bool, resized *bool) mapNode[K, V] { + // If the keys match then return a new value node overwriting the value. + if h.Equal(n.key, key) { + // Update in-place if mutable. + if mutable { + n.value = value + return n + } + // Otherwise return a new copy. + return newMapValueNode(n.keyHash, key, value) + } + + *resized = true + + // Recursively merge nodes together if key hashes are different. + if n.keyHash != keyHash { + return mergeIntoNode[K, V](n, shift, keyHash, key, value) + } + + // Merge into collision node if hash matches. + return &mapHashCollisionNode[K, V]{keyHash: keyHash, entries: []mapEntry[K, V]{ + {key: n.key, value: n.value}, + {key: key, value: value}, + }} +} + +// delete returns nil if the key matches the node's key. Otherwise returns the original node. 
+func (n *mapValueNode[K, V]) delete(key K, shift uint, keyHash uint32, h Hasher[K], mutable bool, resized *bool) mapNode[K, V] { + // Return original node if the keys do not match. + if !h.Equal(n.key, key) { + return n + } + + // Otherwise remove the node if keys do match. + *resized = true + return nil +} + +// mapHashCollisionNode represents a leaf node that contains two or more key/value +// pairs with the same key hash. Single pairs for a hash are stored as value nodes. +type mapHashCollisionNode[K constraints.Ordered, V any] struct { + keyHash uint32 // key hash for all entries + entries []mapEntry[K, V] +} + +// keyHashValue returns the key hash for all entries on the node. +func (n *mapHashCollisionNode[K, V]) keyHashValue() uint32 { + return n.keyHash +} + +// indexOf returns the index of the entry for the given key. +// Returns -1 if the key does not exist in the node. +func (n *mapHashCollisionNode[K, V]) indexOf(key K, h Hasher[K]) int { + for i := range n.entries { + if h.Equal(n.entries[i].key, key) { + return i + } + } + return -1 +} + +// get returns the value for the given key. +func (n *mapHashCollisionNode[K, V]) get(key K, shift uint, keyHash uint32, h Hasher[K]) (value V, ok bool) { + for i := range n.entries { + if h.Equal(n.entries[i].key, key) { + return n.entries[i].value, true + } + } + return value, false +} + +// set returns a copy of the node with key set to the given value. +func (n *mapHashCollisionNode[K, V]) set(key K, value V, shift uint, keyHash uint32, h Hasher[K], mutable bool, resized *bool) mapNode[K, V] { + // Merge node with key/value pair if this is not a hash collision. + if n.keyHash != keyHash { + *resized = true + return mergeIntoNode[K, V](n, shift, keyHash, key, value) + } + + // Update in-place if mutable. + if mutable { + if idx := n.indexOf(key, h); idx == -1 { + *resized = true + n.entries = append(n.entries, mapEntry[K, V]{key, value}) + } else { + n.entries[idx] = mapEntry[K, V]{key, value} + } + return n + } + + // Append to end of node if key doesn't exist & mark resized. + // Otherwise copy nodes and overwrite at matching key index. + other := &mapHashCollisionNode[K, V]{keyHash: n.keyHash} + if idx := n.indexOf(key, h); idx == -1 { + *resized = true + other.entries = make([]mapEntry[K, V], len(n.entries)+1) + copy(other.entries, n.entries) + other.entries[len(other.entries)-1] = mapEntry[K, V]{key, value} + } else { + other.entries = make([]mapEntry[K, V], len(n.entries)) + copy(other.entries, n.entries) + other.entries[idx] = mapEntry[K, V]{key, value} + } + return other +} + +// delete returns a node with the given key deleted. Returns the same node if +// the key does not exist. If removing the key would shrink the node to a single +// entry then a value node is returned. +func (n *mapHashCollisionNode[K, V]) delete(key K, shift uint, keyHash uint32, h Hasher[K], mutable bool, resized *bool) mapNode[K, V] { + idx := n.indexOf(key, h) + + // Return original node if key is not found. + if idx == -1 { + return n + } + + // Mark as resized if key exists. + *resized = true + + // Convert to value node if we move to one entry. + if len(n.entries) == 2 { + return &mapValueNode[K, V]{ + keyHash: n.keyHash, + key: n.entries[idx^1].key, + value: n.entries[idx^1].value, + } + } + + // Remove entry in-place if mutable. + if mutable { + copy(n.entries[idx:], n.entries[idx+1:]) + n.entries[len(n.entries)-1] = mapEntry[K, V]{} + n.entries = n.entries[:len(n.entries)-1] + return n + } + + // Return copy without entry if immutable. 
+ other := &mapHashCollisionNode[K, V]{keyHash: n.keyHash, entries: make([]mapEntry[K, V], len(n.entries)-1)} + copy(other.entries[:idx], n.entries[:idx]) + copy(other.entries[idx:], n.entries[idx+1:]) + return other +} + +// mergeIntoNode merges a key/value pair into an existing node. +// Caller must verify that node's keyHash is not equal to keyHash. +func mergeIntoNode[K constraints.Ordered, V any](node mapLeafNode[K, V], shift uint, keyHash uint32, key K, value V) mapNode[K, V] { + idx1 := (node.keyHashValue() >> shift) & mapNodeMask + idx2 := (keyHash >> shift) & mapNodeMask + + // Recursively build branch nodes to combine the node and its key. + other := &mapBitmapIndexedNode[K, V]{bitmap: (1 << idx1) | (1 << idx2)} + if idx1 == idx2 { + other.nodes = []mapNode[K, V]{mergeIntoNode(node, shift+mapNodeBits, keyHash, key, value)} + } else { + if newNode := newMapValueNode(keyHash, key, value); idx1 < idx2 { + other.nodes = []mapNode[K, V]{node, newNode} + } else { + other.nodes = []mapNode[K, V]{newNode, node} + } + } + return other +} + +// mapEntry represents a single key/value pair. +type mapEntry[K constraints.Ordered, V any] struct { + key K + value V +} + +// MapIterator represents an iterator over a map's key/value pairs. Although +// map keys are not sorted, the iterator's order is deterministic. +type MapIterator[K constraints.Ordered, V any] struct { + m *Map[K, V] // source map + + stack [32]mapIteratorElem[K, V] // search stack + depth int // stack depth +} + +// Done returns true if no more elements remain in the iterator. +func (itr *MapIterator[K, V]) Done() bool { + return itr.depth == -1 +} + +// First resets the iterator to the first key/value pair. +func (itr *MapIterator[K, V]) First() { + // Exit immediately if the map is empty. + if itr.m.root == nil { + itr.depth = -1 + return + } + + // Initialize the stack to the left most element. + itr.stack[0] = mapIteratorElem[K, V]{node: itr.m.root} + itr.depth = 0 + itr.first() +} + +// Next returns the next key/value pair. Returns a nil key when no elements remain. +func (itr *MapIterator[K, V]) Next() (key K, value V, ok bool) { + // Return nil key if iteration is done. + if itr.Done() { + return key, value, false + } + + // Retrieve current index & value. Current node is always a leaf. + elem := &itr.stack[itr.depth] + switch node := elem.node.(type) { + case *mapArrayNode[K, V]: + entry := &node.entries[elem.index] + key, value = entry.key, entry.value + case *mapValueNode[K, V]: + key, value = node.key, node.value + case *mapHashCollisionNode[K, V]: + entry := &node.entries[elem.index] + key, value = entry.key, entry.value + } + + // Move up stack until we find a node that has remaining position ahead + // and move that element forward by one. + itr.next() + return key, value, true +} + +// next moves to the next available key. 
+func (itr *MapIterator[K, V]) next() { + for ; itr.depth >= 0; itr.depth-- { + elem := &itr.stack[itr.depth] + + switch node := elem.node.(type) { + case *mapArrayNode[K, V]: + if elem.index < len(node.entries)-1 { + elem.index++ + return + } + + case *mapBitmapIndexedNode[K, V]: + if elem.index < len(node.nodes)-1 { + elem.index++ + itr.stack[itr.depth+1].node = node.nodes[elem.index] + itr.depth++ + itr.first() + return + } + + case *mapHashArrayNode[K, V]: + for i := elem.index + 1; i < len(node.nodes); i++ { + if node.nodes[i] != nil { + elem.index = i + itr.stack[itr.depth+1].node = node.nodes[elem.index] + itr.depth++ + itr.first() + return + } + } + + case *mapValueNode[K, V]: + continue // always the last value, traverse up + + case *mapHashCollisionNode[K, V]: + if elem.index < len(node.entries)-1 { + elem.index++ + return + } + } + } +} + +// first positions the stack left most index. +// Elements and indexes at and below the current depth are assumed to be correct. +func (itr *MapIterator[K, V]) first() { + for ; ; itr.depth++ { + elem := &itr.stack[itr.depth] + + switch node := elem.node.(type) { + case *mapBitmapIndexedNode[K, V]: + elem.index = 0 + itr.stack[itr.depth+1].node = node.nodes[0] + + case *mapHashArrayNode[K, V]: + for i := 0; i < len(node.nodes); i++ { + if node.nodes[i] != nil { // find first node + elem.index = i + itr.stack[itr.depth+1].node = node.nodes[i] + break + } + } + + default: // *mapArrayNode, mapLeafNode + elem.index = 0 + return + } + } +} + +// mapIteratorElem represents a node/index pair in the MapIterator stack. +type mapIteratorElem[K constraints.Ordered, V any] struct { + node mapNode[K, V] + index int +} + +// Sorted map child node limit size. +const ( + sortedMapNodeSize = 32 +) + +// SortedMap represents a map of key/value pairs sorted by key. The sort order +// is determined by the Comparer used by the map. +// +// This map is implemented as a B+tree. +type SortedMap[K constraints.Ordered, V any] struct { + size int // total number of key/value pairs + root sortedMapNode[K, V] // root of b+tree + comparer Comparer[K] +} + +// NewSortedMap returns a new instance of SortedMap. If comparer is nil then +// a default comparer is set after the first key is inserted. Default comparers +// exist for int, string, and byte slice keys. +func NewSortedMap[K constraints.Ordered, V any](comparer Comparer[K]) *SortedMap[K, V] { + return &SortedMap[K, V]{ + comparer: comparer, + } +} + +// Len returns the number of elements in the sorted map. +func (m *SortedMap[K, V]) Len() int { + return m.size +} + +// Get returns the value for a given key and a flag indicating if the key is set. +// The flag can be used to distinguish between a nil-set key versus an unset key. +func (m *SortedMap[K, V]) Get(key K) (V, bool) { + if m.root == nil { + var v V + return v, false + } + return m.root.get(key, m.comparer) +} + +// Set returns a copy of the map with the key set to the given value. +func (m *SortedMap[K, V]) Set(key K, value V) *SortedMap[K, V] { + return m.set(key, value, false) +} + +func (m *SortedMap[K, V]) set(key K, value V, mutable bool) *SortedMap[K, V] { + // Set a comparer on the first value if one does not already exist. + comparer := m.comparer + if comparer == nil { + comparer = NewComparer[K](key) + } + + // Create copy, if necessary. + other := m + if !mutable { + other = m.clone() + } + other.comparer = comparer + + // If no values are set then initialize with a leaf node. 
+	if m.root == nil {
+		other.size = 1
+		other.root = &sortedMapLeafNode[K, V]{entries: []mapEntry[K, V]{{key: key, value: value}}}
+		return other
+	}
+
+	// Otherwise delegate to root node.
+	// If a split occurs then grow the tree from the root.
+	var resized bool
+	newRoot, splitNode := m.root.set(key, value, comparer, mutable, &resized)
+	if splitNode != nil {
+		newRoot = newSortedMapBranchNode(newRoot, splitNode)
+	}
+
+	// Update root and size (if resized).
+	other.size = m.size
+	other.root = newRoot
+	if resized {
+		other.size++
+	}
+	return other
+}
+
+// Delete returns a copy of the map with the key removed.
+// Returns the original map if key does not exist.
+func (m *SortedMap[K, V]) Delete(key K) *SortedMap[K, V] {
+	return m.delete(key, false)
+}
+
+func (m *SortedMap[K, V]) delete(key K, mutable bool) *SortedMap[K, V] {
+	// Return original map if no keys exist.
+	if m.root == nil {
+		return m
+	}
+
+	// If the delete did not change the node then return the original map.
+	var resized bool
+	newRoot := m.root.delete(key, m.comparer, mutable, &resized)
+	if !resized {
+		return m
+	}
+
+	// Create copy, if necessary.
+	other := m
+	if !mutable {
+		other = m.clone()
+	}
+
+	// Update root and size.
+	other.size = m.size - 1
+	other.root = newRoot
+	return other
+}
+
+// clone returns a shallow copy of m.
+func (m *SortedMap[K, V]) clone() *SortedMap[K, V] {
+	other := *m
+	return &other
+}
+
+// Iterator returns a new iterator for this map positioned at the first key.
+func (m *SortedMap[K, V]) Iterator() *SortedMapIterator[K, V] {
+	itr := &SortedMapIterator[K, V]{m: m}
+	itr.First()
+	return itr
+}
+
+// SortedMapBuilder represents an efficient builder for creating sorted maps.
+type SortedMapBuilder[K constraints.Ordered, V any] struct {
+	m *SortedMap[K, V] // current state
+}
+
+// NewSortedMapBuilder returns a new instance of SortedMapBuilder.
+func NewSortedMapBuilder[K constraints.Ordered, V any](comparer Comparer[K]) *SortedMapBuilder[K, V] {
+	return &SortedMapBuilder[K, V]{m: NewSortedMap[K, V](comparer)}
+}
+
+// Map returns the underlying sorted map. The builder is invalid after this
+// call and will panic on a second invocation.
+func (b *SortedMapBuilder[K, V]) Map() *SortedMap[K, V] {
+	assert(b.m != nil, "immutable.SortedMapBuilder.Map(): duplicate call to fetch map")
+	m := b.m
+	b.m = nil
+	return m
+}
+
+// Len returns the number of elements in the underlying map.
+func (b *SortedMapBuilder[K, V]) Len() int {
+	assert(b.m != nil, "immutable.SortedMapBuilder: builder invalid after Map() invocation")
+	return b.m.Len()
+}
+
+// Get returns the value for the given key.
+func (b *SortedMapBuilder[K, V]) Get(key K) (value V, ok bool) {
+	assert(b.m != nil, "immutable.SortedMapBuilder: builder invalid after Map() invocation")
+	return b.m.Get(key)
+}
+
+// Set sets the value of the given key. See SortedMap.Set() for additional details.
+func (b *SortedMapBuilder[K, V]) Set(key K, value V) {
+	assert(b.m != nil, "immutable.SortedMapBuilder: builder invalid after Map() invocation")
+	b.m = b.m.set(key, value, true)
+}
+
+// Delete removes the given key. See SortedMap.Delete() for additional details.
+func (b *SortedMapBuilder[K, V]) Delete(key K) {
+	assert(b.m != nil, "immutable.SortedMapBuilder: builder invalid after Map() invocation")
+	b.m = b.m.delete(key, true)
+}
+
+// Iterator returns a new iterator for the underlying map positioned at the first key.
+func (b *SortedMapBuilder[K, V]) Iterator() *SortedMapIterator[K, V] { + assert(b.m != nil, "immutable.SortedMapBuilder: builder invalid after Map() invocation") + return b.m.Iterator() +} + +// sortedMapNode represents a branch or leaf node in the sorted map. +type sortedMapNode[K constraints.Ordered, V any] interface { + minKey() K + indexOf(key K, c Comparer[K]) int + get(key K, c Comparer[K]) (value V, ok bool) + set(key K, value V, c Comparer[K], mutable bool, resized *bool) (sortedMapNode[K, V], sortedMapNode[K, V]) + delete(key K, c Comparer[K], mutable bool, resized *bool) sortedMapNode[K, V] +} + +var _ sortedMapNode[string, any] = (*sortedMapBranchNode[string, any])(nil) +var _ sortedMapNode[string, any] = (*sortedMapLeafNode[string, any])(nil) + +// sortedMapBranchNode represents a branch in the sorted map. +type sortedMapBranchNode[K constraints.Ordered, V any] struct { + elems []sortedMapBranchElem[K, V] +} + +// newSortedMapBranchNode returns a new branch node with the given child nodes. +func newSortedMapBranchNode[K constraints.Ordered, V any](children ...sortedMapNode[K, V]) *sortedMapBranchNode[K, V] { + // Fetch min keys for every child. + elems := make([]sortedMapBranchElem[K, V], len(children)) + for i, child := range children { + elems[i] = sortedMapBranchElem[K, V]{ + key: child.minKey(), + node: child, + } + } + + return &sortedMapBranchNode[K, V]{elems: elems} +} + +// minKey returns the lowest key stored in this node's tree. +func (n *sortedMapBranchNode[K, V]) minKey() K { + return n.elems[0].node.minKey() +} + +// indexOf returns the index of the key within the child nodes. +func (n *sortedMapBranchNode[K, V]) indexOf(key K, c Comparer[K]) int { + if idx := sort.Search(len(n.elems), func(i int) bool { return c.Compare(n.elems[i].key, key) == 1 }); idx > 0 { + return idx - 1 + } + return 0 +} + +// get returns the value for the given key. +func (n *sortedMapBranchNode[K, V]) get(key K, c Comparer[K]) (value V, ok bool) { + idx := n.indexOf(key, c) + return n.elems[idx].node.get(key, c) +} + +// set returns a copy of the node with the key set to the given value. +func (n *sortedMapBranchNode[K, V]) set(key K, value V, c Comparer[K], mutable bool, resized *bool) (sortedMapNode[K, V], sortedMapNode[K, V]) { + idx := n.indexOf(key, c) + + // Delegate insert to child node. + newNode, splitNode := n.elems[idx].node.set(key, value, c, mutable, resized) + + // Update in-place, if mutable. + if mutable { + n.elems[idx] = sortedMapBranchElem[K, V]{key: newNode.minKey(), node: newNode} + if splitNode != nil { + n.elems = append(n.elems, sortedMapBranchElem[K, V]{}) + copy(n.elems[idx+1:], n.elems[idx:]) + n.elems[idx+1] = sortedMapBranchElem[K, V]{key: splitNode.minKey(), node: splitNode} + } + + // If the child splits and we have no more room then we split too. + if len(n.elems) > sortedMapNodeSize { + splitIdx := len(n.elems) / 2 + newNode := &sortedMapBranchNode[K, V]{elems: n.elems[:splitIdx:splitIdx]} + splitNode := &sortedMapBranchNode[K, V]{elems: n.elems[splitIdx:]} + return newNode, splitNode + } + return n, nil + } + + // If no split occurs, copy branch and update keys. + // If the child splits, insert new key/child into copy of branch. 
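+	// Splits below always halve the node: with sortedMapNodeSize = 32, a
+	// branch that grows to 33 elements divides into nodes of 16 and 17.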
+ var other sortedMapBranchNode[K, V] + if splitNode == nil { + other.elems = make([]sortedMapBranchElem[K, V], len(n.elems)) + copy(other.elems, n.elems) + other.elems[idx] = sortedMapBranchElem[K, V]{ + key: newNode.minKey(), + node: newNode, + } + } else { + other.elems = make([]sortedMapBranchElem[K, V], len(n.elems)+1) + copy(other.elems[:idx], n.elems[:idx]) + copy(other.elems[idx+1:], n.elems[idx:]) + other.elems[idx] = sortedMapBranchElem[K, V]{ + key: newNode.minKey(), + node: newNode, + } + other.elems[idx+1] = sortedMapBranchElem[K, V]{ + key: splitNode.minKey(), + node: splitNode, + } + } + + // If the child splits and we have no more room then we split too. + if len(other.elems) > sortedMapNodeSize { + splitIdx := len(other.elems) / 2 + newNode := &sortedMapBranchNode[K, V]{elems: other.elems[:splitIdx:splitIdx]} + splitNode := &sortedMapBranchNode[K, V]{elems: other.elems[splitIdx:]} + return newNode, splitNode + } + + // Otherwise return the new branch node with the updated entry. + return &other, nil +} + +// delete returns a node with the key removed. Returns the same node if the key +// does not exist. Returns nil if all child nodes are removed. +func (n *sortedMapBranchNode[K, V]) delete(key K, c Comparer[K], mutable bool, resized *bool) sortedMapNode[K, V] { + idx := n.indexOf(key, c) + + // Return original node if child has not changed. + newNode := n.elems[idx].node.delete(key, c, mutable, resized) + if !*resized { + return n + } + + // Remove child if it is now nil. + if newNode == nil { + // If this node will become empty then simply return nil. + if len(n.elems) == 1 { + return nil + } + + // If mutable, update in-place. + if mutable { + copy(n.elems[idx:], n.elems[idx+1:]) + n.elems[len(n.elems)-1] = sortedMapBranchElem[K, V]{} + n.elems = n.elems[:len(n.elems)-1] + return n + } + + // Return a copy without the given node. + other := &sortedMapBranchNode[K, V]{elems: make([]sortedMapBranchElem[K, V], len(n.elems)-1)} + copy(other.elems[:idx], n.elems[:idx]) + copy(other.elems[idx:], n.elems[idx+1:]) + return other + } + + // If mutable, update in-place. + if mutable { + n.elems[idx] = sortedMapBranchElem[K, V]{key: newNode.minKey(), node: newNode} + return n + } + + // Return a copy with the updated node. + other := &sortedMapBranchNode[K, V]{elems: make([]sortedMapBranchElem[K, V], len(n.elems))} + copy(other.elems, n.elems) + other.elems[idx] = sortedMapBranchElem[K, V]{ + key: newNode.minKey(), + node: newNode, + } + return other +} + +type sortedMapBranchElem[K constraints.Ordered, V any] struct { + key K + node sortedMapNode[K, V] +} + +// sortedMapLeafNode represents a leaf node in the sorted map. +type sortedMapLeafNode[K constraints.Ordered, V any] struct { + entries []mapEntry[K, V] +} + +// minKey returns the first key stored in this node. +func (n *sortedMapLeafNode[K, V]) minKey() K { + return n.entries[0].key +} + +// indexOf returns the index of the given key. +func (n *sortedMapLeafNode[K, V]) indexOf(key K, c Comparer[K]) int { + return sort.Search(len(n.entries), func(i int) bool { + return c.Compare(n.entries[i].key, key) != -1 // GTE + }) +} + +// get returns the value of the given key. +func (n *sortedMapLeafNode[K, V]) get(key K, c Comparer[K]) (value V, ok bool) { + idx := n.indexOf(key, c) + + // If the index is beyond the entry count or the key is not equal then return 'not found'. + if idx == len(n.entries) || c.Compare(n.entries[idx].key, key) != 0 { + return value, false + } + + // If the key matches then return its value. 
+ return n.entries[idx].value, true +} + +// set returns a copy of node with the key set to the given value. If the update +// causes the node to grow beyond the maximum size then it is split in two. +func (n *sortedMapLeafNode[K, V]) set(key K, value V, c Comparer[K], mutable bool, resized *bool) (sortedMapNode[K, V], sortedMapNode[K, V]) { + // Find the insertion index for the key. + idx := n.indexOf(key, c) + exists := idx < len(n.entries) && c.Compare(n.entries[idx].key, key) == 0 + + // Update in-place, if mutable. + if mutable { + if !exists { + *resized = true + n.entries = append(n.entries, mapEntry[K, V]{}) + copy(n.entries[idx+1:], n.entries[idx:]) + } + n.entries[idx] = mapEntry[K, V]{key: key, value: value} + + // If the key doesn't exist and we exceed our max allowed values then split. + if len(n.entries) > sortedMapNodeSize { + splitIdx := len(n.entries) / 2 + newNode := &sortedMapLeafNode[K, V]{entries: n.entries[:splitIdx:splitIdx]} + splitNode := &sortedMapLeafNode[K, V]{entries: n.entries[splitIdx:]} + return newNode, splitNode + } + return n, nil + } + + // If the key matches then simply return a copy with the entry overridden. + // If there is no match then insert new entry and mark as resized. + var newEntries []mapEntry[K, V] + if exists { + newEntries = make([]mapEntry[K, V], len(n.entries)) + copy(newEntries, n.entries) + newEntries[idx] = mapEntry[K, V]{key: key, value: value} + } else { + *resized = true + newEntries = make([]mapEntry[K, V], len(n.entries)+1) + copy(newEntries[:idx], n.entries[:idx]) + newEntries[idx] = mapEntry[K, V]{key: key, value: value} + copy(newEntries[idx+1:], n.entries[idx:]) + } + + // If the key doesn't exist and we exceed our max allowed values then split. + if len(newEntries) > sortedMapNodeSize { + splitIdx := len(newEntries) / 2 + newNode := &sortedMapLeafNode[K, V]{entries: newEntries[:splitIdx:splitIdx]} + splitNode := &sortedMapLeafNode[K, V]{entries: newEntries[splitIdx:]} + return newNode, splitNode + } + + // Otherwise return the new leaf node with the updated entry. + return &sortedMapLeafNode[K, V]{entries: newEntries}, nil +} + +// delete returns a copy of node with key removed. Returns the original node if +// the key does not exist. Returns nil if the removed key is the last remaining key. +func (n *sortedMapLeafNode[K, V]) delete(key K, c Comparer[K], mutable bool, resized *bool) sortedMapNode[K, V] { + idx := n.indexOf(key, c) + + // Return original node if key is not found. + if idx >= len(n.entries) || c.Compare(n.entries[idx].key, key) != 0 { + return n + } + *resized = true + + // If this is the last entry then return nil. + if len(n.entries) == 1 { + return nil + } + + // Update in-place, if mutable. + if mutable { + copy(n.entries[idx:], n.entries[idx+1:]) + n.entries[len(n.entries)-1] = mapEntry[K, V]{} + n.entries = n.entries[:len(n.entries)-1] + return n + } + + // Return copy of node with entry removed. + other := &sortedMapLeafNode[K, V]{entries: make([]mapEntry[K, V], len(n.entries)-1)} + copy(other.entries[:idx], n.entries[:idx]) + copy(other.entries[idx:], n.entries[idx+1:]) + return other +} + +// SortedMapIterator represents an iterator over a sorted map. +// Iteration can occur in natural or reverse order based on use of Next() or Prev(). +type SortedMapIterator[K constraints.Ordered, V any] struct { + m *SortedMap[K, V] // source map + + stack [32]sortedMapIteratorElem[K, V] // search stack + depth int // stack depth +} + +// Done returns true if no more key/value pairs remain in the iterator. 
+func (itr *SortedMapIterator[K, V]) Done() bool {
+	return itr.depth == -1
+}
+
+// First moves the iterator to the first key/value pair.
+func (itr *SortedMapIterator[K, V]) First() {
+	if itr.m.root == nil {
+		itr.depth = -1
+		return
+	}
+	itr.stack[0] = sortedMapIteratorElem[K, V]{node: itr.m.root}
+	itr.depth = 0
+	itr.first()
+}
+
+// Last moves the iterator to the last key/value pair.
+func (itr *SortedMapIterator[K, V]) Last() {
+	if itr.m.root == nil {
+		itr.depth = -1
+		return
+	}
+	itr.stack[0] = sortedMapIteratorElem[K, V]{node: itr.m.root}
+	itr.depth = 0
+	itr.last()
+}
+
+// Seek moves the iterator position to the given key in the map.
+// If the key does not exist then the next key is used. If no more keys exist
+// then the iterator is marked as done.
+func (itr *SortedMapIterator[K, V]) Seek(key K) {
+	if itr.m.root == nil {
+		itr.depth = -1
+		return
+	}
+	itr.stack[0] = sortedMapIteratorElem[K, V]{node: itr.m.root}
+	itr.depth = 0
+	itr.seek(key)
+}
+
+// Next returns the current key/value pair and moves the iterator forward.
+// Returns ok=false if there are no more elements to return.
+func (itr *SortedMapIterator[K, V]) Next() (key K, value V, ok bool) {
+	// Return zero values if iteration is complete.
+	if itr.Done() {
+		return key, value, false
+	}
+
+	// Retrieve current key/value pair.
+	leafElem := &itr.stack[itr.depth]
+	leafNode := leafElem.node.(*sortedMapLeafNode[K, V])
+	leafEntry := &leafNode.entries[leafElem.index]
+	key, value = leafEntry.key, leafEntry.value
+
+	// Move to the next available key/value pair.
+	itr.next()
+
+	// Return the pair that was read before advancing.
+	return key, value, true
+}
+
+// next moves to the next key. If no keys follow then depth is set to -1.
+func (itr *SortedMapIterator[K, V]) next() {
+	for ; itr.depth >= 0; itr.depth-- {
+		elem := &itr.stack[itr.depth]
+
+		switch node := elem.node.(type) {
+		case *sortedMapLeafNode[K, V]:
+			if elem.index < len(node.entries)-1 {
+				elem.index++
+				return
+			}
+		case *sortedMapBranchNode[K, V]:
+			if elem.index < len(node.elems)-1 {
+				elem.index++
+				itr.stack[itr.depth+1].node = node.elems[elem.index].node
+				itr.depth++
+				itr.first()
+				return
+			}
+		}
+	}
+}
+
+// Prev returns the current key/value pair and moves the iterator backward.
+// Returns ok=false if there are no more elements to return.
+func (itr *SortedMapIterator[K, V]) Prev() (key K, value V, ok bool) {
+	// Return zero values if iteration is complete.
+	if itr.Done() {
+		return key, value, false
+	}
+
+	// Retrieve current key/value pair.
+	leafElem := &itr.stack[itr.depth]
+	leafNode := leafElem.node.(*sortedMapLeafNode[K, V])
+	leafEntry := &leafNode.entries[leafElem.index]
+	key, value = leafEntry.key, leafEntry.value
+
+	itr.prev()
+	return key, value, true
+}
+
+// prev moves to the previous key. If no keys precede then depth is set to -1.
+func (itr *SortedMapIterator[K, V]) prev() {
+	for ; itr.depth >= 0; itr.depth-- {
+		elem := &itr.stack[itr.depth]
+
+		switch node := elem.node.(type) {
+		case *sortedMapLeafNode[K, V]:
+			if elem.index > 0 {
+				elem.index--
+				return
+			}
+		case *sortedMapBranchNode[K, V]:
+			if elem.index > 0 {
+				elem.index--
+				itr.stack[itr.depth+1].node = node.elems[elem.index].node
+				itr.depth++
+				itr.last()
+				return
+			}
+		}
+	}
+}
+
+// first positions the stack to the leftmost key from the current depth.
+// Elements and indexes below the current depth are assumed to be correct.
+func (itr *SortedMapIterator[K, V]) first() {
+	for {
+		elem := &itr.stack[itr.depth]
+		elem.index = 0
+
+		switch node := elem.node.(type) {
+		case *sortedMapBranchNode[K, V]:
+			itr.stack[itr.depth+1] = sortedMapIteratorElem[K, V]{node: node.elems[elem.index].node}
+			itr.depth++
+		case *sortedMapLeafNode[K, V]:
+			return
+		}
+	}
+}
+
+// last positions the stack to the rightmost key from the current depth.
+// Elements and indexes below the current depth are assumed to be correct.
+func (itr *SortedMapIterator[K, V]) last() {
+	for {
+		elem := &itr.stack[itr.depth]
+
+		switch node := elem.node.(type) {
+		case *sortedMapBranchNode[K, V]:
+			elem.index = len(node.elems) - 1
+			itr.stack[itr.depth+1] = sortedMapIteratorElem[K, V]{node: node.elems[elem.index].node}
+			itr.depth++
+		case *sortedMapLeafNode[K, V]:
+			elem.index = len(node.entries) - 1
+			return
+		}
+	}
+}
+
+// seek positions the stack to the given key from the current depth.
+// Elements and indexes below the current depth are assumed to be correct.
+func (itr *SortedMapIterator[K, V]) seek(key K) {
+	for {
+		elem := &itr.stack[itr.depth]
+		elem.index = elem.node.indexOf(key, itr.m.comparer)
+
+		switch node := elem.node.(type) {
+		case *sortedMapBranchNode[K, V]:
+			itr.stack[itr.depth+1] = sortedMapIteratorElem[K, V]{node: node.elems[elem.index].node}
+			itr.depth++
+		case *sortedMapLeafNode[K, V]:
+			if elem.index == len(node.entries) {
+				itr.next()
+			}
+			return
+		}
+	}
+}
+
+// sortedMapIteratorElem represents a node/index pair in the SortedMapIterator stack.
+type sortedMapIteratorElem[K constraints.Ordered, V any] struct {
+	node  sortedMapNode[K, V]
+	index int
+}
+
+// Hasher hashes keys and checks them for equality.
+type Hasher[K constraints.Ordered] interface {
+	// Hash computes a hash for key.
+	Hash(key K) uint32
+
+	// Equal returns true if a and b are equal.
+	Equal(a, b K) bool
+}
+
+// NewHasher returns the built-in hasher for a given key type.
+func NewHasher[K constraints.Ordered](key K) Hasher[K] {
+	// Attempt to use non-reflection based hasher first.
+	switch (any(key)).(type) {
+	case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr, string:
+		return &defaultHasher[K]{}
+	}
+
+	// Fall back to the reflection-based hasher otherwise.
+	// This is used when the caller wraps a type around a primitive type.
+	switch reflect.TypeOf(key).Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.String:
+		return &reflectHasher[K]{}
+	}
+
+	// If no hashers match then panic.
+	// This is a compile time issue so it should not return an error.
+	panic(fmt.Sprintf("immutable.NewHasher: must set hasher for %T type", key))
+}
+
+// hashString returns a hash for a string value.
+func hashString(value string) uint32 {
+	var hash uint32
+	for i, value := 0, value; i < len(value); i++ {
+		hash = 31*hash + uint32(value[i])
+	}
+	return hash
+}
+
+// reflectHasher implements a reflection-based Hasher for keys whose underlying
+// kind is an integer, unsigned integer, or string.
+type reflectHasher[K constraints.Ordered] struct{}
+
+// Hash returns a hash for key.
+func (h *reflectHasher[K]) Hash(key K) uint32 {
+	switch reflect.TypeOf(key).Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return hashUint64(uint64(reflect.ValueOf(key).Int()))
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return hashUint64(reflect.ValueOf(key).Uint())
+	case reflect.String:
+		var hash uint32
+		s := reflect.ValueOf(key).String()
+		for i := 0; i < len(s); i++ {
+			hash = 31*hash + uint32(s[i])
+		}
+		return hash
+	}
+	panic(fmt.Sprintf("immutable.reflectHasher.Hash: reflectHasher does not support %T type", key))
+}
+
+// Equal returns true if a is equal to b. Otherwise returns false.
+// Panics if the underlying kind of K is not an integer, unsigned integer, or string.
+func (h *reflectHasher[K]) Equal(a, b K) bool {
+	switch reflect.TypeOf(a).Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return reflect.ValueOf(a).Int() == reflect.ValueOf(b).Int()
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return reflect.ValueOf(a).Uint() == reflect.ValueOf(b).Uint()
+	case reflect.String:
+		return reflect.ValueOf(a).String() == reflect.ValueOf(b).String()
+	}
+	panic(fmt.Sprintf("immutable.reflectHasher.Equal: reflectHasher does not support %T type", a))
+}
+
+// hashUint64 returns a 32-bit hash for a 64-bit value.
+func hashUint64(value uint64) uint32 {
+	hash := value
+	for value > 0xffffffff {
+		value /= 0xffffffff
+		hash ^= value
+	}
+	return uint32(hash)
+}
+
+// defaultHasher implements Hasher for primitive key types without reflection.
+type defaultHasher[K constraints.Ordered] struct{}
+
+// Hash returns a hash for key.
+func (h *defaultHasher[K]) Hash(key K) uint32 {
+	// Attempt to use non-reflection based hasher first.
+	switch x := (any(key)).(type) {
+	case int:
+		return hashUint64(uint64(x))
+	case int8:
+		return hashUint64(uint64(x))
+	case int16:
+		return hashUint64(uint64(x))
+	case int32:
+		return hashUint64(uint64(x))
+	case int64:
+		return hashUint64(uint64(x))
+	case uint:
+		return hashUint64(uint64(x))
+	case uint8:
+		return hashUint64(uint64(x))
+	case uint16:
+		return hashUint64(uint64(x))
+	case uint32:
+		return hashUint64(uint64(x))
+	case uint64:
+		return hashUint64(uint64(x))
+	case uintptr:
+		return hashUint64(uint64(x))
+	case string:
+		return hashString(x)
+	}
+	panic(fmt.Sprintf("immutable.defaultHasher.Hash: must set hasher for %T type", key))
+}
+
+// Equal returns true if a is equal to b. Otherwise returns false.
+func (h *defaultHasher[K]) Equal(a, b K) bool {
+	return a == b
+}
+
+// Comparer allows the comparison of two keys for the purpose of sorting.
+type Comparer[K constraints.Ordered] interface {
+	// Compare returns -1 if a is less than b, returns 1 if a is greater than b,
+	// and returns 0 if a is equal to b.
+	Compare(a, b K) int
+}
+
+// NewComparer returns the built-in comparer for a given key type.
+func NewComparer[K constraints.Ordered](key K) Comparer[K] {
+	// Attempt to use non-reflection based comparer first.
+	switch (any(key)).(type) {
+	case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr, string:
+		return &defaultComparer[K]{}
+	}
+	// Fall back to the reflection-based comparer otherwise.
+	// This is used when the caller wraps a type around a primitive type.
+	switch reflect.TypeOf(key).Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.String:
+		return &reflectComparer[K]{}
+	}
+	// If no comparers match then panic.
+	// This is a compile time issue so it should not return an error.
+	panic(fmt.Sprintf("immutable.NewComparer: must set comparer for %T type", key))
+}
+
+// defaultComparer compares two keys of a primitive ordered type. Implements Comparer.
+type defaultComparer[K constraints.Ordered] struct{}
+
+// Compare returns -1 if i is less than j, returns 1 if i is greater than j, and
+// returns 0 if i is equal to j.
+func (c *defaultComparer[K]) Compare(i K, j K) int {
+	if i < j {
+		return -1
+	} else if i > j {
+		return 1
+	}
+	return 0
+}
+
+// reflectComparer compares two primitive-typed keys using reflection. Implements Comparer.
+type reflectComparer[K constraints.Ordered] struct{}
+
+// Compare returns -1 if a is less than b, returns 1 if a is greater than b, and
+// returns 0 if a is equal to b. Panics if the underlying kind of K is not an
+// integer, unsigned integer, or string.
+func (c *reflectComparer[K]) Compare(a, b K) int {
+	switch reflect.TypeOf(a).Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		if i, j := reflect.ValueOf(a).Int(), reflect.ValueOf(b).Int(); i < j {
+			return -1
+		} else if i > j {
+			return 1
+		}
+		return 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		if i, j := reflect.ValueOf(a).Uint(), reflect.ValueOf(b).Uint(); i < j {
+			return -1
+		} else if i > j {
+			return 1
+		}
+		return 0
+	case reflect.String:
+		return strings.Compare(reflect.ValueOf(a).String(), reflect.ValueOf(b).String())
+	}
+	panic(fmt.Sprintf("immutable.reflectComparer.Compare: must set comparer for %T type", a))
+}
+
+func assert(condition bool, message string) {
+	if !condition {
+		panic(message)
+	}
+}
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md
index 8bf0e5b781532..33c88305c46ed 100644
--- a/vendor/github.com/cespare/xxhash/v2/README.md
+++ b/vendor/github.com/cespare/xxhash/v2/README.md
@@ -70,3 +70,5 @@ benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
 - [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
 - [FreeCache](https://github.com/coocood/freecache)
 - [FastCache](https://github.com/VictoriaMetrics/fastcache)
+- [Ristretto](https://github.com/dgraph-io/ristretto)
+- [Badger](https://github.com/dgraph-io/badger)
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go
index a9e0d45c9dcc7..78bddf1ceed74 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go
@@ -19,10 +19,13 @@ const (
 // Store the primes in an array as well.
 //
 // The consts are used when possible in Go code to avoid MOVs but we need a
-// contiguous array of the assembly code.
+// contiguous array for the assembly code.
 var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
 
 // Digest implements hash.Hash64.
+//
+// Note that a zero-valued Digest is not ready to receive writes.
+// Call Reset or create a Digest using New before calling other methods.
 type Digest struct {
 	v1 uint64
 	v2 uint64
@@ -33,19 +36,31 @@ type Digest struct {
 	n     int // how much of mem is used
 }
 
-// New creates a new Digest that computes the 64-bit xxHash algorithm.
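For orientation, here is a minimal sketch of how the sorted map implemented above is driven from the outside. It is only a sketch: it assumes the vendored package is github.com/benbjohnson/immutable and that it exports a NewSortedMap constructor and a copy-on-write Set method (both defined earlier in the file, outside this diff); the iterator calls are the ones shown above.

```go
package main

import (
	"fmt"

	"github.com/benbjohnson/immutable" // assumed import path of the vendored package
)

func main() {
	// Each Set returns a new map value; the receiver is never mutated.
	m := immutable.NewSortedMap[string, int](nil) // nil selects the built-in comparer
	m = m.Set("a", 1).Set("c", 3).Set("b", 2)

	// Seek positions the iterator on "b"; if "b" were absent it would
	// land on the next key in order, or be marked done past the end.
	itr := m.Iterator()
	itr.Seek("b")
	for !itr.Done() {
		k, v, _ := itr.Next()
		fmt.Println(k, v) // prints "b 2", then "c 3"
	}
}
```

The SortedMapBuilder at the top of this chunk exists for bulk loading; per its assert, the builder's Iterator may only be used before Map() is called.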
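The xxhash hunk that continues below adds seed support to the streaming digest. A short sketch of the resulting API follows; it leans on the package's existing WriteString and Sum64 methods, which are not part of this diff.

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// Sum64String and New keep hashing with a zero seed.
	fmt.Println(xxhash.Sum64String("hello"))

	// NewWithSeed folds the seed into the four lane accumulators
	// (v1..v4), so the same input yields a different digest per seed.
	d := xxhash.NewWithSeed(42)
	d.WriteString("hello")
	fmt.Println(d.Sum64())

	// ResetWithSeed reuses the same Digest for another seeded run.
	d.ResetWithSeed(1337)
	d.WriteString("hello")
	fmt.Println(d.Sum64())
}
```

Note the doc comment added to Digest above: a zero-valued Digest is not ready for writes, which is why both constructors route through ResetWithSeed.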
+// New creates a new Digest with a zero seed. func New() *Digest { + return NewWithSeed(0) +} + +// NewWithSeed creates a new Digest with the given seed. +func NewWithSeed(seed uint64) *Digest { var d Digest - d.Reset() + d.ResetWithSeed(seed) return &d } // Reset clears the Digest's state so that it can be reused. +// It uses a seed value of zero. func (d *Digest) Reset() { - d.v1 = primes[0] + prime2 - d.v2 = prime2 - d.v3 = 0 - d.v4 = -primes[0] + d.ResetWithSeed(0) +} + +// ResetWithSeed clears the Digest's state so that it can be reused. +// It uses the given seed to initialize the state. +func (d *Digest) ResetWithSeed(seed uint64) { + d.v1 = seed + prime1 + prime2 + d.v2 = seed + prime2 + d.v3 = seed + d.v4 = seed - prime1 d.total = 0 d.n = 0 } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go index 9216e0a40c1a4..78f95f2561037 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go @@ -6,7 +6,7 @@ package xxhash -// Sum64 computes the 64-bit xxHash digest of b. +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. // //go:noescape func Sum64(b []byte) uint64 diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go index 26df13bba4b79..118e49e819e00 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -3,7 +3,7 @@ package xxhash -// Sum64 computes the 64-bit xxHash digest of b. +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. func Sum64(b []byte) uint64 { // A simpler version would be // d := New() diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go index e86f1b5fd8e4c..05f5e7dfe7b72 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -5,7 +5,7 @@ package xxhash -// Sum64String computes the 64-bit xxHash digest of s. +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. func Sum64String(s string) uint64 { return Sum64([]byte(s)) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go index 1c1638fd88a1d..cf9d42aed5369 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -33,7 +33,7 @@ import ( // // See https://github.com/golang/go/issues/42739 for discussion. -// Sum64String computes the 64-bit xxHash digest of s. +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. // It may be faster than Sum64([]byte(s)) by avoiding a copy. func Sum64String(s string) uint64 { b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})) diff --git a/vendor/github.com/containerd/containerd/LICENSE b/vendor/github.com/containerd/containerd/LICENSE new file mode 100644 index 0000000000000..584149b6ee28c --- /dev/null +++ b/vendor/github.com/containerd/containerd/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright The containerd Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/containerd/NOTICE b/vendor/github.com/containerd/containerd/NOTICE new file mode 100644 index 0000000000000..8915f02773f56 --- /dev/null +++ b/vendor/github.com/containerd/containerd/NOTICE @@ -0,0 +1,16 @@ +Docker +Copyright 2012-2015 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. 
+ +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/containerd/containerd/pkg/userns/userns_linux.go b/vendor/github.com/containerd/containerd/pkg/userns/userns_linux.go new file mode 100644 index 0000000000000..6656465efbca5 --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/userns/userns_linux.go @@ -0,0 +1,62 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package userns + +import ( + "bufio" + "fmt" + "os" + "sync" +) + +var ( + inUserNS bool + nsOnce sync.Once +) + +// RunningInUserNS detects whether we are currently running in a user namespace. +// Originally copied from github.com/lxc/lxd/shared/util.go +func RunningInUserNS() bool { + nsOnce.Do(func() { + file, err := os.Open("/proc/self/uid_map") + if err != nil { + // This kernel-provided file only exists if user namespaces are supported + return + } + defer file.Close() + + buf := bufio.NewReader(file) + l, _, err := buf.ReadLine() + if err != nil { + return + } + + line := string(l) + var a, b, c int64 + fmt.Sscanf(line, "%d %d %d", &a, &b, &c) + + /* + * We assume we are in the initial user namespace if we have a full + * range - 4294967295 uids starting at uid 0. + */ + if a == 0 && b == 0 && c == 4294967295 { + return + } + inUserNS = true + }) + return inUserNS +} diff --git a/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go b/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go new file mode 100644 index 0000000000000..c67f773d0a1e9 --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go @@ -0,0 +1,25 @@ +//go:build !linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package userns + +// RunningInUserNS is a stub for non-Linux systems +// Always returns false +func RunningInUserNS() bool { + return false +} diff --git a/vendor/github.com/containerd/fifo/.golangci.yml b/vendor/github.com/containerd/fifo/.golangci.yml index fcba5e885f0a0..c124d3ea677c3 100644 --- a/vendor/github.com/containerd/fifo/.golangci.yml +++ b/vendor/github.com/containerd/fifo/.golangci.yml @@ -1,19 +1,35 @@ linters: enable: - - structcheck - - varcheck - - staticcheck - - unconvert + - exportloopref # Checks for pointers to enclosing loop variables - gofmt - goimports - - golint + - gosec - ineffassign - - vet - - unused - misspell + - nolintlint + - revive + - staticcheck + - tenv # Detects using os.Setenv instead of t.Setenv since Go 1.17 + - unconvert + - unused + - vet + - dupword # Checks for duplicate words in the source code disable: - errcheck +linters-settings: + gosec: + # The following issues surfaced when `gosec` linter + # was enabled. They are temporarily excluded to unblock + # the existing workflow, but still to be addressed by + # future works. + excludes: + - G204 + - G305 + - G306 + - G402 + - G404 + run: timeout: 3m skip-dirs: diff --git a/vendor/github.com/containerd/fifo/fifo.go b/vendor/github.com/containerd/fifo/fifo.go index 45a9b38402d43..173bce960ba01 100644 --- a/vendor/github.com/containerd/fifo/fifo.go +++ b/vendor/github.com/containerd/fifo/fifo.go @@ -1,4 +1,4 @@ -// +build !windows +//go:build !windows /* Copyright The containerd Authors. @@ -20,13 +20,13 @@ package fifo import ( "context" + "fmt" "io" "os" "runtime" "sync" "syscall" - "github.com/pkg/errors" "golang.org/x/sys/unix" ) @@ -48,12 +48,12 @@ var leakCheckWg *sync.WaitGroup func OpenFifoDup2(ctx context.Context, fn string, flag int, perm os.FileMode, fd int) (io.ReadWriteCloser, error) { f, err := openFifo(ctx, fn, flag, perm) if err != nil { - return nil, errors.Wrap(err, "fifo error") + return nil, fmt.Errorf("fifo error: %w", err) } if err := unix.Dup2(int(f.file.Fd()), fd); err != nil { _ = f.Close() - return nil, errors.Wrap(err, "dup2 error") + return nil, fmt.Errorf("dup2 error: %w", err) } return f, nil @@ -62,22 +62,28 @@ func OpenFifoDup2(ctx context.Context, fn string, flag int, perm os.FileMode, fd // OpenFifo opens a fifo. Returns io.ReadWriteCloser. // Context can be used to cancel this function until open(2) has not returned. // Accepted flags: -// - syscall.O_CREAT - create new fifo if one doesn't exist -// - syscall.O_RDONLY - open fifo only from reader side -// - syscall.O_WRONLY - open fifo only from writer side -// - syscall.O_RDWR - open fifo from both sides, never block on syscall level -// - syscall.O_NONBLOCK - return io.ReadWriteCloser even if other side of the +// - syscall.O_CREAT - create new fifo if one doesn't exist +// - syscall.O_RDONLY - open fifo only from reader side +// - syscall.O_WRONLY - open fifo only from writer side +// - syscall.O_RDWR - open fifo from both sides, never block on syscall level +// - syscall.O_NONBLOCK - return io.ReadWriteCloser even if other side of the // fifo isn't open. read/write will be connected after the actual fifo is // open or after fifo is closed. func OpenFifo(ctx context.Context, fn string, flag int, perm os.FileMode) (io.ReadWriteCloser, error) { - return openFifo(ctx, fn, flag, perm) + fifo, err := openFifo(ctx, fn, flag, perm) + if fifo == nil { + // Do not return a non-nil ReadWriteCloser((*fifo)(nil)) value + // as that can confuse callers. 
+ return nil, err + } + return fifo, err } func openFifo(ctx context.Context, fn string, flag int, perm os.FileMode) (*fifo, error) { if _, err := os.Stat(fn); err != nil { if os.IsNotExist(err) && flag&syscall.O_CREAT != 0 { if err := syscall.Mkfifo(fn, uint32(perm&os.ModePerm)); err != nil && !os.IsExist(err) { - return nil, errors.Wrapf(err, "error creating fifo %v", fn) + return nil, fmt.Errorf("error creating fifo %v: %w", fn, err) } } else { return nil, err @@ -138,7 +144,7 @@ func openFifo(ctx context.Context, fn string, flag int, perm os.FileMode) (*fifo case <-ctx.Done(): err = ctx.Err() default: - err = errors.Errorf("fifo %v was closed before opening", h.Name()) + err = fmt.Errorf("fifo %v was closed before opening", h.Name()) } if file != nil { file.Close() @@ -206,6 +212,10 @@ func (f *fifo) Write(b []byte) (int, error) { // before open(2) has returned and fifo was never opened. func (f *fifo) Close() (retErr error) { for { + if f == nil { + return + } + select { case <-f.closed: f.handle.Close() diff --git a/vendor/github.com/containerd/fifo/handle_linux.go b/vendor/github.com/containerd/fifo/handle_linux.go index 0ee2c9feeba41..9710ccaa93612 100644 --- a/vendor/github.com/containerd/fifo/handle_linux.go +++ b/vendor/github.com/containerd/fifo/handle_linux.go @@ -1,4 +1,4 @@ -// +build linux +//go:build linux /* Copyright The containerd Authors. @@ -23,11 +23,9 @@ import ( "os" "sync" "syscall" - - "github.com/pkg/errors" ) -//nolint:golint +//nolint:revive const O_PATH = 010000000 type handle struct { @@ -42,7 +40,7 @@ type handle struct { func getHandle(fn string) (*handle, error) { f, err := os.OpenFile(fn, O_PATH, 0) if err != nil { - return nil, errors.Wrapf(err, "failed to open %v with O_PATH", fn) + return nil, fmt.Errorf("failed to open %v with O_PATH: %w", fn, err) } var ( @@ -51,7 +49,7 @@ func getHandle(fn string) (*handle, error) { ) if err := syscall.Fstat(int(fd), &stat); err != nil { f.Close() - return nil, errors.Wrapf(err, "failed to stat handle %v", fd) + return nil, fmt.Errorf("failed to stat handle %v: %w", fd, err) } h := &handle{ @@ -66,7 +64,7 @@ func getHandle(fn string) (*handle, error) { // check /proc just in case if _, err := os.Stat(h.procPath()); err != nil { f.Close() - return nil, errors.Wrapf(err, "couldn't stat %v", h.procPath()) + return nil, fmt.Errorf("couldn't stat %v: %w", h.procPath(), err) } return h, nil @@ -83,11 +81,11 @@ func (h *handle) Name() string { func (h *handle) Path() (string, error) { var stat syscall.Stat_t if err := syscall.Stat(h.procPath(), &stat); err != nil { - return "", errors.Wrapf(err, "path %v could not be statted", h.procPath()) + return "", fmt.Errorf("path %v could not be statted: %w", h.procPath(), err) } //nolint:unconvert if uint64(stat.Dev) != h.dev || stat.Ino != h.ino { - return "", errors.Errorf("failed to verify handle %v/%v %v/%v", stat.Dev, h.dev, stat.Ino, h.ino) + return "", fmt.Errorf("failed to verify handle %v/%v %v/%v", stat.Dev, h.dev, stat.Ino, h.ino) } return h.procPath(), nil } diff --git a/vendor/github.com/containerd/fifo/handle_nolinux.go b/vendor/github.com/containerd/fifo/handle_nolinux.go index 81ca308fe50ea..f6863cf32a0fb 100644 --- a/vendor/github.com/containerd/fifo/handle_nolinux.go +++ b/vendor/github.com/containerd/fifo/handle_nolinux.go @@ -1,4 +1,4 @@ -// +build !linux,!windows +//go:build !linux && !windows /* Copyright The containerd Authors. 
@@ -19,9 +19,8 @@ package fifo import ( + "fmt" "syscall" - - "github.com/pkg/errors" ) type handle struct { @@ -33,13 +32,13 @@ type handle struct { func getHandle(fn string) (*handle, error) { var stat syscall.Stat_t if err := syscall.Stat(fn, &stat); err != nil { - return nil, errors.Wrapf(err, "failed to stat %v", fn) + return nil, fmt.Errorf("failed to stat %v: %w", fn, err) } h := &handle{ fn: fn, - dev: uint64(stat.Dev), //nolint: unconvert - ino: uint64(stat.Ino), //nolint: unconvert + dev: uint64(stat.Dev), //nolint:unconvert,nolintlint + ino: uint64(stat.Ino), //nolint:unconvert,nolintlint } return h, nil @@ -48,10 +47,10 @@ func getHandle(fn string) (*handle, error) { func (h *handle) Path() (string, error) { var stat syscall.Stat_t if err := syscall.Stat(h.fn, &stat); err != nil { - return "", errors.Wrapf(err, "path %v could not be statted", h.fn) + return "", fmt.Errorf("path %v could not be statted: %w", h.fn, err) } - if uint64(stat.Dev) != h.dev || uint64(stat.Ino) != h.ino { //nolint: unconvert - return "", errors.Errorf("failed to verify handle %v/%v %v/%v for %v", stat.Dev, h.dev, stat.Ino, h.ino, h.fn) + if uint64(stat.Dev) != h.dev || uint64(stat.Ino) != h.ino { //nolint:unconvert,nolintlint + return "", fmt.Errorf("failed to verify handle %v/%v %v/%v for %v", stat.Dev, h.dev, stat.Ino, h.ino, h.fn) } return h.fn, nil } diff --git a/vendor/github.com/containerd/fifo/raw.go b/vendor/github.com/containerd/fifo/raw.go index cead94ca27b0f..9f18f76d2c687 100644 --- a/vendor/github.com/containerd/fifo/raw.go +++ b/vendor/github.com/containerd/fifo/raw.go @@ -1,4 +1,4 @@ -// +build !windows +//go:build !windows /* Copyright The containerd Authors. diff --git a/vendor/github.com/coreos/etcd/LICENSE b/vendor/github.com/coreos/etcd/LICENSE new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/vendor/github.com/coreos/etcd/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/etcd/NOTICE b/vendor/github.com/coreos/etcd/NOTICE new file mode 100644 index 0000000000000..b39ddfa5cbdea --- /dev/null +++ b/vendor/github.com/coreos/etcd/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2014 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go b/vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go new file mode 100644 index 0000000000000..4ce15dc6bcf1f --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go @@ -0,0 +1,27 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package fileutil + +import "os" + +const ( + // PrivateDirMode grants owner to make/remove files inside the directory. + PrivateDirMode = 0700 +) + +// OpenDir opens a directory for syncing. +func OpenDir(path string) (*os.File, error) { return os.Open(path) } diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go b/vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go new file mode 100644 index 0000000000000..a10a90583c798 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go @@ -0,0 +1,51 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build windows + +package fileutil + +import ( + "os" + "syscall" +) + +const ( + // PrivateDirMode grants owner to make/remove files inside the directory. + PrivateDirMode = 0777 +) + +// OpenDir opens a directory in windows with write access for syncing. +func OpenDir(path string) (*os.File, error) { + fd, err := openDir(path) + if err != nil { + return nil, err + } + return os.NewFile(uintptr(fd), path), nil +} + +func openDir(path string) (fd syscall.Handle, err error) { + if len(path) == 0 { + return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND + } + pathp, err := syscall.UTF16PtrFromString(path) + if err != nil { + return syscall.InvalidHandle, err + } + access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE) + sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE) + createmode := uint32(syscall.OPEN_EXISTING) + fl := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS) + return syscall.CreateFile(pathp, access, sharemode, nil, createmode, fl, 0) +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go b/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go new file mode 100644 index 0000000000000..3c73916a1e1c6 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go @@ -0,0 +1,147 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package fileutil implements utility functions related to files and paths. 
+package fileutil
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sort"
+
+	"github.com/coreos/pkg/capnslog"
+)
+
+const (
+	// PrivateFileMode grants the owner read/write access to a file.
+	PrivateFileMode = 0600
+)
+
+var (
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/fileutil")
+)
+
+// IsDirWriteable checks if dir is writable by writing and removing a file
+// to dir. It returns nil if dir is writable.
+func IsDirWriteable(dir string) error {
+	f := filepath.Join(dir, ".touch")
+	if err := ioutil.WriteFile(f, []byte(""), PrivateFileMode); err != nil {
+		return err
+	}
+	return os.Remove(f)
+}
+
+// ReadDir returns the filenames in the given directory in sorted order.
+func ReadDir(dirpath string) ([]string, error) {
+	dir, err := os.Open(dirpath)
+	if err != nil {
+		return nil, err
+	}
+	defer dir.Close()
+	names, err := dir.Readdirnames(-1)
+	if err != nil {
+		return nil, err
+	}
+	sort.Strings(names)
+	return names, nil
+}
+
+// TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any directory
+// does not exist. TouchDirAll also ensures the given directory is writable.
+func TouchDirAll(dir string) error {
+	// If path is already a directory, MkdirAll does nothing and returns nil, so
+	// first check if dir exists with an expected permission mode.
+	if Exist(dir) {
+		err := CheckDirPermission(dir, PrivateDirMode)
+		if err != nil {
+			plog.Warningf("check file permission: %v", err)
+		}
+	} else {
+		err := os.MkdirAll(dir, PrivateDirMode)
+		if err != nil {
+			// if mkdirAll("a/text") and "text" is not
+			// a directory, this will return syscall.ENOTDIR
+			return err
+		}
+	}
+
+	return IsDirWriteable(dir)
+}
+
+// CreateDirAll is similar to TouchDirAll but returns an error
+// if the deepest directory was not empty.
+func CreateDirAll(dir string) error {
+	err := TouchDirAll(dir)
+	if err == nil {
+		var ns []string
+		ns, err = ReadDir(dir)
+		if err != nil {
+			return err
+		}
+		if len(ns) != 0 {
+			err = fmt.Errorf("expected %q to be empty, got %q", dir, ns)
+		}
+	}
+	return err
+}
+
+func Exist(name string) bool {
+	_, err := os.Stat(name)
+	return err == nil
+}
+
+// ZeroToEnd zeros a file starting from SEEK_CUR to its SEEK_END. May temporarily
+// shorten the length of the file.
+func ZeroToEnd(f *os.File) error {
+	// TODO: support FALLOC_FL_ZERO_RANGE
+	off, err := f.Seek(0, io.SeekCurrent)
+	if err != nil {
+		return err
+	}
+	lenf, lerr := f.Seek(0, io.SeekEnd)
+	if lerr != nil {
+		return lerr
+	}
+	if err = f.Truncate(off); err != nil {
+		return err
+	}
+	// make sure blocks remain allocated
+	if err = Preallocate(f, lenf, true); err != nil {
+		return err
+	}
+	_, err = f.Seek(off, io.SeekStart)
+	return err
+}
+
+// CheckDirPermission checks permission on an existing dir.
+// Returns an error if dir does not exist or exists with a different permission than specified.
+func CheckDirPermission(dir string, perm os.FileMode) error {
+	if !Exist(dir) {
+		return fmt.Errorf("directory %q does not exist, cannot check permission", dir)
+	}
+	// check the existing permission on the directory
+	dirInfo, err := os.Stat(dir)
+	if err != nil {
+		return err
+	}
+	dirMode := dirInfo.Mode().Perm()
+	if dirMode != perm {
+		err = fmt.Errorf("directory %q exists, but the permission is %q.
The recommended permission is %q to prevent possible unprivileged access to the data.", dir, dirInfo.Mode(), os.FileMode(PrivateDirMode)) + return err + } + return nil +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock.go new file mode 100644 index 0000000000000..338627f43c88d --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock.go @@ -0,0 +1,26 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fileutil + +import ( + "errors" + "os" +) + +var ( + ErrLocked = errors.New("fileutil: file already locked") +) + +type LockedFile struct{ *os.File } diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_flock.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_flock.go new file mode 100644 index 0000000000000..542550bc8a963 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_flock.go @@ -0,0 +1,49 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows,!plan9,!solaris + +package fileutil + +import ( + "os" + "syscall" +) + +func flockTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, err + } + if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil { + f.Close() + if err == syscall.EWOULDBLOCK { + err = ErrLocked + } + return nil, err + } + return &LockedFile{f}, nil +} + +func flockLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, err + } + if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX); err != nil { + f.Close() + return nil, err + } + return &LockedFile{f}, err +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go new file mode 100644 index 0000000000000..004d35fa23b48 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go @@ -0,0 +1,97 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package fileutil + +import ( + "io" + "os" + "syscall" +) + +// This used to call syscall.Flock() but that call fails with EBADF on NFS. +// An alternative is lockf() which works on NFS but that call lets a process lock +// the same file twice. Instead, use Linux's non-standard open file descriptor +// locks which will block if the process already holds the file lock. +// +// constants from /usr/include/bits/fcntl-linux.h +const ( + F_OFD_GETLK = 36 + F_OFD_SETLK = 37 + F_OFD_SETLKW = 38 +) + +var ( + wrlck = syscall.Flock_t{ + Type: syscall.F_WRLCK, + Whence: int16(io.SeekStart), + Start: 0, + Len: 0, + } + + linuxTryLockFile = flockTryLockFile + linuxLockFile = flockLockFile +) + +func init() { + // use open file descriptor locks if the system supports it + getlk := syscall.Flock_t{Type: syscall.F_RDLCK} + if err := syscall.FcntlFlock(0, F_OFD_GETLK, &getlk); err == nil { + linuxTryLockFile = ofdTryLockFile + linuxLockFile = ofdLockFile + } +} + +func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + return linuxTryLockFile(path, flag, perm) +} + +func ofdTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, err + } + + flock := wrlck + if err = syscall.FcntlFlock(f.Fd(), F_OFD_SETLK, &flock); err != nil { + f.Close() + if err == syscall.EWOULDBLOCK { + err = ErrLocked + } + return nil, err + } + return &LockedFile{f}, nil +} + +func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + return linuxLockFile(path, flag, perm) +} + +func ofdLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := os.OpenFile(path, flag, perm) + if err != nil { + return nil, err + } + + flock := wrlck + err = syscall.FcntlFlock(f.Fd(), F_OFD_SETLKW, &flock) + + if err != nil { + f.Close() + return nil, err + } + return &LockedFile{f}, err +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go new file mode 100644 index 0000000000000..fee6a7c8f4661 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go @@ -0,0 +1,45 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
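
Note for reviewers: the fileutil locking code vendored above exposes one cross-platform API (TryLockFile, LockFile, ErrLocked); on Linux it prefers open file descriptor (OFD) locks and falls back to flock(2) when the kernel lacks support. A minimal caller-side sketch, not part of this diff; the lock path and the main wrapper are illustrative only:

package main

import (
	"fmt"
	"os"

	"github.com/coreos/etcd/pkg/fileutil"
)

func main() {
	// Take an exclusive, non-blocking lock; a second process gets ErrLocked.
	// "/tmp/demo.lock" is an illustrative path, not from this change.
	l, err := fileutil.TryLockFile("/tmp/demo.lock", os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode)
	if err == fileutil.ErrLocked {
		fmt.Println("another process holds the lock")
		return
	}
	if err != nil {
		panic(err)
	}
	defer l.Close() // closing the descriptor releases the lock
	fmt.Println("lock acquired")
}
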
+
+package fileutil
+
+import (
+	"os"
+	"syscall"
+	"time"
+)
+
+func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	if err := os.Chmod(path, syscall.DMEXCL|PrivateFileMode); err != nil {
+		return nil, err
+	}
+	f, err := os.OpenFile(path, flag, perm)
+	if err != nil {
+		return nil, ErrLocked
+	}
+	return &LockedFile{f}, nil
+}
+
+func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	if err := os.Chmod(path, syscall.DMEXCL|PrivateFileMode); err != nil {
+		return nil, err
+	}
+	for {
+		f, err := os.OpenFile(path, flag, perm)
+		if err == nil {
+			return &LockedFile{f}, nil
+		}
+		time.Sleep(10 * time.Millisecond)
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go
new file mode 100644
index 0000000000000..352ca5590d13f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go
@@ -0,0 +1,62 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build solaris
+
+package fileutil
+
+import (
+	"os"
+	"syscall"
+)
+
+func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	var lock syscall.Flock_t
+	lock.Start = 0
+	lock.Len = 0
+	lock.Pid = 0
+	lock.Type = syscall.F_WRLCK
+	lock.Whence = 0
+	lock.Pid = 0
+	f, err := os.OpenFile(path, flag, perm)
+	if err != nil {
+		return nil, err
+	}
+	if err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lock); err != nil {
+		f.Close()
+		if err == syscall.EAGAIN {
+			err = ErrLocked
+		}
+		return nil, err
+	}
+	return &LockedFile{f}, nil
+}
+
+func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	var lock syscall.Flock_t
+	lock.Start = 0
+	lock.Len = 0
+	lock.Pid = 0
+	lock.Type = syscall.F_WRLCK
+	lock.Whence = 0
+	f, err := os.OpenFile(path, flag, perm)
+	if err != nil {
+		return nil, err
+	}
+	if err = syscall.FcntlFlock(f.Fd(), syscall.F_SETLKW, &lock); err != nil {
+		f.Close()
+		return nil, err
+	}
+	return &LockedFile{f}, nil
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_unix.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_unix.go
new file mode 100644
index 0000000000000..ed01164de6e47
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_unix.go
@@ -0,0 +1,29 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +// +build !windows,!plan9,!solaris,!linux + +package fileutil + +import ( + "os" +) + +func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + return flockTryLockFile(path, flag, perm) +} + +func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + return flockLockFile(path, flag, perm) +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go new file mode 100644 index 0000000000000..b1817230a3cfb --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go @@ -0,0 +1,125 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build windows + +package fileutil + +import ( + "errors" + "fmt" + "os" + "syscall" + "unsafe" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + procLockFileEx = modkernel32.NewProc("LockFileEx") + + errLocked = errors.New("The process cannot access the file because another process has locked a portion of the file.") +) + +const ( + // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + LOCKFILE_EXCLUSIVE_LOCK = 2 + LOCKFILE_FAIL_IMMEDIATELY = 1 + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx + errLockViolation syscall.Errno = 0x21 +) + +func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := open(path, flag, perm) + if err != nil { + return nil, err + } + if err := lockFile(syscall.Handle(f.Fd()), LOCKFILE_FAIL_IMMEDIATELY); err != nil { + f.Close() + return nil, err + } + return &LockedFile{f}, nil +} + +func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := open(path, flag, perm) + if err != nil { + return nil, err + } + if err := lockFile(syscall.Handle(f.Fd()), 0); err != nil { + f.Close() + return nil, err + } + return &LockedFile{f}, nil +} + +func open(path string, flag int, perm os.FileMode) (*os.File, error) { + if path == "" { + return nil, fmt.Errorf("cannot open empty filename") + } + var access uint32 + switch flag { + case syscall.O_RDONLY: + access = syscall.GENERIC_READ + case syscall.O_WRONLY: + access = syscall.GENERIC_WRITE + case syscall.O_RDWR: + access = syscall.GENERIC_READ | syscall.GENERIC_WRITE + case syscall.O_WRONLY | syscall.O_CREAT: + access = syscall.GENERIC_ALL + default: + panic(fmt.Errorf("flag %v is not supported", flag)) + } + fd, err := syscall.CreateFile(&(syscall.StringToUTF16(path)[0]), + access, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + nil, + syscall.OPEN_ALWAYS, + syscall.FILE_ATTRIBUTE_NORMAL, + 0) + if err != nil { + return nil, err + } + return os.NewFile(uintptr(fd), path), nil +} + +func lockFile(fd syscall.Handle, flags uint32) error { + var flag uint32 = LOCKFILE_EXCLUSIVE_LOCK + flag |= flags + if fd == syscall.InvalidHandle { + return nil + } + err := lockFileEx(fd, flag, 1, 0, &syscall.Overlapped{}) + if err == nil { + return 
nil + } else if err.Error() == errLocked.Error() { + return ErrLocked + } else if err != errLockViolation { + return err + } + return nil +} + +func lockFileEx(h syscall.Handle, flags, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + var reserved uint32 = 0 + r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return err +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go new file mode 100644 index 0000000000000..c747b7cf81f93 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go @@ -0,0 +1,54 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fileutil + +import ( + "io" + "os" +) + +// Preallocate tries to allocate the space for given +// file. This operation is only supported on linux by a +// few filesystems (btrfs, ext4, etc.). +// If the operation is unsupported, no error will be returned. +// Otherwise, the error encountered will be returned. +func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error { + if sizeInBytes == 0 { + // fallocate will return EINVAL if length is 0; skip + return nil + } + if extendFile { + return preallocExtend(f, sizeInBytes) + } + return preallocFixed(f, sizeInBytes) +} + +func preallocExtendTrunc(f *os.File, sizeInBytes int64) error { + curOff, err := f.Seek(0, io.SeekCurrent) + if err != nil { + return err + } + size, err := f.Seek(sizeInBytes, io.SeekEnd) + if err != nil { + return err + } + if _, err = f.Seek(curOff, io.SeekStart); err != nil { + return err + } + if sizeInBytes > size { + return nil + } + return f.Truncate(sizeInBytes) +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go new file mode 100644 index 0000000000000..5a6dccfa796f7 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go @@ -0,0 +1,65 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
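
As its doc comment above notes, Preallocate is best-effort: where fallocate (or the darwin F_PREALLOCATE path) is unsupported it degrades to a truncate-based extension or a no-op. A hedged usage sketch, not part of this diff; the file name and size are illustrative:

package main

import (
	"os"

	"github.com/coreos/etcd/pkg/fileutil"
)

func main() {
	f, err := os.CreateTemp("", "prealloc-demo")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	// Reserve 64 MiB up front and extend the file size; on filesystems
	// without preallocation support this falls back gracefully.
	if err := fileutil.Preallocate(f, 64*1024*1024, true); err != nil {
		panic(err)
	}
}
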
+ +// +build darwin + +package fileutil + +import ( + "os" + "syscall" + "unsafe" +) + +func preallocExtend(f *os.File, sizeInBytes int64) error { + if err := preallocFixed(f, sizeInBytes); err != nil { + return err + } + return preallocExtendTrunc(f, sizeInBytes) +} + +func preallocFixed(f *os.File, sizeInBytes int64) error { + // allocate all requested space or no space at all + // TODO: allocate contiguous space on disk with F_ALLOCATECONTIG flag + fstore := &syscall.Fstore_t{ + Flags: syscall.F_ALLOCATEALL, + Posmode: syscall.F_PEOFPOSMODE, + Length: sizeInBytes} + p := unsafe.Pointer(fstore) + _, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_PREALLOCATE), uintptr(p)) + if errno == 0 || errno == syscall.ENOTSUP { + return nil + } + + // wrong argument to fallocate syscall + if errno == syscall.EINVAL { + // filesystem "st_blocks" are allocated in the units of + // "Allocation Block Size" (run "diskutil info /" command) + var stat syscall.Stat_t + syscall.Fstat(int(f.Fd()), &stat) + + // syscall.Statfs_t.Bsize is "optimal transfer block size" + // and contains matching 4096 value when latest OS X kernel + // supports 4,096 KB filesystem block size + var statfs syscall.Statfs_t + syscall.Fstatfs(int(f.Fd()), &statfs) + blockSize := int64(statfs.Bsize) + + if stat.Blocks*blockSize >= sizeInBytes { + // enough blocks are already allocated + return nil + } + } + return errno +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unix.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unix.go new file mode 100644 index 0000000000000..50bd84f02ada8 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unix.go @@ -0,0 +1,49 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build linux + +package fileutil + +import ( + "os" + "syscall" +) + +func preallocExtend(f *os.File, sizeInBytes int64) error { + // use mode = 0 to change size + err := syscall.Fallocate(int(f.Fd()), 0, 0, sizeInBytes) + if err != nil { + errno, ok := err.(syscall.Errno) + // not supported; fallback + // fallocate EINTRs frequently in some environments; fallback + if ok && (errno == syscall.ENOTSUP || errno == syscall.EINTR) { + return preallocExtendTrunc(f, sizeInBytes) + } + } + return err +} + +func preallocFixed(f *os.File, sizeInBytes int64) error { + // use mode = 1 to keep size; see FALLOC_FL_KEEP_SIZE + err := syscall.Fallocate(int(f.Fd()), 1, 0, sizeInBytes) + if err != nil { + errno, ok := err.(syscall.Errno) + // treat not supported as nil error + if ok && errno == syscall.ENOTSUP { + return nil + } + } + return err +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unsupported.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unsupported.go new file mode 100644 index 0000000000000..162fbc5f7826c --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unsupported.go @@ -0,0 +1,25 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !linux,!darwin + +package fileutil + +import "os" + +func preallocExtend(f *os.File, sizeInBytes int64) error { + return preallocExtendTrunc(f, sizeInBytes) +} + +func preallocFixed(f *os.File, sizeInBytes int64) error { return nil } diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/purge.go b/vendor/github.com/coreos/etcd/pkg/fileutil/purge.go new file mode 100644 index 0000000000000..c97368069198f --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/purge.go @@ -0,0 +1,88 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fileutil + +import ( + "os" + "path/filepath" + "sort" + "strings" + "time" +) + +func PurgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error { + return purgeFile(dirname, suffix, max, interval, stop, nil, nil) +} + +func PurgeFileWithDoneNotify(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) (<-chan struct{}, <-chan error) { + doneC := make(chan struct{}) + errC := purgeFile(dirname, suffix, max, interval, stop, nil, doneC) + return doneC, errC +} + +// purgeFile is the internal implementation for PurgeFile which can post purged files to purgec if non-nil. 
+// if donec is non-nil, the function closes it to notify its exit. +func purgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string, donec chan<- struct{}) <-chan error { + errC := make(chan error, 1) + go func() { + if donec != nil { + defer close(donec) + } + for { + fnames, err := ReadDir(dirname) + if err != nil { + errC <- err + return + } + newfnames := make([]string, 0) + for _, fname := range fnames { + if strings.HasSuffix(fname, suffix) { + newfnames = append(newfnames, fname) + } + } + sort.Strings(newfnames) + fnames = newfnames + for len(newfnames) > int(max) { + f := filepath.Join(dirname, newfnames[0]) + l, err := TryLockFile(f, os.O_WRONLY, PrivateFileMode) + if err != nil { + break + } + if err = os.Remove(f); err != nil { + errC <- err + return + } + if err = l.Close(); err != nil { + plog.Errorf("error unlocking %s when purging file (%v)", l.Name(), err) + errC <- err + return + } + plog.Infof("purged file %s successfully", f) + newfnames = newfnames[1:] + } + if purgec != nil { + for i := 0; i < len(fnames)-len(newfnames); i++ { + purgec <- fnames[i] + } + } + select { + case <-time.After(interval): + case <-stop: + return + } + } + }() + return errC +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/sync.go b/vendor/github.com/coreos/etcd/pkg/fileutil/sync.go new file mode 100644 index 0000000000000..54dd41f4f3517 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/sync.go @@ -0,0 +1,29 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !linux,!darwin + +package fileutil + +import "os" + +// Fsync is a wrapper around file.Sync(). Special handling is needed on darwin platform. +func Fsync(f *os.File) error { + return f.Sync() +} + +// Fdatasync is a wrapper around file.Sync(). Special handling is needed on linux platform. +func Fdatasync(f *os.File) error { + return f.Sync() +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/sync_darwin.go b/vendor/github.com/coreos/etcd/pkg/fileutil/sync_darwin.go new file mode 100644 index 0000000000000..c2f39bf204d23 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/sync_darwin.go @@ -0,0 +1,40 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
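
PurgeFile, defined above, runs a retention loop: every interval it lists dirname and, while more than max files carry the given suffix, locks and removes the first in sorted order. An illustrative sketch of wiring it up, not part of this diff; the directory, suffix, and limits are made-up values:

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/pkg/fileutil"
)

func main() {
	stop := make(chan struct{})
	defer close(stop)

	// Keep at most 5 files ending in "snap" under ./data, checking every 30s.
	errc := fileutil.PurgeFile("./data", "snap", 5, 30*time.Second, stop)

	select {
	case err := <-errc:
		log.Fatalf("purge loop failed: %v", err)
	case <-time.After(2 * time.Minute):
		// demo only: let the purger run for two minutes, then stop it
	}
}
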
+
+// +build darwin
+
+package fileutil
+
+import (
+	"os"
+	"syscall"
+)
+
+// Fsync on HFS/OSX flushes the data onto the physical drive, but the drive
+// may not write it to the persistent media for quite some time and it may be
+// written in out-of-order sequence. Using F_FULLFSYNC ensures that the
+// physical drive's buffer will also get flushed to the media.
+func Fsync(f *os.File) error {
+	_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_FULLFSYNC), uintptr(0))
+	if errno == 0 {
+		return nil
+	}
+	return errno
+}
+
+// Fdatasync on darwin platform invokes fcntl(F_FULLFSYNC) for actual persistence
+// on physical drive media.
+func Fdatasync(f *os.File) error {
+	return Fsync(f)
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/sync_linux.go b/vendor/github.com/coreos/etcd/pkg/fileutil/sync_linux.go
new file mode 100644
index 0000000000000..1bbced915e9bf
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/fileutil/sync_linux.go
@@ -0,0 +1,34 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build linux
+
+package fileutil
+
+import (
+	"os"
+	"syscall"
+)
+
+// Fsync is a wrapper around file.Sync(). Special handling is needed on darwin platform.
+func Fsync(f *os.File) error {
+	return f.Sync()
+}
+
+// Fdatasync is similar to fsync(), but does not flush modified metadata
+// unless that metadata is needed in order to allow a subsequent data retrieval
+// to be correctly handled.
+func Fdatasync(f *os.File) error {
+	return syscall.Fdatasync(int(f.Fd()))
+}
diff --git a/vendor/github.com/coreos/go-systemd/journal/journal.go b/vendor/github.com/coreos/go-systemd/journal/journal.go
new file mode 100644
index 0000000000000..a0f4837a02c9a
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/journal/journal.go
@@ -0,0 +1,225 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package journal provides write bindings to the local systemd journal.
+// It is implemented in pure Go and connects to the journal directly over its
+// unix socket.
+//
+// To read from the journal, see the "sdjournal" package, which wraps
+// sd-journal, a C API.
+//
+// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
+package journal
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"syscall"
+	"unsafe"
+)
+
+// Priority of a journal message
+type Priority int
+
+const (
+	PriEmerg Priority = iota
+	PriAlert
+	PriCrit
+	PriErr
+	PriWarning
+	PriNotice
+	PriInfo
+	PriDebug
+)
+
+var (
+	// This can be overridden at build-time:
+	// https://github.com/golang/go/wiki/GcToolchainTricks#including-build-information-in-the-executable
+	journalSocket = "/run/systemd/journal/socket"
+
+	// unixConnPtr atomically holds the local unconnected Unix-domain socket.
+	// Concrete safe pointer type: *net.UnixConn
+	unixConnPtr unsafe.Pointer
+	// onceConn ensures that unixConnPtr is initialized exactly once.
+	onceConn sync.Once
+)
+
+func init() {
+	onceConn.Do(initConn)
+}
+
+// Enabled checks whether the local systemd journal is available for logging.
+func Enabled() bool {
+	onceConn.Do(initConn)
+
+	if (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) == nil {
+		return false
+	}
+
+	conn, err := net.Dial("unixgram", journalSocket)
+	if err != nil {
+		return false
+	}
+	defer conn.Close()
+
+	return true
+}
+
+// Send a message to the local systemd journal. vars is a map of journald
+// fields to values. Fields must be composed of uppercase letters, numbers,
+// and underscores, but must not start with an underscore. Within these
+// restrictions, any arbitrary field name may be used. Some names have special
+// significance: see the journalctl documentation
+// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html)
+// for more details. vars may be nil.
+func Send(message string, priority Priority, vars map[string]string) error {
+	conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr))
+	if conn == nil {
+		return errors.New("could not initialize socket to journald")
+	}
+
+	socketAddr := &net.UnixAddr{
+		Name: journalSocket,
+		Net:  "unixgram",
+	}
+
+	data := new(bytes.Buffer)
+	appendVariable(data, "PRIORITY", strconv.Itoa(int(priority)))
+	appendVariable(data, "MESSAGE", message)
+	for k, v := range vars {
+		appendVariable(data, k, v)
+	}
+
+	_, _, err := conn.WriteMsgUnix(data.Bytes(), nil, socketAddr)
+	if err == nil {
+		return nil
+	}
+	if !isSocketSpaceError(err) {
+		return err
+	}
+
+	// Large log entry, send it via tempfile and ancillary-fd.
+	file, err := tempFd()
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+	_, err = io.Copy(file, data)
+	if err != nil {
+		return err
+	}
+	rights := syscall.UnixRights(int(file.Fd()))
+	_, _, err = conn.WriteMsgUnix([]byte{}, rights, socketAddr)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Print prints a message to the local systemd journal using Send().
+func Print(priority Priority, format string, a ...interface{}) error { + return Send(fmt.Sprintf(format, a...), priority, nil) +} + +func appendVariable(w io.Writer, name, value string) { + if err := validVarName(name); err != nil { + fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name) + } + if strings.ContainsRune(value, '\n') { + /* When the value contains a newline, we write: + * - the variable name, followed by a newline + * - the size (in 64bit little endian format) + * - the data, followed by a newline + */ + fmt.Fprintln(w, name) + binary.Write(w, binary.LittleEndian, uint64(len(value))) + fmt.Fprintln(w, value) + } else { + /* just write the variable and value all on one line */ + fmt.Fprintf(w, "%s=%s\n", name, value) + } +} + +// validVarName validates a variable name to make sure journald will accept it. +// The variable name must be in uppercase and consist only of characters, +// numbers and underscores, and may not begin with an underscore: +// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html +func validVarName(name string) error { + if name == "" { + return errors.New("Empty variable name") + } else if name[0] == '_' { + return errors.New("Variable name begins with an underscore") + } + + for _, c := range name { + if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') { + return errors.New("Variable name contains invalid characters") + } + } + return nil +} + +// isSocketSpaceError checks whether the error is signaling +// an "overlarge message" condition. +func isSocketSpaceError(err error) bool { + opErr, ok := err.(*net.OpError) + if !ok || opErr == nil { + return false + } + + sysErr, ok := opErr.Err.(*os.SyscallError) + if !ok || sysErr == nil { + return false + } + + return sysErr.Err == syscall.EMSGSIZE || sysErr.Err == syscall.ENOBUFS +} + +// tempFd creates a temporary, unlinked file under `/dev/shm`. +func tempFd() (*os.File, error) { + file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX") + if err != nil { + return nil, err + } + err = syscall.Unlink(file.Name()) + if err != nil { + return nil, err + } + return file, nil +} + +// initConn initializes the global `unixConnPtr` socket. +// It is meant to be called exactly once, at program startup. +func initConn() { + autobind, err := net.ResolveUnixAddr("unixgram", "") + if err != nil { + return + } + + sock, err := net.ListenUnixgram("unixgram", autobind) + if err != nil { + return + } + + atomic.StorePointer(&unixConnPtr, unsafe.Pointer(sock)) +} diff --git a/vendor/github.com/coreos/pkg/LICENSE b/vendor/github.com/coreos/pkg/LICENSE new file mode 100644 index 0000000000000..e06d2081865a7 --- /dev/null +++ b/vendor/github.com/coreos/pkg/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+
diff --git a/vendor/github.com/coreos/pkg/NOTICE b/vendor/github.com/coreos/pkg/NOTICE
new file mode 100644
index 0000000000000..b39ddfa5cbdea
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/NOTICE
@@ -0,0 +1,5 @@
+CoreOS Project
+Copyright 2014 CoreOS, Inc
+
+This product includes software developed at CoreOS, Inc.
+(http://www.coreos.com/).
diff --git a/vendor/github.com/coreos/pkg/capnslog/README.md b/vendor/github.com/coreos/pkg/capnslog/README.md
new file mode 100644
index 0000000000000..f79dbfca5cbe5
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/README.md
@@ -0,0 +1,39 @@
+# capnslog, the CoreOS logging package
+
+There are far too many logging packages out there, with varying licenses, far too many features (colorization, all sorts of log frameworks), or interfaces that are just a pain to use (lack of `Fatalln()`?).
+capnslog provides a simple but consistent logging interface suitable for all kinds of projects.
+
+### Design Principles
+
+##### `package main` is the place where logging gets turned on and routed
+
+A library should not touch log options, only generate log entries. Libraries are silent until main lets them speak.
+
+##### All log options are runtime-configurable.
+
+It is still the job of `main` to expose these configurations. `main` may delegate this to, say, a configuration webhook, but does so explicitly.
+
+##### There is one log object per package. It is registered under its repository and package name.
+
+`main` activates logging for its repository and any dependency repositories it would also like to have output in its logstream. `main` also dictates at which level each subpackage logs.
+
+##### There is *one* output stream, and it is an `io.Writer` composed with a formatter.
+
+Splitting streams is probably not the job of your program, but rather, your log aggregation framework. If you must split output streams, again, `main` configures this and you can write a very simple two-output struct that satisfies io.Writer.
+
+Fancy colorful formatting and JSON output are beyond the scope of a basic logging framework -- they're application/log-collector dependent. These are, at best, provided as options, but more likely, provided by your application.
+
+##### Log objects are an interface
+
+An object knows best how to print itself. Log objects can collect more interesting metadata if they wish; however, because text isn't going away anytime soon, they must all be marshalable to text. The simplest log object is a string, which returns itself. If you wish to do more fancy tricks for printing your log objects, see also JSON output -- introspect and write a formatter which can handle your advanced log interface. Making strings is the only thing guaranteed.
+
+##### Log levels have specific meanings:
+
+ * Critical: Unrecoverable. Must fail.
+ * Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost.
+ * Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning.
+ * Notice: Normal, but important (uncommon) log information.
+ * Info: Normal, working log information; everything is fine, but helpful notices for auditing or common operations.
+ * Debug: Everything is still fine, but even common operations may be logged, and less helpful but more quantity of notices.
+ * Trace: Anything goes, from logging every function call as part of a common operation, to tracing execution of a query.
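
For reviewers new to capnslog: the principles in the README above boil down to a small API that the vendored etcd code in this diff already uses (NewPackageLogger, SetFormatter, SetGlobalLogLevel). A minimal sketch, not part of this diff; the repository and package names are placeholders:

package main

import (
	"os"

	"github.com/coreos/pkg/capnslog"
)

// One log object per package, registered under its repository and package name.
// "github.com/example/project" is a placeholder repository.
var plog = capnslog.NewPackageLogger("github.com/example/project", "main")

func main() {
	// package main is where logging gets turned on and routed.
	capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stderr, false))
	capnslog.SetGlobalLogLevel(capnslog.INFO)

	plog.Infof("service starting, pid=%d", os.Getpid())
	plog.Debugf("suppressed at the INFO level")
}
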
+ diff --git a/vendor/github.com/coreos/pkg/capnslog/formatters.go b/vendor/github.com/coreos/pkg/capnslog/formatters.go new file mode 100644 index 0000000000000..b305a845fb2bf --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/formatters.go @@ -0,0 +1,157 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "bufio" + "fmt" + "io" + "log" + "runtime" + "strings" + "time" +) + +type Formatter interface { + Format(pkg string, level LogLevel, depth int, entries ...interface{}) + Flush() +} + +func NewStringFormatter(w io.Writer) Formatter { + return &StringFormatter{ + w: bufio.NewWriter(w), + } +} + +type StringFormatter struct { + w *bufio.Writer +} + +func (s *StringFormatter) Format(pkg string, l LogLevel, i int, entries ...interface{}) { + now := time.Now().UTC() + s.w.WriteString(now.Format(time.RFC3339)) + s.w.WriteByte(' ') + writeEntries(s.w, pkg, l, i, entries...) + s.Flush() +} + +func writeEntries(w *bufio.Writer, pkg string, _ LogLevel, _ int, entries ...interface{}) { + if pkg != "" { + w.WriteString(pkg + ": ") + } + str := fmt.Sprint(entries...) + endsInNL := strings.HasSuffix(str, "\n") + w.WriteString(str) + if !endsInNL { + w.WriteString("\n") + } +} + +func (s *StringFormatter) Flush() { + s.w.Flush() +} + +func NewPrettyFormatter(w io.Writer, debug bool) Formatter { + return &PrettyFormatter{ + w: bufio.NewWriter(w), + debug: debug, + } +} + +type PrettyFormatter struct { + w *bufio.Writer + debug bool +} + +func (c *PrettyFormatter) Format(pkg string, l LogLevel, depth int, entries ...interface{}) { + now := time.Now() + ts := now.Format("2006-01-02 15:04:05") + c.w.WriteString(ts) + ms := now.Nanosecond() / 1000 + c.w.WriteString(fmt.Sprintf(".%06d", ms)) + if c.debug { + _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call. + if !ok { + file = "???" + line = 1 + } else { + slash := strings.LastIndex(file, "/") + if slash >= 0 { + file = file[slash+1:] + } + } + if line < 0 { + line = 0 // not a real line number + } + c.w.WriteString(fmt.Sprintf(" [%s:%d]", file, line)) + } + c.w.WriteString(fmt.Sprint(" ", l.Char(), " | ")) + writeEntries(c.w, pkg, l, depth, entries...) + c.Flush() +} + +func (c *PrettyFormatter) Flush() { + c.w.Flush() +} + +// LogFormatter emulates the form of the traditional built-in logger. +type LogFormatter struct { + logger *log.Logger + prefix string +} + +// NewLogFormatter is a helper to produce a new LogFormatter struct. It uses the +// golang log package to actually do the logging work so that logs look similar. +func NewLogFormatter(w io.Writer, prefix string, flag int) Formatter { + return &LogFormatter{ + logger: log.New(w, "", flag), // don't use prefix here + prefix: prefix, // save it instead + } +} + +// Format builds a log message for the LogFormatter. The LogLevel is ignored. +func (lf *LogFormatter) Format(pkg string, _ LogLevel, _ int, entries ...interface{}) { + str := fmt.Sprint(entries...) 
+ prefix := lf.prefix + if pkg != "" { + prefix = fmt.Sprintf("%s%s: ", prefix, pkg) + } + lf.logger.Output(5, fmt.Sprintf("%s%v", prefix, str)) // call depth is 5 +} + +// Flush is included so that the interface is complete, but is a no-op. +func (lf *LogFormatter) Flush() { + // noop +} + +// NilFormatter is a no-op log formatter that does nothing. +type NilFormatter struct { +} + +// NewNilFormatter is a helper to produce a new LogFormatter struct. It logs no +// messages so that you can cause part of your logging to be silent. +func NewNilFormatter() Formatter { + return &NilFormatter{} +} + +// Format does nothing. +func (_ *NilFormatter) Format(_ string, _ LogLevel, _ int, _ ...interface{}) { + // noop +} + +// Flush is included so that the interface is complete, but is a no-op. +func (_ *NilFormatter) Flush() { + // noop +} diff --git a/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go new file mode 100644 index 0000000000000..426603ef305c7 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go @@ -0,0 +1,96 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "bufio" + "bytes" + "io" + "os" + "runtime" + "strconv" + "strings" + "time" +) + +var pid = os.Getpid() + +type GlogFormatter struct { + StringFormatter +} + +func NewGlogFormatter(w io.Writer) *GlogFormatter { + g := &GlogFormatter{} + g.w = bufio.NewWriter(w) + return g +} + +func (g GlogFormatter) Format(pkg string, level LogLevel, depth int, entries ...interface{}) { + g.w.Write(GlogHeader(level, depth+1)) + g.StringFormatter.Format(pkg, level, depth+1, entries...) +} + +func GlogHeader(level LogLevel, depth int) []byte { + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] + now := time.Now().UTC() + _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call. + if !ok { + file = "???" 
+ line = 1
+ } else {
+ slash := strings.LastIndex(file, "/")
+ if slash >= 0 {
+ file = file[slash+1:]
+ }
+ }
+ if line < 0 {
+ line = 0 // not a real line number
+ }
+ buf := &bytes.Buffer{}
+ buf.Grow(30)
+ _, month, day := now.Date()
+ hour, minute, second := now.Clock()
+ buf.WriteString(level.Char())
+ twoDigits(buf, int(month))
+ twoDigits(buf, day)
+ buf.WriteByte(' ')
+ twoDigits(buf, hour)
+ buf.WriteByte(':')
+ twoDigits(buf, minute)
+ buf.WriteByte(':')
+ twoDigits(buf, second)
+ buf.WriteByte('.')
+ buf.WriteString(strconv.Itoa(now.Nanosecond() / 1000))
+ buf.WriteByte('Z')
+ buf.WriteByte(' ')
+ buf.WriteString(strconv.Itoa(pid))
+ buf.WriteByte(' ')
+ buf.WriteString(file)
+ buf.WriteByte(':')
+ buf.WriteString(strconv.Itoa(line))
+ buf.WriteByte(']')
+ buf.WriteByte(' ')
+ return buf.Bytes()
+}
+
+const digits = "0123456789"
+
+func twoDigits(b *bytes.Buffer, d int) {
+ c2 := digits[d%10]
+ d /= 10
+ c1 := digits[d%10]
+ b.WriteByte(c1)
+ b.WriteByte(c2)
+}
diff --git a/vendor/github.com/coreos/pkg/capnslog/init.go b/vendor/github.com/coreos/pkg/capnslog/init.go
new file mode 100644
index 0000000000000..9b1e4e6df8d31
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/init.go
@@ -0,0 +1,50 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+//go:build !windows
+// +build !windows
+
+package capnslog
+
+import (
+ "io"
+ "os"
+ "syscall"
+)
+
+// Here's where the opinionation comes in. We need some sensible defaults,
+// especially after taking over the log package. Your project (whatever it may
+// be) may see things differently. That's okay; there should be no defaults in
+// the main package that cannot be controlled or overridden programmatically,
+// otherwise it's a bug. The way to do so is to create your own init_log.go
+// file, much like this one.
+
+func init() {
+ initHijack()
+
+ // Go `log` package uses os.Stderr.
+ SetFormatter(NewDefaultFormatter(os.Stderr))
+ SetGlobalLogLevel(INFO)
+}
+
+func NewDefaultFormatter(out io.Writer) Formatter {
+ if syscall.Getppid() == 1 {
+ // We're running under init, which may be systemd.
+ f, err := NewJournaldFormatter()
+ if err == nil {
+ return f
+ }
+ }
+ return NewPrettyFormatter(out, false)
+}
diff --git a/vendor/github.com/coreos/pkg/capnslog/init_windows.go b/vendor/github.com/coreos/pkg/capnslog/init_windows.go
new file mode 100644
index 0000000000000..45530506537ff
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/init_windows.go
@@ -0,0 +1,25 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import "os" + +func init() { + initHijack() + + // Go `log` package uses os.Stderr. + SetFormatter(NewPrettyFormatter(os.Stderr, false)) + SetGlobalLogLevel(INFO) +} diff --git a/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go b/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go new file mode 100644 index 0000000000000..51751ccb2903a --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go @@ -0,0 +1,69 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +//go:build !windows +// +build !windows + +package capnslog + +import ( + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/coreos/go-systemd/journal" +) + +func NewJournaldFormatter() (Formatter, error) { + if !journal.Enabled() { + return nil, errors.New("No systemd detected") + } + return &journaldFormatter{}, nil +} + +type journaldFormatter struct{} + +func (j *journaldFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) { + var pri journal.Priority + switch l { + case CRITICAL: + pri = journal.PriCrit + case ERROR: + pri = journal.PriErr + case WARNING: + pri = journal.PriWarning + case NOTICE: + pri = journal.PriNotice + case INFO: + pri = journal.PriInfo + case DEBUG: + pri = journal.PriDebug + case TRACE: + pri = journal.PriDebug + default: + panic("Unhandled loglevel") + } + msg := fmt.Sprint(entries...) + tags := map[string]string{ + "PACKAGE": pkg, + "SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]), + } + err := journal.Send(msg, pri, tags) + if err != nil { + fmt.Fprintln(os.Stderr, err) + } +} + +func (j *journaldFormatter) Flush() {} diff --git a/vendor/github.com/coreos/pkg/capnslog/log_hijack.go b/vendor/github.com/coreos/pkg/capnslog/log_hijack.go new file mode 100644 index 0000000000000..970086b9f9730 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/log_hijack.go @@ -0,0 +1,39 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package capnslog + +import ( + "log" +) + +func initHijack() { + pkg := NewPackageLogger("log", "") + w := packageWriter{pkg} + log.SetFlags(0) + log.SetPrefix("") + log.SetOutput(w) +} + +type packageWriter struct { + pl *PackageLogger +} + +func (p packageWriter) Write(b []byte) (int, error) { + if p.pl.level < INFO { + return 0, nil + } + p.pl.internalLog(calldepth+2, INFO, string(b)) + return len(b), nil +} diff --git a/vendor/github.com/coreos/pkg/capnslog/logmap.go b/vendor/github.com/coreos/pkg/capnslog/logmap.go new file mode 100644 index 0000000000000..226b60c22534e --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/logmap.go @@ -0,0 +1,245 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "errors" + "strings" + "sync" +) + +// LogLevel is the set of all log levels. +type LogLevel int8 + +const ( + // CRITICAL is the lowest log level; only errors which will end the program will be propagated. + CRITICAL LogLevel = iota - 1 + // ERROR is for errors that are not fatal but lead to troubling behavior. + ERROR + // WARNING is for errors which are not fatal and not errors, but are unusual. Often sourced from misconfigurations. + WARNING + // NOTICE is for normal but significant conditions. + NOTICE + // INFO is a log level for common, everyday log updates. + INFO + // DEBUG is the default hidden level for more verbose updates about internal processes. + DEBUG + // TRACE is for (potentially) call by call tracing of programs. + TRACE +) + +// Char returns a single-character representation of the log level. +func (l LogLevel) Char() string { + switch l { + case CRITICAL: + return "C" + case ERROR: + return "E" + case WARNING: + return "W" + case NOTICE: + return "N" + case INFO: + return "I" + case DEBUG: + return "D" + case TRACE: + return "T" + default: + panic("Unhandled loglevel") + } +} + +// String returns a multi-character representation of the log level. +func (l LogLevel) String() string { + switch l { + case CRITICAL: + return "CRITICAL" + case ERROR: + return "ERROR" + case WARNING: + return "WARNING" + case NOTICE: + return "NOTICE" + case INFO: + return "INFO" + case DEBUG: + return "DEBUG" + case TRACE: + return "TRACE" + default: + panic("Unhandled loglevel") + } +} + +// Update using the given string value. Fulfills the flag.Value interface. +func (l *LogLevel) Set(s string) error { + value, err := ParseLevel(s) + if err != nil { + return err + } + + *l = value + return nil +} + +// Returns an empty string, only here to fulfill the pflag.Value interface. +func (l *LogLevel) Type() string { + return "" +} + +// ParseLevel translates some potential loglevel strings into their corresponding levels. 
+func ParseLevel(s string) (LogLevel, error) { + switch s { + case "CRITICAL", "C": + return CRITICAL, nil + case "ERROR", "0", "E": + return ERROR, nil + case "WARNING", "1", "W": + return WARNING, nil + case "NOTICE", "2", "N": + return NOTICE, nil + case "INFO", "3", "I": + return INFO, nil + case "DEBUG", "4", "D": + return DEBUG, nil + case "TRACE", "5", "T": + return TRACE, nil + } + return CRITICAL, errors.New("couldn't parse log level " + s) +} + +type RepoLogger map[string]*PackageLogger + +type loggerStruct struct { + sync.Mutex + repoMap map[string]RepoLogger + formatter Formatter +} + +// logger is the global logger +var logger = new(loggerStruct) + +// SetGlobalLogLevel sets the log level for all packages in all repositories +// registered with capnslog. +func SetGlobalLogLevel(l LogLevel) { + logger.Lock() + defer logger.Unlock() + for _, r := range logger.repoMap { + r.setRepoLogLevelInternal(l) + } +} + +// GetRepoLogger may return the handle to the repository's set of packages' loggers. +func GetRepoLogger(repo string) (RepoLogger, error) { + logger.Lock() + defer logger.Unlock() + r, ok := logger.repoMap[repo] + if !ok { + return nil, errors.New("no packages registered for repo " + repo) + } + return r, nil +} + +// MustRepoLogger returns the handle to the repository's packages' loggers. +func MustRepoLogger(repo string) RepoLogger { + r, err := GetRepoLogger(repo) + if err != nil { + panic(err) + } + return r +} + +// SetRepoLogLevel sets the log level for all packages in the repository. +func (r RepoLogger) SetRepoLogLevel(l LogLevel) { + logger.Lock() + defer logger.Unlock() + r.setRepoLogLevelInternal(l) +} + +func (r RepoLogger) setRepoLogLevelInternal(l LogLevel) { + for _, v := range r { + v.level = l + } +} + +// ParseLogLevelConfig parses a comma-separated string of "package=loglevel", in +// order, and returns a map of the results, for use in SetLogLevel. +func (r RepoLogger) ParseLogLevelConfig(conf string) (map[string]LogLevel, error) { + setlist := strings.Split(conf, ",") + out := make(map[string]LogLevel) + for _, setstring := range setlist { + setting := strings.Split(setstring, "=") + if len(setting) != 2 { + return nil, errors.New("oddly structured `pkg=level` option: " + setstring) + } + l, err := ParseLevel(setting[1]) + if err != nil { + return nil, err + } + out[setting[0]] = l + } + return out, nil +} + +// SetLogLevel takes a map of package names within a repository to their desired +// loglevel, and sets the levels appropriately. Unknown packages are ignored. +// "*" is a special package name that corresponds to all packages, and will be +// processed first. +func (r RepoLogger) SetLogLevel(m map[string]LogLevel) { + logger.Lock() + defer logger.Unlock() + if l, ok := m["*"]; ok { + r.setRepoLogLevelInternal(l) + } + for k, v := range m { + l, ok := r[k] + if !ok { + continue + } + l.level = v + } +} + +// SetFormatter sets the formatting function for all logs. +func SetFormatter(f Formatter) { + logger.Lock() + defer logger.Unlock() + logger.formatter = f +} + +// NewPackageLogger creates a package logger object. +// This should be defined as a global var in your package, referencing your repo. 
+func NewPackageLogger(repo string, pkg string) (p *PackageLogger) { + logger.Lock() + defer logger.Unlock() + if logger.repoMap == nil { + logger.repoMap = make(map[string]RepoLogger) + } + r, rok := logger.repoMap[repo] + if !rok { + logger.repoMap[repo] = make(RepoLogger) + r = logger.repoMap[repo] + } + p, pok := r[pkg] + if !pok { + r[pkg] = &PackageLogger{ + pkg: pkg, + level: INFO, + } + p = r[pkg] + } + return +} diff --git a/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go b/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go new file mode 100644 index 0000000000000..00ff37149aeb5 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go @@ -0,0 +1,191 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "fmt" + "os" +) + +type PackageLogger struct { + pkg string + level LogLevel +} + +const calldepth = 2 + +func (p *PackageLogger) internalLog(depth int, inLevel LogLevel, entries ...interface{}) { + logger.Lock() + defer logger.Unlock() + if inLevel != CRITICAL && p.level < inLevel { + return + } + if logger.formatter != nil { + logger.formatter.Format(p.pkg, inLevel, depth+1, entries...) + } +} + +// SetLevel allows users to change the current logging level. +func (p *PackageLogger) SetLevel(l LogLevel) { + logger.Lock() + defer logger.Unlock() + p.level = l +} + +// LevelAt checks if the given log level will be outputted under current setting. +func (p *PackageLogger) LevelAt(l LogLevel) bool { + logger.Lock() + defer logger.Unlock() + return p.level >= l +} + +// Log a formatted string at any level between ERROR and TRACE +func (p *PackageLogger) Logf(l LogLevel, format string, args ...interface{}) { + p.internalLog(calldepth, l, fmt.Sprintf(format, args...)) +} + +// Log a message at any level between ERROR and TRACE +func (p *PackageLogger) Log(l LogLevel, args ...interface{}) { + p.internalLog(calldepth, l, fmt.Sprint(args...)) +} + +// log stdlib compatibility + +func (p *PackageLogger) Println(args ...interface{}) { + p.internalLog(calldepth, INFO, fmt.Sprintln(args...)) +} + +func (p *PackageLogger) Printf(format string, args ...interface{}) { + p.Logf(INFO, format, args...) +} + +func (p *PackageLogger) Print(args ...interface{}) { + p.internalLog(calldepth, INFO, fmt.Sprint(args...)) +} + +// Panic and fatal + +func (p *PackageLogger) Panicf(format string, args ...interface{}) { + s := fmt.Sprintf(format, args...) + p.internalLog(calldepth, CRITICAL, s) + panic(s) +} + +func (p *PackageLogger) Panic(args ...interface{}) { + s := fmt.Sprint(args...) + p.internalLog(calldepth, CRITICAL, s) + panic(s) +} + +func (p *PackageLogger) Panicln(args ...interface{}) { + s := fmt.Sprintln(args...) + p.internalLog(calldepth, CRITICAL, s) + panic(s) +} + +func (p *PackageLogger) Fatalf(format string, args ...interface{}) { + p.Logf(CRITICAL, format, args...) + os.Exit(1) +} + +func (p *PackageLogger) Fatal(args ...interface{}) { + s := fmt.Sprint(args...) 
+ p.internalLog(calldepth, CRITICAL, s) + os.Exit(1) +} + +func (p *PackageLogger) Fatalln(args ...interface{}) { + s := fmt.Sprintln(args...) + p.internalLog(calldepth, CRITICAL, s) + os.Exit(1) +} + +// Error Functions + +func (p *PackageLogger) Errorf(format string, args ...interface{}) { + p.Logf(ERROR, format, args...) +} + +func (p *PackageLogger) Error(entries ...interface{}) { + p.internalLog(calldepth, ERROR, entries...) +} + +// Warning Functions + +func (p *PackageLogger) Warningf(format string, args ...interface{}) { + p.Logf(WARNING, format, args...) +} + +func (p *PackageLogger) Warning(entries ...interface{}) { + p.internalLog(calldepth, WARNING, entries...) +} + +// Notice Functions + +func (p *PackageLogger) Noticef(format string, args ...interface{}) { + p.Logf(NOTICE, format, args...) +} + +func (p *PackageLogger) Notice(entries ...interface{}) { + p.internalLog(calldepth, NOTICE, entries...) +} + +// Info Functions + +func (p *PackageLogger) Infof(format string, args ...interface{}) { + p.Logf(INFO, format, args...) +} + +func (p *PackageLogger) Info(entries ...interface{}) { + p.internalLog(calldepth, INFO, entries...) +} + +// Debug Functions + +func (p *PackageLogger) Debugf(format string, args ...interface{}) { + if p.level < DEBUG { + return + } + p.Logf(DEBUG, format, args...) +} + +func (p *PackageLogger) Debug(entries ...interface{}) { + if p.level < DEBUG { + return + } + p.internalLog(calldepth, DEBUG, entries...) +} + +// Trace Functions + +func (p *PackageLogger) Tracef(format string, args ...interface{}) { + if p.level < TRACE { + return + } + p.Logf(TRACE, format, args...) +} + +func (p *PackageLogger) Trace(entries ...interface{}) { + if p.level < TRACE { + return + } + p.internalLog(calldepth, TRACE, entries...) +} + +func (p *PackageLogger) Flush() { + logger.Lock() + defer logger.Unlock() + logger.formatter.Flush() +} diff --git a/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go new file mode 100644 index 0000000000000..5f10547891244 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go @@ -0,0 +1,66 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +//go:build !windows +// +build !windows + +package capnslog + +import ( + "fmt" + "log/syslog" +) + +func NewSyslogFormatter(w *syslog.Writer) Formatter { + return &syslogFormatter{w} +} + +func NewDefaultSyslogFormatter(tag string) (Formatter, error) { + w, err := syslog.New(syslog.LOG_DEBUG, tag) + if err != nil { + return nil, err + } + return NewSyslogFormatter(w), nil +} + +type syslogFormatter struct { + w *syslog.Writer +} + +func (s *syslogFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) { + for _, entry := range entries { + str := fmt.Sprint(entry) + switch l { + case CRITICAL: + s.w.Crit(str) + case ERROR: + s.w.Err(str) + case WARNING: + s.w.Warning(str) + case NOTICE: + s.w.Notice(str) + case INFO: + s.w.Info(str) + case DEBUG: + s.w.Debug(str) + case TRACE: + s.w.Debug(str) + default: + panic("Unhandled loglevel") + } + } +} + +func (s *syslogFormatter) Flush() { +} diff --git a/vendor/github.com/distribution/reference/README.md b/vendor/github.com/distribution/reference/README.md index e2531e49c4f99..172a02e0b3b5a 100644 --- a/vendor/github.com/distribution/reference/README.md +++ b/vendor/github.com/distribution/reference/README.md @@ -10,7 +10,7 @@ Go library to handle references to container images. [![codecov](https://codecov.io/gh/distribution/reference/branch/main/graph/badge.svg)](https://codecov.io/gh/distribution/reference) [![FOSSA Status](https://app.fossa.com/api/projects/custom%2B162%2Fgithub.com%2Fdistribution%2Freference.svg?type=shield)](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fdistribution%2Freference?ref=badge_shield) -This repository contains a library for handling refrences to container images held in container registries. Please see [godoc](https://pkg.go.dev/github.com/distribution/reference) for details. +This repository contains a library for handling references to container images held in container registries. Please see [godoc](https://pkg.go.dev/github.com/distribution/reference) for details. ## Contribution diff --git a/vendor/github.com/distribution/reference/normalize.go b/vendor/github.com/distribution/reference/normalize.go index a30229d01bb4e..f4128314c150b 100644 --- a/vendor/github.com/distribution/reference/normalize.go +++ b/vendor/github.com/distribution/reference/normalize.go @@ -123,20 +123,51 @@ func ParseDockerRef(ref string) (Named, error) { // splitDockerDomain splits a repository name to domain and remote-name. // If no valid domain is found, the default domain is used. Repository name // needs to be already validated before. -func splitDockerDomain(name string) (domain, remainder string) { - i := strings.IndexRune(name, '/') - if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != localhost && strings.ToLower(name[:i]) == name[:i]) { - domain, remainder = defaultDomain, name - } else { - domain, remainder = name[:i], name[i+1:] - } - if domain == legacyDefaultDomain { - domain = defaultDomain - } - if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { - remainder = officialRepoPrefix + remainder - } - return +func splitDockerDomain(name string) (domain, remoteName string) { + maybeDomain, maybeRemoteName, ok := strings.Cut(name, "/") + if !ok { + // Fast-path for single element ("familiar" names), such as "ubuntu" + // or "ubuntu:latest". Familiar names must be handled separately, to + // prevent them from being handled as "hostname:port". 
+ //
+ // Canonicalize them as "docker.io/library/name[:tag]"
+
+ // FIXME(thaJeztah): account for bare "localhost" or "example.com" names, which SHOULD be considered a domain.
+ return defaultDomain, officialRepoPrefix + name
+ }
+
+ switch {
+ case maybeDomain == localhost:
+ // localhost is a reserved namespace and always considered a domain.
+ domain, remoteName = maybeDomain, maybeRemoteName
+ case maybeDomain == legacyDefaultDomain:
+ // canonicalize the Docker Hub and legacy "Docker Index" domains.
+ domain, remoteName = defaultDomain, maybeRemoteName
+ case strings.ContainsAny(maybeDomain, ".:"):
+ // Likely a domain or IP-address:
+ //
+ // - contains a "." (e.g., "example.com" or "127.0.0.1")
+ // - contains a ":" (e.g., "example:5000", "::1", or "[::1]:5000")
+ domain, remoteName = maybeDomain, maybeRemoteName
+ case strings.ToLower(maybeDomain) != maybeDomain:
+ // Uppercase namespaces are not allowed, so if the first element
+ // is not lowercase, we assume it to be a domain-name.
+ domain, remoteName = maybeDomain, maybeRemoteName
+ default:
+ // None of the above: it's not a domain, so use the default, and
+ // use the name input as the remote-name.
+ domain, remoteName = defaultDomain, name
+ }
+
+ if domain == defaultDomain && !strings.ContainsRune(remoteName, '/') {
+ // Canonicalize "familiar" names, but only on Docker Hub, not
+ // on other domains:
+ //
+ // "docker.io/ubuntu[:tag]" => "docker.io/library/ubuntu[:tag]"
+ remoteName = officialRepoPrefix + remoteName
+ }
+
+ return domain, remoteName
}

// familiarizeName returns a shortened version of the name familiar
diff --git a/vendor/github.com/distribution/reference/reference.go b/vendor/github.com/distribution/reference/reference.go
index e98c44daa2a85..900398bde7d79 100644
--- a/vendor/github.com/distribution/reference/reference.go
+++ b/vendor/github.com/distribution/reference/reference.go
@@ -35,8 +35,13 @@ import (
)

const (
+ // RepositoryNameTotalLengthMax is the maximum total number of characters in a repository name.
+ RepositoryNameTotalLengthMax = 255
+
 // NameTotalLengthMax is the maximum total number of characters in a repository name.
- NameTotalLengthMax = 255
+ //
+ // Deprecated: use [RepositoryNameTotalLengthMax] instead.
+ NameTotalLengthMax = RepositoryNameTotalLengthMax
)

var (
@@ -55,8 +60,8 @@ var (
 // ErrNameEmpty is returned for empty, invalid repository names.
 ErrNameEmpty = errors.New("repository name must have at least one component")

- // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
- ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
+ // ErrNameTooLong is returned when a repository name is longer than RepositoryNameTotalLengthMax.
+ ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", RepositoryNameTotalLengthMax)

 // ErrNameNotCanonical is returned when a name is not canonical.
 ErrNameNotCanonical = errors.New("repository name must be canonical")
@@ -165,6 +170,9 @@ func Path(named Named) (name string) {
 return path
}

+// splitDomain splits a named reference into a hostname and path string.
+// If no valid hostname is found, the hostname is empty and the full value
+// is returned as name.
func splitDomain(name string) (string, string) {
 match := anchoredNameRegexp.FindStringSubmatch(name)
 if len(match) != 3 {
@@ -173,19 +181,6 @@ func splitDomain(name string) (string, string) {
 return match[1], match[2]
}

-// SplitHostname splits a named reference into a
-// hostname and name string. If no valid hostname is
-// found, the hostname is empty and the full value
-// is returned as name
-//
-// Deprecated: Use [Domain] or [Path].
-func SplitHostname(named Named) (string, string) {
- if r, ok := named.(namedRepository); ok {
- return r.Domain(), r.Path()
- }
- return splitDomain(named.Name())
-}
-
// Parse parses s and returns a syntactically valid Reference.
// If an error was encountered it is returned, along with a nil Reference.
func Parse(s string) (Reference, error) {
@@ -200,10 +195,6 @@ func Parse(s string) (Reference, error) {
 return nil, ErrReferenceInvalidFormat
 }

- if len(matches[1]) > NameTotalLengthMax {
- return nil, ErrNameTooLong
- }
-
 var repo repository

 nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
@@ -215,6 +206,10 @@ func Parse(s string) (Reference, error) {
 repo.path = matches[1]
 }

+ if len(repo.path) > RepositoryNameTotalLengthMax {
+ return nil, ErrNameTooLong
+ }
+
 ref := reference{
 namedRepository: repo,
 tag: matches[2],
@@ -253,14 +248,15 @@ func ParseNamed(s string) (Named, error) {
// WithName returns a named object representing the given string. If the input
// is invalid ErrReferenceInvalidFormat will be returned.
func WithName(name string) (Named, error) {
- if len(name) > NameTotalLengthMax {
- return nil, ErrNameTooLong
- }
-
 match := anchoredNameRegexp.FindStringSubmatch(name)
 if match == nil || len(match) != 3 {
 return nil, ErrReferenceInvalidFormat
 }
+
+ if len(match[2]) > RepositoryNameTotalLengthMax {
+ return nil, ErrNameTooLong
+ }
+
 return repository{
 domain: match[1],
 path: match[2],
diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml
index e55a76fc63c38..201b54906441a 100644
--- a/vendor/github.com/docker/docker/api/swagger.yaml
+++ b/vendor/github.com/docker/docker/api/swagger.yaml
@@ -391,7 +391,11 @@ definitions:
 ReadOnlyNonRecursive:
 description: |
 Make the mount non-recursively read-only, but still leave the mount recursive
- (unless NonRecursive is set to true in conjunction).
+ (unless NonRecursive is set to `true` in conjunction).
+
+ Added in v1.44; before that version, all read-only mounts were
+ non-recursive by default. To match the previous behaviour, this
+ will default to `true` for clients on versions prior to v1.44.
 type: "boolean"
 default: false
 ReadOnlyForceRecursive:
@@ -1743,8 +1747,12 @@ definitions:
 description: |
 Date and time at which the image was created, formatted in
 [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+
+ This information is only available if present in the image,
+ and omitted otherwise.
 type: "string"
- x-nullable: false
+ format: "dateTime"
+ x-nullable: true
 example: "2022-02-04T21:20:12.497794809Z"
 Container:
 description: |
diff --git a/vendor/github.com/docker/docker/api/types/backend/backend.go b/vendor/github.com/docker/docker/api/types/backend/backend.go
index 613da5517df19..ee913d247e3c8 100644
--- a/vendor/github.com/docker/docker/api/types/backend/backend.go
+++ b/vendor/github.com/docker/docker/api/types/backend/backend.go
@@ -13,12 +13,13 @@ import (

// ContainerCreateConfig is the parameter set to ContainerCreate()
type ContainerCreateConfig struct {
- Name string
- Config *container.Config
- HostConfig *container.HostConfig
- NetworkingConfig *network.NetworkingConfig
- Platform *ocispec.Platform
- AdjustCPUShares bool
+ Name string
+ Config *container.Config
+ HostConfig *container.HostConfig
+ NetworkingConfig *network.NetworkingConfig
+ Platform *ocispec.Platform
+ AdjustCPUShares bool
+ DefaultReadOnlyNonRecursive bool
}

// ContainerRmConfig holds arguments for the container remove
diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go
index 5c56a0cafef19..56a8b77d45de1 100644
--- a/vendor/github.com/docker/docker/api/types/types.go
+++ b/vendor/github.com/docker/docker/api/types/types.go
@@ -72,7 +72,10 @@ type ImageInspect struct {

 // Created is the date and time at which the image was created, formatted in
 // RFC 3339 nano-seconds (time.RFC3339Nano).
- Created string
+ //
+ // This information is only available if present in the image,
+ // and omitted otherwise.
+ Created string `json:",omitempty"`

 // Container is the ID of the container that was used to create the image.
 //
diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go
index 0b496b0fa66f4..f2eeb6c5702ed 100644
--- a/vendor/github.com/docker/docker/client/client.go
+++ b/vendor/github.com/docker/docker/client/client.go
@@ -265,17 +265,22 @@ func (cli *Client) Close() error {
// This allows for version-dependent code to use the same version as will
// be negotiated when making the actual requests, and for which cases
// we cannot do the negotiation lazily.
-func (cli *Client) checkVersion(ctx context.Context) {
- if cli.negotiateVersion && !cli.negotiated {
- cli.NegotiateAPIVersion(ctx)
+func (cli *Client) checkVersion(ctx context.Context) error {
+ if !cli.manualOverride && cli.negotiateVersion && !cli.negotiated {
+ ping, err := cli.Ping(ctx)
+ if err != nil {
+ return err
+ }
+ cli.negotiateAPIVersionPing(ping)
 }
+ return nil
}

// getAPIPath returns the versioned request path to call the API.
// It appends the query parameters to the path if they are not empty.
func (cli *Client) getAPIPath(ctx context.Context, p string, query url.Values) string {
 var apiPath string
- cli.checkVersion(ctx)
+ _ = cli.checkVersion(ctx)
 if cli.version != "" {
 v := strings.TrimPrefix(cli.version, "v")
 apiPath = path.Join(cli.basePath, "/v"+v, p)
@@ -307,7 +312,11 @@ func (cli *Client) ClientVersion() string {
// added (1.24).
func (cli *Client) NegotiateAPIVersion(ctx context.Context) {
 if !cli.manualOverride {
- ping, _ := cli.Ping(ctx)
+ ping, err := cli.Ping(ctx)
+ if err != nil {
+ // FIXME(thaJeztah): Ping returns an error when failing to connect to the API; we should not swallow the error here, and instead return it.
+ return + } cli.negotiateAPIVersionPing(ping) } } diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go index 409f5b492a6ed..5442d4267d09e 100644 --- a/vendor/github.com/docker/docker/client/container_create.go +++ b/vendor/github.com/docker/docker/client/container_create.go @@ -28,7 +28,9 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config // // Normally, version-negotiation (if enabled) would not happen until // the API request is made. - cli.checkVersion(ctx) + if err := cli.checkVersion(ctx); err != nil { + return response, err + } if err := cli.NewVersionError(ctx, "1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil { return response, err diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go index 3fff0c8288974..526a3876a4a77 100644 --- a/vendor/github.com/docker/docker/client/container_exec.go +++ b/vendor/github.com/docker/docker/client/container_exec.go @@ -18,7 +18,9 @@ func (cli *Client) ContainerExecCreate(ctx context.Context, container string, co // // Normally, version-negotiation (if enabled) would not happen until // the API request is made. - cli.checkVersion(ctx) + if err := cli.checkVersion(ctx); err != nil { + return response, err + } if err := cli.NewVersionError(ctx, "1.25", "env"); len(config.Env) != 0 && err != nil { return response, err diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go index 825d3e4e9d9bd..02b5079bc463c 100644 --- a/vendor/github.com/docker/docker/client/container_restart.go +++ b/vendor/github.com/docker/docker/client/container_restart.go @@ -23,7 +23,9 @@ func (cli *Client) ContainerRestart(ctx context.Context, containerID string, opt // // Normally, version-negotiation (if enabled) would not happen until // the API request is made. - cli.checkVersion(ctx) + if err := cli.checkVersion(ctx); err != nil { + return err + } if versions.GreaterThanOrEqualTo(cli.version, "1.42") { query.Set("signal", options.Signal) } diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go index ac0cab69de94d..7c98a354b42e4 100644 --- a/vendor/github.com/docker/docker/client/container_stop.go +++ b/vendor/github.com/docker/docker/client/container_stop.go @@ -27,7 +27,9 @@ func (cli *Client) ContainerStop(ctx context.Context, containerID string, option // // Normally, version-negotiation (if enabled) would not happen until // the API request is made. - cli.checkVersion(ctx) + if err := cli.checkVersion(ctx); err != nil { + return err + } if versions.GreaterThanOrEqualTo(cli.version, "1.42") { query.Set("signal", options.Signal) } diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go index b8d3bdef0db8e..8bb6be0a18b27 100644 --- a/vendor/github.com/docker/docker/client/container_wait.go +++ b/vendor/github.com/docker/docker/client/container_wait.go @@ -30,19 +30,22 @@ const containerWaitErrorMsgLimit = 2 * 1024 /* Max: 2KiB */ // synchronize ContainerWait with other calls, such as specifying a // "next-exit" condition before issuing a ContainerStart request. 
func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) { + resultC := make(chan container.WaitResponse) + errC := make(chan error, 1) + // Make sure we negotiated (if the client is configured to do so), // as code below contains API-version specific handling of options. // // Normally, version-negotiation (if enabled) would not happen until // the API request is made. - cli.checkVersion(ctx) + if err := cli.checkVersion(ctx); err != nil { + errC <- err + return resultC, errC + } if versions.LessThan(cli.ClientVersion(), "1.30") { return cli.legacyContainerWait(ctx, containerID) } - resultC := make(chan container.WaitResponse) - errC := make(chan error, 1) - query := url.Values{} if condition != "" { query.Set("condition", string(condition)) diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go index 4b96b02085852..0d01e243fe0ba 100644 --- a/vendor/github.com/docker/docker/client/errors.go +++ b/vendor/github.com/docker/docker/client/errors.go @@ -11,15 +11,16 @@ import ( // errConnectionFailed implements an error returned when connection failed. type errConnectionFailed struct { - host string + error } // Error returns a string representation of an errConnectionFailed -func (err errConnectionFailed) Error() string { - if err.host == "" { - return "Cannot connect to the Docker daemon. Is the docker daemon running on this host?" - } - return fmt.Sprintf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", err.host) +func (e errConnectionFailed) Error() string { + return e.error.Error() +} + +func (e errConnectionFailed) Unwrap() error { + return e.error } // IsErrConnectionFailed returns true if the error is caused by connection failed. @@ -29,7 +30,13 @@ func IsErrConnectionFailed(err error) bool { // ErrorConnectionFailed returns an error with host in the error message when connection to docker daemon failed. func ErrorConnectionFailed(host string) error { - return errConnectionFailed{host: host} + var err error + if host == "" { + err = fmt.Errorf("Cannot connect to the Docker daemon. Is the docker daemon running on this host?") + } else { + err = fmt.Errorf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", host) + } + return errConnectionFailed{error: err} } // IsErrNotFound returns true if the error is a NotFound error, which is returned @@ -60,7 +67,9 @@ func (cli *Client) NewVersionError(ctx context.Context, APIrequired, feature str // // Normally, version-negotiation (if enabled) would not happen until // the API request is made. - cli.checkVersion(ctx) + if err := cli.checkVersion(ctx); err != nil { + return err + } if cli.version != "" && versions.LessThan(cli.version, APIrequired) { return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version) } diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go index f3f2280e32497..fa6aecfc6ed0d 100644 --- a/vendor/github.com/docker/docker/client/image_list.go +++ b/vendor/github.com/docker/docker/client/image_list.go @@ -13,14 +13,17 @@ import ( // ImageList returns a list of images in the docker host. 
func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]image.Summary, error) { + var images []image.Summary + // Make sure we negotiated (if the client is configured to do so), // as code below contains API-version specific handling of options. // // Normally, version-negotiation (if enabled) would not happen until // the API request is made. - cli.checkVersion(ctx) + if err := cli.checkVersion(ctx); err != nil { + return images, err + } - var images []image.Summary query := url.Values{} optionFilters := options.Filters diff --git a/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/docker/docker/client/network_create.go index 668e87d653b2a..d510feb3db9b2 100644 --- a/vendor/github.com/docker/docker/client/network_create.go +++ b/vendor/github.com/docker/docker/client/network_create.go @@ -10,12 +10,16 @@ import ( // NetworkCreate creates a new network in the docker host. func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) { + var response types.NetworkCreateResponse + // Make sure we negotiated (if the client is configured to do so), // as code below contains API-version specific handling of options. // // Normally, version-negotiation (if enabled) would not happen until // the API request is made. - cli.checkVersion(ctx) + if err := cli.checkVersion(ctx); err != nil { + return response, err + } networkCreateRequest := types.NetworkCreateRequest{ NetworkCreate: options, @@ -25,7 +29,6 @@ func (cli *Client) NetworkCreate(ctx context.Context, name string, options types networkCreateRequest.CheckDuplicate = true //nolint:staticcheck // ignore SA1019: CheckDuplicate is deprecated since API v1.44. } - var response types.NetworkCreateResponse serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) defer ensureReaderClosed(serverResp) if err != nil { diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go index dfd1042fab264..bf3e9b1cd6d59 100644 --- a/vendor/github.com/docker/docker/client/ping.go +++ b/vendor/github.com/docker/docker/client/ping.go @@ -14,7 +14,10 @@ import ( // Ping pings the server and returns the value of the "Docker-Experimental", // "Builder-Version", "OS-Type" & "API-Version" headers. It attempts to use // a HEAD request on the endpoint, but falls back to GET if HEAD is not supported -// by the daemon. +// by the daemon. It ignores internal server errors returned by the API, which +// may be returned if the daemon is in an unhealthy state, but returns errors +// for other non-success status codes, failing to connect to the API, or failing +// to parse the API response. func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { var ping types.Ping diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go index efe07bb9ea59d..50e213b50a088 100644 --- a/vendor/github.com/docker/docker/client/request.go +++ b/vendor/github.com/docker/docker/client/request.go @@ -134,17 +134,18 @@ func (cli *Client) sendRequest(ctx context.Context, method, path string, query u return resp, errdefs.FromStatusCode(err, resp.statusCode) } +// FIXME(thaJeztah): Should this actually return a serverResp when a connection error occurred? 
func (cli *Client) doRequest(req *http.Request) (serverResponse, error) { serverResp := serverResponse{statusCode: -1, reqURL: req.URL} resp, err := cli.client.Do(req) if err != nil { if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") { - return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err) + return serverResp, errConnectionFailed{fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err)} } if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") { - return serverResp, errors.Wrap(err, "the server probably has client authentication (--tlsverify) enabled; check your TLS client certification settings") + return serverResp, errConnectionFailed{errors.Wrap(err, "the server probably has client authentication (--tlsverify) enabled; check your TLS client certification settings")} } // Don't decorate context sentinel errors; users may be comparing to @@ -156,12 +157,13 @@ func (cli *Client) doRequest(req *http.Request) (serverResponse, error) { if uErr, ok := err.(*url.Error); ok { if nErr, ok := uErr.Err.(*net.OpError); ok { if os.IsPermission(nErr.Err) { - return serverResp, errors.Wrapf(err, "permission denied while trying to connect to the Docker daemon socket at %v", cli.host) + return serverResp, errConnectionFailed{errors.Wrapf(err, "permission denied while trying to connect to the Docker daemon socket at %v", cli.host)} } } } if nErr, ok := err.(net.Error); ok { + // FIXME(thaJeztah): any net.Error should be considered a connection error (but we should include the original error)? if nErr.Timeout() { return serverResp, ErrorConnectionFailed(cli.host) } @@ -190,7 +192,7 @@ func (cli *Client) doRequest(req *http.Request) (serverResponse, error) { } } - return serverResp, errors.Wrap(err, "error during connect") + return serverResp, errConnectionFailed{errors.Wrap(err, "error during connect")} } if resp != nil { diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go index 2ebb5ee3a580d..b72cb420d49e3 100644 --- a/vendor/github.com/docker/docker/client/service_create.go +++ b/vendor/github.com/docker/docker/client/service_create.go @@ -25,7 +25,9 @@ func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, // // Normally, version-negotiation (if enabled) would not happen until // the API request is made. - cli.checkVersion(ctx) + if err := cli.checkVersion(ctx); err != nil { + return response, err + } // Make sure containerSpec is not nil when no runtime is set or the runtime is set to container if service.TaskTemplate.ContainerSpec == nil && (service.TaskTemplate.Runtime == "" || service.TaskTemplate.Runtime == swarm.RuntimeContainer) { diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go index e05eebf56657f..d2f03f02f07c1 100644 --- a/vendor/github.com/docker/docker/client/service_update.go +++ b/vendor/github.com/docker/docker/client/service_update.go @@ -16,18 +16,18 @@ import ( // It should be the value as set *before* the update. You can find this value in the Meta field // of swarm.Service, which can be found using ServiceInspectWithRaw. 
func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error) { + response := swarm.ServiceUpdateResponse{} + // Make sure we negotiated (if the client is configured to do so), // as code below contains API-version specific handling of options. // // Normally, version-negotiation (if enabled) would not happen until // the API request is made. - cli.checkVersion(ctx) - - var ( - query = url.Values{} - response = swarm.ServiceUpdateResponse{} - ) + if err := cli.checkVersion(ctx); err != nil { + return response, err + } + query := url.Values{} if options.RegistryAuthFrom != "" { query.Set("registryAuthFrom", options.RegistryAuthFrom) } diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go index 31e08cb975972..b8bdc5ae85856 100644 --- a/vendor/github.com/docker/docker/client/volume_remove.go +++ b/vendor/github.com/docker/docker/client/volume_remove.go @@ -16,7 +16,9 @@ func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool // // Normally, version-negotiation (if enabled) would not happen until // the API request is made. - cli.checkVersion(ctx) + if err := cli.checkVersion(ctx); err != nil { + return err + } if versions.GreaterThanOrEqualTo(cli.version, "1.25") { query.Set("force", "1") } diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery.go b/vendor/github.com/docker/docker/pkg/plugins/discovery.go index 37316ed4829af..503ac574a9091 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/discovery.go +++ b/vendor/github.com/docker/docker/pkg/plugins/discovery.go @@ -10,6 +10,8 @@ import ( "strings" "sync" + "github.com/containerd/containerd/pkg/userns" + "github.com/containerd/log" "github.com/pkg/errors" ) @@ -56,10 +58,16 @@ func (l *LocalRegistry) Scan() ([]string, error) { for _, p := range l.specsPaths { dirEntries, err = os.ReadDir(p) - if err != nil && !os.IsNotExist(err) { + if err != nil { + if os.IsNotExist(err) { + continue + } + if os.IsPermission(err) && userns.RunningInUserNS() { + log.L.Debug(err.Error()) + continue + } return nil, errors.Wrap(err, "error reading dir entries") } - for _, entry := range dirEntries { if entry.IsDir() { infos, err := os.ReadDir(filepath.Join(p, entry.Name())) diff --git a/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go b/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go index b0456e580dc9d..098df6b5236b9 100644 --- a/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go +++ b/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "io" + "sync" "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/progress" @@ -109,6 +110,7 @@ type progressOutput struct { sf formatProgress out io.Writer newLines bool + mu sync.Mutex } // WriteProgress formats progress information from a ProgressReader. 
@@ -120,6 +122,9 @@ func (out *progressOutput) WriteProgress(prog progress.Progress) error { jsonProgress := jsonmessage.JSONProgress{Current: prog.Current, Total: prog.Total, HideCounts: prog.HideCounts, Units: prog.Units} formatted = out.sf.formatProgress(prog.ID, prog.Action, &jsonProgress, prog.Aux) } + + out.mu.Lock() + defer out.mu.Unlock() _, err := out.out.Write(formatted) if err != nil { return err diff --git a/vendor/github.com/golang/protobuf/jsonpb/decode.go b/vendor/github.com/golang/protobuf/jsonpb/decode.go index 6c16c255ffba3..c6f66f10393d2 100644 --- a/vendor/github.com/golang/protobuf/jsonpb/decode.go +++ b/vendor/github.com/golang/protobuf/jsonpb/decode.go @@ -56,6 +56,7 @@ type Unmarshaler struct { // implement JSONPBMarshaler so that the custom format can be produced. // // The JSON unmarshaling must follow the JSON to proto specification: +// // https://developers.google.com/protocol-buffers/docs/proto3#json // // Deprecated: Custom types should implement protobuf reflection instead. diff --git a/vendor/github.com/golang/protobuf/jsonpb/encode.go b/vendor/github.com/golang/protobuf/jsonpb/encode.go index 685c80a62bc91..e9438a93f331c 100644 --- a/vendor/github.com/golang/protobuf/jsonpb/encode.go +++ b/vendor/github.com/golang/protobuf/jsonpb/encode.go @@ -55,6 +55,7 @@ type Marshaler struct { // implement JSONPBUnmarshaler so that the custom format can be parsed. // // The JSON marshaling must follow the proto to JSON specification: +// // https://developers.google.com/protocol-buffers/docs/proto3#json // // Deprecated: Custom types should implement protobuf reflection instead. diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go index 85f9f57365fd4..fdff3fdb4cba3 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -127,9 +127,10 @@ func Is(any *anypb.Any, m proto.Message) bool { // The allocated message is stored in the embedded proto.Message. // // Example: -// var x ptypes.DynamicAny -// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } -// fmt.Printf("unmarshaled message: %v", x.Message) +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) // // Deprecated: Use the any.UnmarshalNew method instead to unmarshal // the any message contents into a new instance of the underlying message. 
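The docker/docker client hunks above all follow one pattern: `checkVersion` now returns the error from `Ping` instead of discarding it, every version-gated call propagates that error to its caller, and `doRequest` wraps connect failures in `errConnectionFailed` so they are detectable with `client.IsErrConnectionFailed`. A minimal sketch of what this means for a caller (the client options and image name are illustrative, not part of the patch):

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	// Negotiation is lazy: no Ping is sent until the first API call needs it.
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// ContainerCreate negotiates eagerly because it has version-gated
	// options; with this change a failed Ping surfaces here as a
	// connection error instead of being silently swallowed.
	_, err = cli.ContainerCreate(context.Background(), &container.Config{Image: "alpine"}, nil, nil, nil, "")
	if client.IsErrConnectionFailed(err) {
		fmt.Println("daemon unreachable:", err)
	}
}
```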
diff --git a/vendor/github.com/gorilla/mux/.editorconfig b/vendor/github.com/gorilla/mux/.editorconfig new file mode 100644 index 0000000000000..c6b74c3e0d0c7 --- /dev/null +++ b/vendor/github.com/gorilla/mux/.editorconfig @@ -0,0 +1,20 @@ +; https://editorconfig.org/ + +root = true + +[*] +insert_final_newline = true +charset = utf-8 +trim_trailing_whitespace = true +indent_style = space +indent_size = 2 + +[{Makefile,go.mod,go.sum,*.go,.gitmodules}] +indent_style = tab +indent_size = 4 + +[*.md] +indent_size = 4 +trim_trailing_whitespace = false + +eclint_indent_style = unset \ No newline at end of file diff --git a/vendor/github.com/gorilla/mux/.gitignore b/vendor/github.com/gorilla/mux/.gitignore new file mode 100644 index 0000000000000..84039fec68771 --- /dev/null +++ b/vendor/github.com/gorilla/mux/.gitignore @@ -0,0 +1 @@ +coverage.coverprofile diff --git a/vendor/github.com/gorilla/mux/AUTHORS b/vendor/github.com/gorilla/mux/AUTHORS deleted file mode 100644 index b722392ee5926..0000000000000 --- a/vendor/github.com/gorilla/mux/AUTHORS +++ /dev/null @@ -1,8 +0,0 @@ -# This is the official list of gorilla/mux authors for copyright purposes. -# -# Please keep the list sorted. - -Google LLC (https://opensource.google.com/) -Kamil Kisielk -Matt Silverlock -Rodrigo Moraes (https://github.com/moraes) diff --git a/vendor/github.com/gorilla/mux/LICENSE b/vendor/github.com/gorilla/mux/LICENSE index 6903df6386e98..bb9d80bc9b6bc 100644 --- a/vendor/github.com/gorilla/mux/LICENSE +++ b/vendor/github.com/gorilla/mux/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved. +Copyright (c) 2023 The Gorilla Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/gorilla/mux/Makefile b/vendor/github.com/gorilla/mux/Makefile new file mode 100644 index 0000000000000..98f5ab75f9d7c --- /dev/null +++ b/vendor/github.com/gorilla/mux/Makefile @@ -0,0 +1,34 @@ +GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '') +GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest + +GO_SEC=$(shell which gosec 2> /dev/null || echo '') +GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest + +GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '') +GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest + +.PHONY: golangci-lint +golangci-lint: + $(if $(GO_LINT), ,go install $(GO_LINT_URI)) + @echo "##### Running golangci-lint" + golangci-lint run -v + +.PHONY: gosec +gosec: + $(if $(GO_SEC), ,go install $(GO_SEC_URI)) + @echo "##### Running gosec" + gosec ./... + +.PHONY: govulncheck +govulncheck: + $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI)) + @echo "##### Running govulncheck" + govulncheck ./... + +.PHONY: verify +verify: golangci-lint gosec govulncheck + +.PHONY: test +test: + @echo "##### Running tests" + go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./... 
\ No newline at end of file
diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md
index 35eea9f106354..382513d57c4c6 100644
--- a/vendor/github.com/gorilla/mux/README.md
+++ b/vendor/github.com/gorilla/mux/README.md
@@ -1,12 +1,12 @@
# gorilla/mux

-[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux)
-[![CircleCI](https://circleci.com/gh/gorilla/mux.svg?style=svg)](https://circleci.com/gh/gorilla/mux)
-[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge)
+![testing](https://github.com/gorilla/mux/actions/workflows/test.yml/badge.svg)
+[![codecov](https://codecov.io/github/gorilla/mux/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/mux)
+[![godoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux)
+[![sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge)

-![Gorilla Logo](https://cloud-cdn.questionable.services/gorilla-icon-64.png)
-https://www.gorillatoolkit.org/pkg/mux
+![Gorilla Logo](https://github.com/gorilla/.github/assets/53367916/d92caabf-98e0-473e-bfbf-ab554ba435e5)

Package `gorilla/mux` implements a request router and dispatcher for matching incoming
requests to their respective handler.
@@ -247,32 +247,25 @@ type spaHandler struct {
// file located at the index path on the SPA handler will be served. This
// is suitable behavior for serving an SPA (single page application).
func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- // get the absolute path to prevent directory traversal
- path, err := filepath.Abs(r.URL.Path)
- if err != nil {
- // if we failed to get the absolute path respond with a 400 bad request
- // and stop
- http.Error(w, err.Error(), http.StatusBadRequest)
- return
- }
-
- // prepend the path with the path to the static directory
- path = filepath.Join(h.staticPath, path)
+ // Join internally calls path.Clean to prevent directory traversal
+ path := filepath.Join(h.staticPath, r.URL.Path)

- // check whether a file exists at the given path
- _, err = os.Stat(path)
- if os.IsNotExist(err) {
- // file does not exist, serve index.html
+ // check whether a file exists or is a directory at the given path
+ fi, err := os.Stat(path)
+ if os.IsNotExist(err) || fi.IsDir() {
+ // file does not exist or path is a directory, serve index.html
 http.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath))
 return
- } else if err != nil {
- // if we got an error (that wasn't that the file doesn't exist) statting the
- // file, return a 500 internal server error and stop
+ }
+
+ if err != nil {
+ // if we got an error (that wasn't that the file doesn't exist) statting the
+ // file, return a 500 internal server error and stop
 http.Error(w, err.Error(), http.StatusInternalServerError)
- return
+ return
 }

- // otherwise, use http.FileServer to serve the static dir
+ // otherwise, use http.FileServer to serve the static file
 http.FileServer(http.Dir(h.staticPath)).ServeHTTP(w, r)
}
@@ -375,6 +368,19 @@ url, err := r.Get("article").URL("subdomain", "news", "id", "42")

```

+To find all the required variables for a given route when calling `URL()`, the method `GetVarNames()` is available:
+```go
+r := mux.NewRouter()
+r.Host("{domain}").
+ Path("/{group}/{item_id}").
+ Queries("some_data1", "{some_data1}").
+ Queries("some_data2", "{some_data2}").
+ Name("article") + +// Will print [domain group item_id some_data1 some_data2] +fmt.Println(r.Get("article").GetVarNames()) + +``` ### Walking Routes The `Walk` function on `mux.Router` can be used to visit all of the routes that are registered on a router. For example, @@ -572,7 +578,7 @@ func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler r := mux.NewRouter() r.HandleFunc("/", handler) -amw := authenticationMiddleware{} +amw := authenticationMiddleware{tokenUsers: make(map[string]string)} amw.Populate() r.Use(amw.Middleware) @@ -758,7 +764,8 @@ func TestMetricsHandler(t *testing.T) { rr := httptest.NewRecorder() - // Need to create a router that we can pass the request through so that the vars will be added to the context + // To add the vars to the context, + // we need to create a router through which we can pass the request. router := mux.NewRouter() router.HandleFunc("/metrics/{type}", MetricsHandler) router.ServeHTTP(rr, req) diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go index bd5a38b55d82f..80601351fd071 100644 --- a/vendor/github.com/gorilla/mux/doc.go +++ b/vendor/github.com/gorilla/mux/doc.go @@ -10,18 +10,18 @@ http.ServeMux, mux.Router matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are: - * Requests can be matched based on URL host, path, path prefix, schemes, - header and query values, HTTP methods or using custom matchers. - * URL hosts, paths and query values can have variables with an optional - regular expression. - * Registered URLs can be built, or "reversed", which helps maintaining - references to resources. - * Routes can be used as subrouters: nested routes are only tested if the - parent route matches. This is useful to define groups of routes that - share common conditions like a host, a path prefix or other repeated - attributes. As a bonus, this optimizes request matching. - * It implements the http.Handler interface so it is compatible with the - standard http.ServeMux. + - Requests can be matched based on URL host, path, path prefix, schemes, + header and query values, HTTP methods or using custom matchers. + - URL hosts, paths and query values can have variables with an optional + regular expression. + - Registered URLs can be built, or "reversed", which helps maintaining + references to resources. + - Routes can be used as subrouters: nested routes are only tested if the + parent route matches. This is useful to define groups of routes that + share common conditions like a host, a path prefix or other repeated + attributes. As a bonus, this optimizes request matching. + - It implements the http.Handler interface so it is compatible with the + standard http.ServeMux. Let's start registering a couple of URL paths and handlers: @@ -301,6 +301,5 @@ A more complex authentication middleware, which maps session token to users, cou r.Use(amw.Middleware) Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. 
- */ package mux diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go index 782a34b22a608..1e089906fad57 100644 --- a/vendor/github.com/gorilla/mux/mux.go +++ b/vendor/github.com/gorilla/mux/mux.go @@ -31,24 +31,26 @@ func NewRouter() *Router { // It implements the http.Handler interface, so it can be registered to serve // requests: // -// var router = mux.NewRouter() +// var router = mux.NewRouter() // -// func main() { -// http.Handle("/", router) -// } +// func main() { +// http.Handle("/", router) +// } // // Or, for Google App Engine, register it in a init() function: // -// func init() { -// http.Handle("/", router) -// } +// func init() { +// http.Handle("/", router) +// } // // This will send all incoming requests to the router. type Router struct { // Configurable Handler to be used when no route matches. + // This can be used to render your own 404 Not Found errors. NotFoundHandler http.Handler // Configurable Handler to be used when the request method does not match the route. + // This can be used to render your own 405 Method Not Allowed errors. MethodNotAllowedHandler http.Handler // Routes to be matched, in order. diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go index 0144842bb23ee..5d05cfa0e9ea0 100644 --- a/vendor/github.com/gorilla/mux/regexp.go +++ b/vendor/github.com/gorilla/mux/regexp.go @@ -22,10 +22,10 @@ type routeRegexpOptions struct { type regexpType int const ( - regexpTypePath regexpType = 0 - regexpTypeHost regexpType = 1 - regexpTypePrefix regexpType = 2 - regexpTypeQuery regexpType = 3 + regexpTypePath regexpType = iota + regexpTypeHost + regexpTypePrefix + regexpTypeQuery ) // newRouteRegexp parses a route template and returns a routeRegexp, @@ -195,7 +195,7 @@ func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { // url builds a URL part using the given values. func (r *routeRegexp) url(values map[string]string) (string, error) { - urlValues := make([]interface{}, len(r.varsN), len(r.varsN)) + urlValues := make([]interface{}, len(r.varsN)) for k, v := range r.varsN { value, ok := values[v] if !ok { diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go index 750afe570d053..e8f11df221f08 100644 --- a/vendor/github.com/gorilla/mux/route.go +++ b/vendor/github.com/gorilla/mux/route.go @@ -64,8 +64,18 @@ func (r *Route) Match(req *http.Request, match *RouteMatch) bool { match.MatchErr = nil } - matchErr = nil + matchErr = nil // nolint:ineffassign return false + } else { + // Multiple routes may share the same path but use different HTTP methods. For instance: + // Route 1: POST "/users/{id}". + // Route 2: GET "/users/{id}", parameters: "id": "[0-9]+". + // + // The router must handle these cases correctly. For a GET request to "/users/abc" with "id" as "-2", + // The router should return a "Not Found" error as no route fully matches this request. + if match.MatchErr == ErrMethodMismatch { + match.MatchErr = nil + } } } @@ -230,9 +240,9 @@ func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { // Headers adds a matcher for request header values. // It accepts a sequence of key/value pairs to be matched. 
For example: // -// r := mux.NewRouter() -// r.Headers("Content-Type", "application/json", -// "X-Requested-With", "XMLHttpRequest") +// r := mux.NewRouter().NewRoute() +// r.Headers("Content-Type", "application/json", +// "X-Requested-With", "XMLHttpRequest") // // The above route will only match if both request header values match. // If the value is an empty string, it will match any value if the key is set. @@ -255,9 +265,9 @@ func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool { // HeadersRegexp accepts a sequence of key/value pairs, where the value has regex // support. For example: // -// r := mux.NewRouter() -// r.HeadersRegexp("Content-Type", "application/(text|json)", -// "X-Requested-With", "XMLHttpRequest") +// r := mux.NewRouter().NewRoute() +// r.HeadersRegexp("Content-Type", "application/(text|json)", +// "X-Requested-With", "XMLHttpRequest") // // The above route will only match if both the request header matches both regular expressions. // If the value is an empty string, it will match any value if the key is set. @@ -283,10 +293,10 @@ func (r *Route) HeadersRegexp(pairs ...string) *Route { // // For example: // -// r := mux.NewRouter() -// r.Host("www.example.com") -// r.Host("{subdomain}.domain.com") -// r.Host("{subdomain:[a-z]+}.domain.com") +// r := mux.NewRouter().NewRoute() +// r.Host("www.example.com") +// r.Host("{subdomain}.domain.com") +// r.Host("{subdomain:[a-z]+}.domain.com") // // Variable names must be unique in a given route. They can be retrieved // calling mux.Vars(request). @@ -342,11 +352,11 @@ func (r *Route) Methods(methods ...string) *Route { // // For example: // -// r := mux.NewRouter() -// r.Path("/products/").Handler(ProductsHandler) -// r.Path("/products/{key}").Handler(ProductsHandler) -// r.Path("/articles/{category}/{id:[0-9]+}"). -// Handler(ArticleHandler) +// r := mux.NewRouter().NewRoute() +// r.Path("/products/").Handler(ProductsHandler) +// r.Path("/products/{key}").Handler(ProductsHandler) +// r.Path("/articles/{category}/{id:[0-9]+}"). +// Handler(ArticleHandler) // // Variable names must be unique in a given route. They can be retrieved // calling mux.Vars(request). @@ -377,8 +387,8 @@ func (r *Route) PathPrefix(tpl string) *Route { // It accepts a sequence of key/value pairs. Values may define variables. // For example: // -// r := mux.NewRouter() -// r.Queries("foo", "bar", "id", "{id:[0-9]+}") +// r := mux.NewRouter().NewRoute() +// r.Queries("foo", "bar", "id", "{id:[0-9]+}") // // The above route will only match if the URL contains the defined queries // values, e.g.: ?foo=bar&id=42. @@ -473,11 +483,11 @@ func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route { // // It will test the inner routes only if the parent route matched. For example: // -// r := mux.NewRouter() -// s := r.Host("www.example.com").Subrouter() -// s.HandleFunc("/products/", ProductsHandler) -// s.HandleFunc("/products/{key}", ProductHandler) -// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) +// r := mux.NewRouter().NewRoute() +// s := r.Host("www.example.com").Subrouter() +// s.HandleFunc("/products/", ProductsHandler) +// s.HandleFunc("/products/{key}", ProductHandler) +// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) // // Here, the routes registered in the subrouter won't be tested if the host // doesn't match. @@ -497,36 +507,36 @@ func (r *Route) Subrouter() *Router { // It accepts a sequence of key/value pairs for the route variables. 
For // example, given this route: // -// r := mux.NewRouter() -// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). -// Name("article") +// r := mux.NewRouter() +// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Name("article") // // ...a URL for it can be built using: // -// url, err := r.Get("article").URL("category", "technology", "id", "42") +// url, err := r.Get("article").URL("category", "technology", "id", "42") // // ...which will return an url.URL with the following path: // -// "/articles/technology/42" +// "/articles/technology/42" // // This also works for host variables: // -// r := mux.NewRouter() -// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). -// Host("{subdomain}.domain.com"). -// Name("article") +// r := mux.NewRouter() +// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Host("{subdomain}.domain.com"). +// Name("article") // -// // url.String() will be "http://news.domain.com/articles/technology/42" -// url, err := r.Get("article").URL("subdomain", "news", -// "category", "technology", -// "id", "42") +// // url.String() will be "http://news.domain.com/articles/technology/42" +// url, err := r.Get("article").URL("subdomain", "news", +// "category", "technology", +// "id", "42") // // The scheme of the resulting url will be the first argument that was passed to Schemes: // -// // url.String() will be "https://example.com" -// r := mux.NewRouter() -// url, err := r.Host("example.com") -// .Schemes("https", "http").URL() +// // url.String() will be "https://example.com" +// r := mux.NewRouter().NewRoute() +// url, err := r.Host("example.com") +// .Schemes("https", "http").URL() // // All variables defined in the route are required, and their values must // conform to the corresponding patterns. @@ -718,6 +728,25 @@ func (r *Route) GetHostTemplate() (string, error) { return r.regexp.host.template, nil } +// GetVarNames returns the names of all variables added by regexp matchers +// These can be used to know which route variables should be passed into r.URL() +func (r *Route) GetVarNames() ([]string, error) { + if r.err != nil { + return nil, r.err + } + var varNames []string + if r.regexp.host != nil { + varNames = append(varNames, r.regexp.host.varsN...) + } + if r.regexp.path != nil { + varNames = append(varNames, r.regexp.path.varsN...) + } + for _, regx := range r.regexp.queries { + varNames = append(varNames, regx.varsN...) + } + return varNames, nil +} + // prepareVars converts the route variable pairs into a map. If the route has a // BuildVarsFunc, it is invoked. func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { diff --git a/vendor/github.com/hashicorp/go-msgpack/LICENSE b/vendor/github.com/hashicorp/go-msgpack/LICENSE index ccae99f6a9a30..95a0f0541cdaa 100644 --- a/vendor/github.com/hashicorp/go-msgpack/LICENSE +++ b/vendor/github.com/hashicorp/go-msgpack/LICENSE @@ -1,25 +1,22 @@ -Copyright (c) 2012, 2013 Ugorji Nwoke. +The MIT License (MIT) + +Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. 
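(Illustrative sketch, not from the diff: the exact case the new `Match` comment above describes. Two routes share a path template but differ in method and pattern; with the `ErrMethodMismatch` reset, an unmatched GET yields 404 rather than 405. The `ok` handler is hypothetical.)

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	ok := func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) }

	r := mux.NewRouter()
	// Route 1: POST "/users/{id}".
	r.HandleFunc("/users/{id}", ok).Methods(http.MethodPost)
	// Route 2: GET "/users/{id}", with "id" restricted to digits.
	r.HandleFunc("/users/{id:[0-9]+}", ok).Methods(http.MethodGet)

	// "abc" hits route 1 with the wrong method and misses route 2's
	// pattern entirely, so no route fully matches this request.
	req := httptest.NewRequest(http.MethodGet, "/users/abc", nil)
	rr := httptest.NewRecorder()
	r.ServeHTTP(rr, req)
	fmt.Println(rr.Code) // 404 (Not Found), not 405 (Method Not Allowed)
}
```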
-Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: -* Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. -* Neither the name of the author nor the names of its contributors may be used - to endorse or promote products derived from this software - without specific prior written permission. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/0doc.go b/vendor/github.com/hashicorp/go-msgpack/codec/0doc.go deleted file mode 100644 index c14d810a73e81..0000000000000 --- a/vendor/github.com/hashicorp/go-msgpack/codec/0doc.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -/* -High Performance, Feature-Rich Idiomatic Go encoding library for msgpack and binc . - -Supported Serialization formats are: - - - msgpack: [https://github.com/msgpack/msgpack] - - binc: [http://github.com/ugorji/binc] - -To install: - - go get github.com/ugorji/go/codec - -The idiomatic Go support is as seen in other encoding packages in -the standard library (ie json, xml, gob, etc). - -Rich Feature Set includes: - - - Simple but extremely powerful and feature-rich API - - Very High Performance. - Our extensive benchmarks show us outperforming Gob, Json and Bson by 2-4X. 
- This was achieved by taking extreme care on: - - managing allocation - - function frame size (important due to Go's use of split stacks), - - reflection use (and by-passing reflection for common types) - - recursion implications - - zero-copy mode (encoding/decoding to byte slice without using temp buffers) - - Correct. - Care was taken to precisely handle corner cases like: - overflows, nil maps and slices, nil value in stream, etc. - - Efficient zero-copying into temporary byte buffers - when encoding into or decoding from a byte slice. - - Standard field renaming via tags - - Encoding from any value - (struct, slice, map, primitives, pointers, interface{}, etc) - - Decoding into pointer to any non-nil typed value - (struct, slice, map, int, float32, bool, string, reflect.Value, etc) - - Supports extension functions to handle the encode/decode of custom types - - Support Go 1.2 encoding.BinaryMarshaler/BinaryUnmarshaler - - Schema-less decoding - (decode into a pointer to a nil interface{} as opposed to a typed non-nil value). - Includes Options to configure what specific map or slice type to use - when decoding an encoded list or map into a nil interface{} - - Provides a RPC Server and Client Codec for net/rpc communication protocol. - - Msgpack Specific: - - Provides extension functions to handle spec-defined extensions (binary, timestamp) - - Options to resolve ambiguities in handling raw bytes (as string or []byte) - during schema-less decoding (decoding into a nil interface{}) - - RPC Server/Client Codec for msgpack-rpc protocol defined at: - https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md - - Fast Paths for some container types: - For some container types, we circumvent reflection and its associated overhead - and allocation costs, and encode/decode directly. These types are: - []interface{} - []int - []string - map[interface{}]interface{} - map[int]interface{} - map[string]interface{} - -Extension Support - -Users can register a function to handle the encoding or decoding of -their custom types. - -There are no restrictions on what the custom type can be. Some examples: - - type BisSet []int - type BitSet64 uint64 - type UUID string - type MyStructWithUnexportedFields struct { a int; b bool; c []int; } - type GifImage struct { ... } - -As an illustration, MyStructWithUnexportedFields would normally be -encoded as an empty map because it has no exported fields, while UUID -would be encoded as a string. However, with extension support, you can -encode any of these however you like. - -RPC - -RPC Client and Server Codecs are implemented, so the codecs can be used -with the standard net/rpc package. - -Usage - -Typical usage model: - - // create and configure Handle - var ( - bh codec.BincHandle - mh codec.MsgpackHandle - ) - - mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) - - // configure extensions - // e.g. 
for msgpack, define functions and enable Time support for tag 1 - // mh.AddExt(reflect.TypeOf(time.Time{}), 1, myMsgpackTimeEncodeExtFn, myMsgpackTimeDecodeExtFn) - - // create and use decoder/encoder - var ( - r io.Reader - w io.Writer - b []byte - h = &bh // or mh to use msgpack - ) - - dec = codec.NewDecoder(r, h) - dec = codec.NewDecoderBytes(b, h) - err = dec.Decode(&v) - - enc = codec.NewEncoder(w, h) - enc = codec.NewEncoderBytes(&b, h) - err = enc.Encode(v) - - //RPC Server - go func() { - for { - conn, err := listener.Accept() - rpcCodec := codec.GoRpc.ServerCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) - rpc.ServeCodec(rpcCodec) - } - }() - - //RPC Communication (client side) - conn, err = net.Dial("tcp", "localhost:5555") - rpcCodec := codec.GoRpc.ClientCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) - client := rpc.NewClientWithCodec(rpcCodec) - -Representative Benchmark Results - -Run the benchmark suite using: - go test -bi -bench=. -benchmem - -To run full benchmark suite (including against vmsgpack and bson), -see notes in ext_dep_test.go - -*/ -package codec diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/README.md b/vendor/github.com/hashicorp/go-msgpack/codec/README.md deleted file mode 100644 index 6c95d1bfd2081..0000000000000 --- a/vendor/github.com/hashicorp/go-msgpack/codec/README.md +++ /dev/null @@ -1,174 +0,0 @@ -# Codec - -High Performance and Feature-Rich Idiomatic Go Library providing -encode/decode support for different serialization formats. - -Supported Serialization formats are: - - - msgpack: [https://github.com/msgpack/msgpack] - - binc: [http://github.com/ugorji/binc] - -To install: - - go get github.com/ugorji/go/codec - -Online documentation: [http://godoc.org/github.com/ugorji/go/codec] - -The idiomatic Go support is as seen in other encoding packages in -the standard library (ie json, xml, gob, etc). - -Rich Feature Set includes: - - - Simple but extremely powerful and feature-rich API - - Very High Performance. - Our extensive benchmarks show us outperforming Gob, Json and Bson by 2-4X. - This was achieved by taking extreme care on: - - managing allocation - - function frame size (important due to Go's use of split stacks), - - reflection use (and by-passing reflection for common types) - - recursion implications - - zero-copy mode (encoding/decoding to byte slice without using temp buffers) - - Correct. - Care was taken to precisely handle corner cases like: - overflows, nil maps and slices, nil value in stream, etc. - - Efficient zero-copying into temporary byte buffers - when encoding into or decoding from a byte slice. - - Standard field renaming via tags - - Encoding from any value - (struct, slice, map, primitives, pointers, interface{}, etc) - - Decoding into pointer to any non-nil typed value - (struct, slice, map, int, float32, bool, string, reflect.Value, etc) - - Supports extension functions to handle the encode/decode of custom types - - Support Go 1.2 encoding.BinaryMarshaler/BinaryUnmarshaler - - Schema-less decoding - (decode into a pointer to a nil interface{} as opposed to a typed non-nil value). - Includes Options to configure what specific map or slice type to use - when decoding an encoded list or map into a nil interface{} - - Provides a RPC Server and Client Codec for net/rpc communication protocol. 
- - Msgpack Specific: - - Provides extension functions to handle spec-defined extensions (binary, timestamp) - - Options to resolve ambiguities in handling raw bytes (as string or []byte) - during schema-less decoding (decoding into a nil interface{}) - - RPC Server/Client Codec for msgpack-rpc protocol defined at: - https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md - - Fast Paths for some container types: - For some container types, we circumvent reflection and its associated overhead - and allocation costs, and encode/decode directly. These types are: - []interface{} - []int - []string - map[interface{}]interface{} - map[int]interface{} - map[string]interface{} - -## Extension Support - -Users can register a function to handle the encoding or decoding of -their custom types. - -There are no restrictions on what the custom type can be. Some examples: - - type BisSet []int - type BitSet64 uint64 - type UUID string - type MyStructWithUnexportedFields struct { a int; b bool; c []int; } - type GifImage struct { ... } - -As an illustration, MyStructWithUnexportedFields would normally be -encoded as an empty map because it has no exported fields, while UUID -would be encoded as a string. However, with extension support, you can -encode any of these however you like. - -## RPC - -RPC Client and Server Codecs are implemented, so the codecs can be used -with the standard net/rpc package. - -## Usage - -Typical usage model: - - // create and configure Handle - var ( - bh codec.BincHandle - mh codec.MsgpackHandle - ) - - mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) - - // configure extensions - // e.g. for msgpack, define functions and enable Time support for tag 1 - // mh.AddExt(reflect.TypeOf(time.Time{}), 1, myMsgpackTimeEncodeExtFn, myMsgpackTimeDecodeExtFn) - - // create and use decoder/encoder - var ( - r io.Reader - w io.Writer - b []byte - h = &bh // or mh to use msgpack - ) - - dec = codec.NewDecoder(r, h) - dec = codec.NewDecoderBytes(b, h) - err = dec.Decode(&v) - - enc = codec.NewEncoder(w, h) - enc = codec.NewEncoderBytes(&b, h) - err = enc.Encode(v) - - //RPC Server - go func() { - for { - conn, err := listener.Accept() - rpcCodec := codec.GoRpc.ServerCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) - rpc.ServeCodec(rpcCodec) - } - }() - - //RPC Communication (client side) - conn, err = net.Dial("tcp", "localhost:5555") - rpcCodec := codec.GoRpc.ClientCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) - client := rpc.NewClientWithCodec(rpcCodec) - -## Representative Benchmark Results - -A sample run of benchmark using "go test -bi -bench=. -benchmem": - - /proc/cpuinfo: Intel(R) Core(TM) i7-2630QM CPU @ 2.00GHz (HT) - - .............................................. - BENCHMARK INIT: 2013-10-16 11:02:50.345970786 -0400 EDT - To run full benchmark comparing encodings (MsgPack, Binc, JSON, GOB, etc), use: "go test -bench=." - Benchmark: - Struct recursive Depth: 1 - ApproxDeepSize Of benchmark Struct: 4694 bytes - Benchmark One-Pass Run: - v-msgpack: len: 1600 bytes - bson: len: 3025 bytes - msgpack: len: 1560 bytes - binc: len: 1187 bytes - gob: len: 1972 bytes - json: len: 2538 bytes - .............................................. 
- PASS - Benchmark__Msgpack____Encode 50000 54359 ns/op 14953 B/op 83 allocs/op - Benchmark__Msgpack____Decode 10000 106531 ns/op 14990 B/op 410 allocs/op - Benchmark__Binc_NoSym_Encode 50000 53956 ns/op 14966 B/op 83 allocs/op - Benchmark__Binc_NoSym_Decode 10000 103751 ns/op 14529 B/op 386 allocs/op - Benchmark__Binc_Sym___Encode 50000 65961 ns/op 17130 B/op 88 allocs/op - Benchmark__Binc_Sym___Decode 10000 106310 ns/op 15857 B/op 287 allocs/op - Benchmark__Gob________Encode 10000 135944 ns/op 21189 B/op 237 allocs/op - Benchmark__Gob________Decode 5000 405390 ns/op 83460 B/op 1841 allocs/op - Benchmark__Json_______Encode 20000 79412 ns/op 13874 B/op 102 allocs/op - Benchmark__Json_______Decode 10000 247979 ns/op 14202 B/op 493 allocs/op - Benchmark__Bson_______Encode 10000 121762 ns/op 27814 B/op 514 allocs/op - Benchmark__Bson_______Decode 10000 162126 ns/op 16514 B/op 789 allocs/op - Benchmark__VMsgpack___Encode 50000 69155 ns/op 12370 B/op 344 allocs/op - Benchmark__VMsgpack___Decode 10000 151609 ns/op 20307 B/op 571 allocs/op - ok ugorji.net/codec 30.827s - -To run full benchmark suite (including against vmsgpack and bson), -see notes in ext\_dep\_test.go - diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/binc.go b/vendor/github.com/hashicorp/go-msgpack/codec/binc.go index 2bb5e8fee8548..fd9f489063a6a 100644 --- a/vendor/github.com/hashicorp/go-msgpack/codec/binc.go +++ b/vendor/github.com/hashicorp/go-msgpack/codec/binc.go @@ -1,20 +1,16 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. package codec import ( "math" - // "reflect" - // "sync/atomic" + "reflect" "time" - //"fmt" ) const bincDoPrune = true // No longer needed. Needed before as C lib did not support pruning. 
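(Aside, not part of the vendored file: every read/write pair in this driver relies on one invariant, namely that each binc item begins with a descriptor byte carrying the value descriptor `vd` in the high nibble and the sub-descriptor `vs` in the low nibble. A tiny sketch of that packing, with constant values copied from the const blocks nearby:)

```go
package main

import "fmt"

func main() {
	// From the const blocks in this file: bincVdSpecial = 0, bincSpTrue = 2.
	const bincVdSpecial, bincSpTrue = 0, 2

	bd := byte(bincVdSpecial<<4 | bincSpTrue) // how EncodeBool writes "true"
	vd, vs := bd>>4, bd&0x0f                  // how readNextBd splits it back
	fmt.Printf("bd=%#04x vd=%d vs=%d\n", bd, vd, vs) // bd=0x02 vd=0 vs=2
}
```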
-//var _ = fmt.Printf - // vd as low 4 bits (there are 16 slots) const ( bincVdSpecial byte = iota @@ -59,31 +55,79 @@ const ( // others not currently supported ) +func bincdesc(vd, vs byte) string { + switch vd { + case bincVdSpecial: + switch vs { + case bincSpNil: + return "nil" + case bincSpFalse: + return "false" + case bincSpTrue: + return "true" + case bincSpNan, bincSpPosInf, bincSpNegInf, bincSpZeroFloat: + return "float" + case bincSpZero: + return "uint" + case bincSpNegOne: + return "int" + default: + return "unknown" + } + case bincVdSmallInt, bincVdPosInt: + return "uint" + case bincVdNegInt: + return "int" + case bincVdFloat: + return "float" + case bincVdSymbol: + return "string" + case bincVdString: + return "string" + case bincVdByteArray: + return "bytes" + case bincVdTimestamp: + return "time" + case bincVdCustomExt: + return "ext" + case bincVdArray: + return "array" + case bincVdMap: + return "map" + default: + return "unknown" + } +} + type bincEncDriver struct { - w encWriter + e *Encoder + h *BincHandle + w *encWriterSwitch m map[string]uint16 // symbols - s uint32 // symbols sequencer - b [8]byte + b [16]byte // scratch, used for encoding numbers - bigendian style + s uint16 // symbols sequencer + // c containerState + encDriverTrackContainerWriter + noBuiltInTypes + // encNoSeparator + _ [1]uint64 // padding } -func (e *bincEncDriver) isBuiltinType(rt uintptr) bool { - return rt == timeTypId +func (e *bincEncDriver) EncodeNil() { + e.w.writen1(bincVdSpecial<<4 | bincSpNil) } -func (e *bincEncDriver) encodeBuiltin(rt uintptr, v interface{}) { - switch rt { - case timeTypId: - bs := encodeTime(v.(time.Time)) +func (e *bincEncDriver) EncodeTime(t time.Time) { + if t.IsZero() { + e.EncodeNil() + } else { + bs := bincEncodeTime(t) e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs))) e.w.writeb(bs) } } -func (e *bincEncDriver) encodeNil() { - e.w.writen1(bincVdSpecial<<4 | bincSpNil) -} - -func (e *bincEncDriver) encodeBool(b bool) { +func (e *bincEncDriver) EncodeBool(b bool) { if b { e.w.writen1(bincVdSpecial<<4 | bincSpTrue) } else { @@ -91,21 +135,21 @@ func (e *bincEncDriver) encodeBool(b bool) { } } -func (e *bincEncDriver) encodeFloat32(f float32) { +func (e *bincEncDriver) EncodeFloat32(f float32) { if f == 0 { e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) return } e.w.writen1(bincVdFloat<<4 | bincFlBin32) - e.w.writeUint32(math.Float32bits(f)) + bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f)) } -func (e *bincEncDriver) encodeFloat64(f float64) { +func (e *bincEncDriver) EncodeFloat64(f float64) { if f == 0 { e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) return } - bigen.PutUint64(e.b[:], math.Float64bits(f)) + bigen.PutUint64(e.b[:8], math.Float64bits(f)) if bincDoPrune { i := 7 for ; i >= 0 && (e.b[i] == 0); i-- { @@ -119,7 +163,7 @@ func (e *bincEncDriver) encodeFloat64(f float64) { } } e.w.writen1(bincVdFloat<<4 | bincFlBin64) - e.w.writeb(e.b[:]) + e.w.writeb(e.b[:8]) } func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) { @@ -138,64 +182,71 @@ func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) } } -func (e *bincEncDriver) encodeInt(v int64) { - const nbd byte = bincVdNegInt << 4 - switch { - case v >= 0: +func (e *bincEncDriver) EncodeInt(v int64) { + // const nbd byte = bincVdNegInt << 4 + if v >= 0 { e.encUint(bincVdPosInt<<4, true, uint64(v)) - case v == -1: + } else if v == -1 { e.w.writen1(bincVdSpecial<<4 | bincSpNegOne) - default: + } else { e.encUint(bincVdNegInt<<4, false, 
uint64(-v)) } } -func (e *bincEncDriver) encodeUint(v uint64) { +func (e *bincEncDriver) EncodeUint(v uint64) { e.encUint(bincVdPosInt<<4, true, v) } func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) { - switch { - case v == 0: + if v == 0 { e.w.writen1(bincVdSpecial<<4 | bincSpZero) - case pos && v >= 1 && v <= 16: + } else if pos && v >= 1 && v <= 16 { e.w.writen1(bincVdSmallInt<<4 | byte(v-1)) - case v <= math.MaxUint8: + } else if v <= math.MaxUint8 { e.w.writen2(bd|0x0, byte(v)) - case v <= math.MaxUint16: + } else if v <= math.MaxUint16 { e.w.writen1(bd | 0x01) - e.w.writeUint16(uint16(v)) - case v <= math.MaxUint32: + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) + } else if v <= math.MaxUint32 { e.encIntegerPrune(bd, pos, v, 4) - default: + } else { e.encIntegerPrune(bd, pos, v, 8) } } +func (e *bincEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) { + bs := ext.WriteExt(rv) + if bs == nil { + e.EncodeNil() + return + } + e.encodeExtPreamble(uint8(xtag), len(bs)) + e.w.writeb(bs) +} + +func (e *bincEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { + e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) + e.w.writeb(re.Data) +} + func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) { e.encLen(bincVdCustomExt<<4, uint64(length)) e.w.writen1(xtag) } -func (e *bincEncDriver) encodeArrayPreamble(length int) { +func (e *bincEncDriver) WriteArrayStart(length int) { e.encLen(bincVdArray<<4, uint64(length)) + e.c = containerArrayStart } -func (e *bincEncDriver) encodeMapPreamble(length int) { +func (e *bincEncDriver) WriteMapStart(length int) { e.encLen(bincVdMap<<4, uint64(length)) + e.c = containerMapStart } -func (e *bincEncDriver) encodeString(c charEncoding, v string) { - l := uint64(len(v)) - e.encBytesLen(c, l) - if l > 0 { - e.w.writestr(v) - } -} - -func (e *bincEncDriver) encodeSymbol(v string) { +func (e *bincEncDriver) EncodeSymbol(v string) { // if WriteSymbolsNoRefs { - // e.encodeString(c_UTF8, v) + // e.encodeString(cUTF8, v) // return // } @@ -204,12 +255,11 @@ func (e *bincEncDriver) encodeSymbol(v string) { //(bd with embedded length, and single byte for string val). 
l := len(v) - switch l { - case 0: - e.encBytesLen(c_UTF8, 0) + if l == 0 { + e.encBytesLen(cUTF8, 0) return - case 1: - e.encBytesLen(c_UTF8, 1) + } else if l == 1 { + e.encBytesLen(cUTF8, 1) e.w.writen1(v[0]) return } @@ -222,45 +272,72 @@ func (e *bincEncDriver) encodeSymbol(v string) { e.w.writen2(bincVdSymbol<<4, byte(ui)) } else { e.w.writen1(bincVdSymbol<<4 | 0x8) - e.w.writeUint16(ui) + bigenHelper{e.b[:2], e.w}.writeUint16(ui) } } else { e.s++ - ui = uint16(e.s) + ui = e.s //ui = uint16(atomic.AddUint32(&e.s, 1)) e.m[v] = ui var lenprec uint8 - switch { - case l <= math.MaxUint8: + if l <= math.MaxUint8 { // lenprec = 0 - case l <= math.MaxUint16: + } else if l <= math.MaxUint16 { lenprec = 1 - case int64(l) <= math.MaxUint32: + } else if int64(l) <= math.MaxUint32 { lenprec = 2 - default: + } else { lenprec = 3 } if ui <= math.MaxUint8 { e.w.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui)) } else { e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec) - e.w.writeUint16(ui) + bigenHelper{e.b[:2], e.w}.writeUint16(ui) } - switch lenprec { - case 0: + if lenprec == 0 { e.w.writen1(byte(l)) - case 1: - e.w.writeUint16(uint16(l)) - case 2: - e.w.writeUint32(uint32(l)) - default: - e.w.writeUint64(uint64(l)) + } else if lenprec == 1 { + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(l)) + } else if lenprec == 2 { + bigenHelper{e.b[:4], e.w}.writeUint32(uint32(l)) + } else { + bigenHelper{e.b[:8], e.w}.writeUint64(uint64(l)) } e.w.writestr(v) } } -func (e *bincEncDriver) encodeStringBytes(c charEncoding, v []byte) { +func (e *bincEncDriver) EncodeString(c charEncoding, v string) { + if e.c == containerMapKey && c == cUTF8 && (e.h.AsSymbols == 0 || e.h.AsSymbols == 1) { + e.EncodeSymbol(v) + return + } + l := uint64(len(v)) + e.encBytesLen(c, l) + if l > 0 { + e.w.writestr(v) + } +} + +func (e *bincEncDriver) EncodeStringEnc(c charEncoding, v string) { + if e.c == containerMapKey && c == cUTF8 && (e.h.AsSymbols == 0 || e.h.AsSymbols == 1) { + e.EncodeSymbol(v) + return + } + l := uint64(len(v)) + e.encLen(bincVdString<<4, l) // e.encBytesLen(c, l) + if l > 0 { + e.w.writestr(v) + } + +} + +func (e *bincEncDriver) EncodeStringBytes(c charEncoding, v []byte) { + if v == nil { + e.EncodeNil() + return + } l := uint64(len(v)) e.encBytesLen(c, l) if l > 0 { @@ -268,9 +345,21 @@ func (e *bincEncDriver) encodeStringBytes(c charEncoding, v []byte) { } } +func (e *bincEncDriver) EncodeStringBytesRaw(v []byte) { + if v == nil { + e.EncodeNil() + return + } + l := uint64(len(v)) + e.encLen(bincVdByteArray<<4, l) // e.encBytesLen(c, l) + if l > 0 { + e.w.writeb(v) + } +} + func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) { //TODO: support bincUnicodeOther (for now, just use string or bytearray) - if c == c_RAW { + if c == cRAW { e.encLen(bincVdByteArray<<4, length) } else { e.encLen(bincVdString<<4, length) @@ -286,93 +375,90 @@ func (e *bincEncDriver) encLen(bd byte, l uint64) { } func (e *bincEncDriver) encLenNumber(bd byte, v uint64) { - switch { - case v <= math.MaxUint8: + if v <= math.MaxUint8 { e.w.writen2(bd, byte(v)) - case v <= math.MaxUint16: + } else if v <= math.MaxUint16 { e.w.writen1(bd | 0x01) - e.w.writeUint16(uint16(v)) - case v <= math.MaxUint32: + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) + } else if v <= math.MaxUint32 { e.w.writen1(bd | 0x02) - e.w.writeUint32(uint32(v)) - default: + bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v)) + } else { e.w.writen1(bd | 0x03) - e.w.writeUint64(uint64(v)) + bigenHelper{e.b[:8], e.w}.writeUint64(uint64(v)) } } 
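(Stepping back from the driver internals, an end-to-end sketch, not part of the diff, using the package's public API; this is the same `NewEncoderBytes`/`NewDecoderBytes` usage shown in the README being removed above, with `BincHandle` selecting the driver implemented in this file.)

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-msgpack/codec"
)

func main() {
	var bh codec.BincHandle // zero value works; fields tune behavior

	in := map[string]int{"a": 1, "b": 2}

	var b []byte
	if err := codec.NewEncoderBytes(&b, &bh).Encode(in); err != nil {
		panic(err)
	}

	out := make(map[string]int)
	if err := codec.NewDecoderBytes(b, &bh).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out) // map[a:1 b:2]
}
```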
//------------------------------------ +type bincDecSymbol struct { + s string + b []byte + i uint16 +} + type bincDecDriver struct { - r decReader + decDriverNoopContainerReader + noBuiltInTypes + + d *Decoder + h *BincHandle + r *decReaderSwitch + br bool // bytes reader bdRead bool - bdType valueType bd byte vd byte vs byte - b [8]byte - m map[uint32]string // symbols (use uint32 as key, as map optimizes for it) + _ [3]byte // padding + // linear searching on this slice is ok, + // because we typically expect < 32 symbols in each stream. + s []bincDecSymbol + + // noStreamingCodec + // decNoSeparator + + b [(8 + 1) * 8]byte // scratch } -func (d *bincDecDriver) initReadNext() { - if d.bdRead { - return - } +func (d *bincDecDriver) readNextBd() { d.bd = d.r.readn1() d.vd = d.bd >> 4 d.vs = d.bd & 0x0f d.bdRead = true - d.bdType = valueTypeUnset -} - -func (d *bincDecDriver) currentEncodedType() valueType { - if d.bdType == valueTypeUnset { - switch d.vd { - case bincVdSpecial: - switch d.vs { - case bincSpNil: - d.bdType = valueTypeNil - case bincSpFalse, bincSpTrue: - d.bdType = valueTypeBool - case bincSpNan, bincSpNegInf, bincSpPosInf, bincSpZeroFloat: - d.bdType = valueTypeFloat - case bincSpZero: - d.bdType = valueTypeUint - case bincSpNegOne: - d.bdType = valueTypeInt - default: - decErr("currentEncodedType: Unrecognized special value 0x%x", d.vs) - } - case bincVdSmallInt: - d.bdType = valueTypeUint - case bincVdPosInt: - d.bdType = valueTypeUint - case bincVdNegInt: - d.bdType = valueTypeInt - case bincVdFloat: - d.bdType = valueTypeFloat - case bincVdString: - d.bdType = valueTypeString - case bincVdSymbol: - d.bdType = valueTypeSymbol - case bincVdByteArray: - d.bdType = valueTypeBytes - case bincVdTimestamp: - d.bdType = valueTypeTimestamp - case bincVdCustomExt: - d.bdType = valueTypeExt - case bincVdArray: - d.bdType = valueTypeArray - case bincVdMap: - d.bdType = valueTypeMap - default: - decErr("currentEncodedType: Unrecognized d.vd: 0x%x", d.vd) - } +} + +func (d *bincDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false } - return d.bdType } -func (d *bincDecDriver) tryDecodeAsNil() bool { +func (d *bincDecDriver) ContainerType() (vt valueType) { + if !d.bdRead { + d.readNextBd() + } + if d.vd == bincVdSpecial && d.vs == bincSpNil { + return valueTypeNil + } else if d.vd == bincVdByteArray { + return valueTypeBytes + } else if d.vd == bincVdString { + return valueTypeString + } else if d.vd == bincVdArray { + return valueTypeArray + } else if d.vd == bincVdMap { + return valueTypeMap + } + // else { + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + // } + return valueTypeUnset +} + +func (d *bincDecDriver) TryDecodeAsNil() bool { + if !d.bdRead { + d.readNextBd() + } if d.bd == bincVdSpecial<<4|bincSpNil { d.bdRead = false return true @@ -380,24 +466,24 @@ func (d *bincDecDriver) tryDecodeAsNil() bool { return false } -func (d *bincDecDriver) isBuiltinType(rt uintptr) bool { - return rt == timeTypId -} - -func (d *bincDecDriver) decodeBuiltin(rt uintptr, v interface{}) { - switch rt { - case timeTypId: - if d.vd != bincVdTimestamp { - decErr("Invalid d.vd. Expecting 0x%x. 
Received: 0x%x", bincVdTimestamp, d.vd) - } - tt, err := decodeTime(d.r.readn(int(d.vs))) - if err != nil { - panic(err) - } - var vt *time.Time = v.(*time.Time) - *vt = tt +func (d *bincDecDriver) DecodeTime() (t time.Time) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == bincVdSpecial<<4|bincSpNil { d.bdRead = false + return } + if d.vd != bincVdTimestamp { + d.d.errorf("cannot decode time - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + return + } + t, err := bincDecodeTime(d.r.readx(uint(d.vs))) + if err != nil { + panic(err) + } + d.bdRead = false + return } func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) { @@ -406,7 +492,8 @@ func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) { } else { l := d.r.readn1() if l > 8 { - decErr("At most 8 bytes used to represent float. Received: %v bytes", l) + d.d.errorf("cannot read float - at most 8 bytes used to represent float - received %v bytes", l) + return } for i := l; i < 8; i++ { d.b[i] = 0 @@ -416,16 +503,17 @@ func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) { } func (d *bincDecDriver) decFloat() (f float64) { - //if true { f = math.Float64frombits(d.r.readUint64()); break; } - switch vs := d.vs; vs & 0x7 { - case bincFlBin32: - d.decFloatPre(vs, 4) + //if true { f = math.Float64frombits(bigen.Uint64(d.r.readx(8))); break; } + if x := d.vs & 0x7; x == bincFlBin32 { + d.decFloatPre(d.vs, 4) f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4]))) - case bincFlBin64: - d.decFloatPre(vs, 8) + } else if x == bincFlBin64 { + d.decFloatPre(d.vs, 8) f = math.Float64frombits(bigen.Uint64(d.b[0:8])) - default: - decErr("only float32 and float64 are supported. d.vd: 0x%x, d.vs: 0x%x", d.vd, d.vs) + } else { + d.d.errorf("read float - only float32 and float64 are supported - %s %x-%x/%s", + msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + return } return } @@ -436,130 +524,148 @@ func (d *bincDecDriver) decUint() (v uint64) { case 0: v = uint64(d.r.readn1()) case 1: - d.r.readb(d.b[6:]) - v = uint64(bigen.Uint16(d.b[6:])) + d.r.readb(d.b[6:8]) + v = uint64(bigen.Uint16(d.b[6:8])) case 2: d.b[4] = 0 - d.r.readb(d.b[5:]) - v = uint64(bigen.Uint32(d.b[4:])) + d.r.readb(d.b[5:8]) + v = uint64(bigen.Uint32(d.b[4:8])) case 3: - d.r.readb(d.b[4:]) - v = uint64(bigen.Uint32(d.b[4:])) + d.r.readb(d.b[4:8]) + v = uint64(bigen.Uint32(d.b[4:8])) case 4, 5, 6: - lim := int(7 - d.vs) - d.r.readb(d.b[lim:]) - for i := 0; i < lim; i++ { + lim := 7 - d.vs + d.r.readb(d.b[lim:8]) + for i := uint8(0); i < lim; i++ { d.b[i] = 0 } - v = uint64(bigen.Uint64(d.b[:])) + v = uint64(bigen.Uint64(d.b[:8])) case 7: - d.r.readb(d.b[:]) - v = uint64(bigen.Uint64(d.b[:])) + d.r.readb(d.b[:8]) + v = uint64(bigen.Uint64(d.b[:8])) default: - decErr("unsigned integers with greater than 64 bits of precision not supported") + d.d.errorf("unsigned integers with greater than 64 bits of precision not supported") + return } return } -func (d *bincDecDriver) decIntAny() (ui uint64, i int64, neg bool) { - switch d.vd { - case bincVdPosInt: +func (d *bincDecDriver) decCheckInteger() (ui uint64, neg bool) { + if !d.bdRead { + d.readNextBd() + } + vd, vs := d.vd, d.vs + if vd == bincVdPosInt { ui = d.decUint() - i = int64(ui) - case bincVdNegInt: + } else if vd == bincVdNegInt { ui = d.decUint() - i = -(int64(ui)) neg = true - case bincVdSmallInt: - i = int64(d.vs) + 1 + } else if vd == bincVdSmallInt { ui = uint64(d.vs) + 1 - case bincVdSpecial: - switch d.vs { - case bincSpZero: + } else if vd == bincVdSpecial { + if vs == bincSpZero { //i = 0 - case 
bincSpNegOne: + } else if vs == bincSpNegOne { neg = true ui = 1 - i = -1 - default: - decErr("numeric decode fails for special value: d.vs: 0x%x", d.vs) + } else { + d.d.errorf("integer decode fails - invalid special value from descriptor %x-%x/%s", + d.vd, d.vs, bincdesc(d.vd, d.vs)) + return } - default: - decErr("number can only be decoded from uint or int values. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd) + } else { + d.d.errorf("integer can only be decoded from int/uint. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd) + return } return } -func (d *bincDecDriver) decodeInt(bitsize uint8) (i int64) { - _, i, _ = d.decIntAny() - checkOverflow(0, i, bitsize) +func (d *bincDecDriver) DecodeInt64() (i int64) { + ui, neg := d.decCheckInteger() + i = chkOvf.SignedIntV(ui) + if neg { + i = -i + } d.bdRead = false return } -func (d *bincDecDriver) decodeUint(bitsize uint8) (ui uint64) { - ui, i, neg := d.decIntAny() +func (d *bincDecDriver) DecodeUint64() (ui uint64) { + ui, neg := d.decCheckInteger() if neg { - decErr("Assigning negative signed value: %v, to unsigned type", i) + d.d.errorf("assigning negative signed value to unsigned integer type") + return } - checkOverflow(ui, 0, bitsize) d.bdRead = false return } -func (d *bincDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { - switch d.vd { - case bincVdSpecial: +func (d *bincDecDriver) DecodeFloat64() (f float64) { + if !d.bdRead { + d.readNextBd() + } + vd, vs := d.vd, d.vs + if vd == bincVdSpecial { d.bdRead = false - switch d.vs { - case bincSpNan: + if vs == bincSpNan { return math.NaN() - case bincSpPosInf: + } else if vs == bincSpPosInf { return math.Inf(1) - case bincSpZeroFloat, bincSpZero: + } else if vs == bincSpZeroFloat || vs == bincSpZero { return - case bincSpNegInf: + } else if vs == bincSpNegInf { return math.Inf(-1) - default: - decErr("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs) + } else { + d.d.errorf("float - invalid special value from descriptor %x-%x/%s", + d.vd, d.vs, bincdesc(d.vd, d.vs)) + return } - case bincVdFloat: + } else if vd == bincVdFloat { f = d.decFloat() - default: - _, i, _ := d.decIntAny() - f = float64(i) + } else { + f = float64(d.DecodeInt64()) } - checkOverflowFloat32(f, chkOverflow32) d.bdRead = false return } // bool can be decoded from bool only (single byte). -func (d *bincDecDriver) decodeBool() (b bool) { - switch d.bd { - case (bincVdSpecial | bincSpFalse): +func (d *bincDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if bd := d.bd; bd == (bincVdSpecial | bincSpFalse) { // b = false - case (bincVdSpecial | bincSpTrue): + } else if bd == (bincVdSpecial | bincSpTrue) { b = true - default: - decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + } else { + d.d.errorf("bool - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + return } d.bdRead = false return } -func (d *bincDecDriver) readMapLen() (length int) { +func (d *bincDecDriver) ReadMapStart() (length int) { + if !d.bdRead { + d.readNextBd() + } if d.vd != bincVdMap { - decErr("Invalid d.vd for map. Expecting 0x%x. Got: 0x%x", bincVdMap, d.vd) + d.d.errorf("map - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + return } length = d.decLen() d.bdRead = false return } -func (d *bincDecDriver) readArrayLen() (length int) { +func (d *bincDecDriver) ReadArrayStart() (length int) { + if !d.bdRead { + d.readNextBd() + } if d.vd != bincVdArray { - decErr("Invalid d.vd for array. Expecting 0x%x. 
Got: 0x%x", bincVdArray, d.vd) + d.d.errorf("array - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + return } length = d.decLen() d.bdRead = false @@ -567,189 +673,299 @@ func (d *bincDecDriver) readArrayLen() (length int) { } func (d *bincDecDriver) decLen() int { - if d.vs <= 3 { - return int(d.decUint()) + if d.vs > 3 { + return int(d.vs - 4) } - return int(d.vs - 4) + return int(d.decLenNumber()) } -func (d *bincDecDriver) decodeString() (s string) { +func (d *bincDecDriver) decLenNumber() (v uint64) { + if x := d.vs; x == 0 { + v = uint64(d.r.readn1()) + } else if x == 1 { + d.r.readb(d.b[6:8]) + v = uint64(bigen.Uint16(d.b[6:8])) + } else if x == 2 { + d.r.readb(d.b[4:8]) + v = uint64(bigen.Uint32(d.b[4:8])) + } else { + d.r.readb(d.b[:8]) + v = bigen.Uint64(d.b[:8]) + } + return +} + +func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool) ( + bs2 []byte, s string) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == bincVdSpecial<<4|bincSpNil { + d.bdRead = false + return + } + var slen = -1 + // var ok bool switch d.vd { case bincVdString, bincVdByteArray: - if length := d.decLen(); length > 0 { - s = string(d.r.readn(length)) + slen = d.decLen() + if zerocopy { + if d.br { + bs2 = d.r.readx(uint(slen)) + } else if len(bs) == 0 { + bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, d.b[:]) + } else { + bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, bs) + } + } else { + bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, bs) + } + if withString { + s = string(bs2) } case bincVdSymbol: + // zerocopy doesn't apply for symbols, + // as the values must be stored in a table for later use. + // //from vs: extract numSymbolBytes, containsStringVal, strLenPrecision, //extract symbol //if containsStringVal, read it and put in map //else look in map for string value - var symbol uint32 + var symbol uint16 vs := d.vs - //fmt.Printf(">>>> d.vs: 0b%b, & 0x8: %v, & 0x4: %v\n", d.vs, vs & 0x8, vs & 0x4) if vs&0x8 == 0 { - symbol = uint32(d.r.readn1()) + symbol = uint16(d.r.readn1()) } else { - symbol = uint32(d.r.readUint16()) + symbol = uint16(bigen.Uint16(d.r.readx(2))) } - if d.m == nil { - d.m = make(map[uint32]string, 16) + if d.s == nil { + d.s = make([]bincDecSymbol, 0, 16) } if vs&0x4 == 0 { - s = d.m[symbol] + for i := range d.s { + j := &d.s[i] + if j.i == symbol { + bs2 = j.b + if withString { + if j.s == "" && bs2 != nil { + j.s = string(bs2) + } + s = j.s + } + break + } + } } else { - var slen int switch vs & 0x3 { case 0: slen = int(d.r.readn1()) case 1: - slen = int(d.r.readUint16()) + slen = int(bigen.Uint16(d.r.readx(2))) case 2: - slen = int(d.r.readUint32()) + slen = int(bigen.Uint32(d.r.readx(4))) case 3: - slen = int(d.r.readUint64()) + slen = int(bigen.Uint64(d.r.readx(8))) } - s = string(d.r.readn(slen)) - d.m[symbol] = s + // since using symbols, do not store any part of + // the parameter bs in the map, as it might be a shared buffer. + // bs2 = decByteSlice(d.r, slen, bs) + bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, nil) + if withString { + s = string(bs2) + } + d.s = append(d.s, bincDecSymbol{i: symbol, s: s, b: bs2}) } default: - decErr("Invalid d.vd for string. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. 
Got: 0x%x", - bincVdString, bincVdByteArray, bincVdSymbol, d.vd) + d.d.errorf("string/bytes - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + return } d.bdRead = false return } -func (d *bincDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { +func (d *bincDecDriver) DecodeString() (s string) { + // DecodeBytes does not accommodate symbols, whose impl stores string version in map. + // Use decStringAndBytes directly. + // return string(d.DecodeBytes(d.b[:], true, true)) + _, s = d.decStringAndBytes(d.b[:], true, true) + return +} + +func (d *bincDecDriver) DecodeStringAsBytes() (s []byte) { + s, _ = d.decStringAndBytes(d.b[:], false, true) + return +} + +func (d *bincDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == bincVdSpecial<<4|bincSpNil { + d.bdRead = false + return nil + } + // check if an "array" of uint8's (see ContainerType for how to infer if an array) + if d.vd == bincVdArray { + bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d) + return + } var clen int - switch d.vd { - case bincVdString, bincVdByteArray: + if d.vd == bincVdString || d.vd == bincVdByteArray { clen = d.decLen() - default: - decErr("Invalid d.vd for bytes. Expecting string:0x%x or bytearray:0x%x. Got: 0x%x", - bincVdString, bincVdByteArray, d.vd) - } - if clen > 0 { - // if no contents in stream, don't update the passed byteslice - if len(bs) != clen { - if len(bs) > clen { - bs = bs[:clen] - } else { - bs = make([]byte, clen) - } - bsOut = bs - changed = true - } - d.r.readb(bs) + } else { + d.d.errorf("bytes - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + return } d.bdRead = false + if zerocopy { + if d.br { + return d.r.readx(uint(clen)) + } else if len(bs) == 0 { + bs = d.b[:] + } + } + return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs) +} + +func (d *bincDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if xtag > 0xff { + d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag) + return + } + realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) + realxtag = uint64(realxtag1) + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) + } else { + ext.ReadExt(rv, xbs) + } return } -func (d *bincDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { - switch d.vd { - case bincVdCustomExt: +func (d *bincDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + if !d.bdRead { + d.readNextBd() + } + if d.vd == bincVdCustomExt { l := d.decLen() xtag = d.r.readn1() if verifyTag && xtag != tag { - decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + d.d.errorf("wrong extension tag - got %b, expecting: %v", xtag, tag) + return } - xbs = d.r.readn(l) - case bincVdByteArray: - xbs, _ = d.decodeBytes(nil) - default: - decErr("Invalid d.vd for extensions (Expecting extensions or byte array). 
Got: 0x%x", d.vd) + if d.br { + xbs = d.r.readx(uint(l)) + } else { + xbs = decByteSlice(d.r, l, d.d.h.MaxInitLen, d.d.b[:]) + } + } else if d.vd == bincVdByteArray { + xbs = d.DecodeBytes(nil, true) + } else { + d.d.errorf("ext - expecting extensions or byte array - %s %x-%x/%s", + msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + return } d.bdRead = false return } -func (d *bincDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) { - d.initReadNext() +func (d *bincDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + + n := d.d.naked() + var decodeFurther bool switch d.vd { case bincVdSpecial: switch d.vs { case bincSpNil: - vt = valueTypeNil + n.v = valueTypeNil case bincSpFalse: - vt = valueTypeBool - v = false + n.v = valueTypeBool + n.b = false case bincSpTrue: - vt = valueTypeBool - v = true + n.v = valueTypeBool + n.b = true case bincSpNan: - vt = valueTypeFloat - v = math.NaN() + n.v = valueTypeFloat + n.f = math.NaN() case bincSpPosInf: - vt = valueTypeFloat - v = math.Inf(1) + n.v = valueTypeFloat + n.f = math.Inf(1) case bincSpNegInf: - vt = valueTypeFloat - v = math.Inf(-1) + n.v = valueTypeFloat + n.f = math.Inf(-1) case bincSpZeroFloat: - vt = valueTypeFloat - v = float64(0) + n.v = valueTypeFloat + n.f = float64(0) case bincSpZero: - vt = valueTypeUint - v = int64(0) // int8(0) + n.v = valueTypeUint + n.u = uint64(0) // int8(0) case bincSpNegOne: - vt = valueTypeInt - v = int64(-1) // int8(-1) + n.v = valueTypeInt + n.i = int64(-1) // int8(-1) default: - decErr("decodeNaked: Unrecognized special value 0x%x", d.vs) + d.d.errorf("cannot infer value - unrecognized special value from descriptor %x-%x/%s", + d.vd, d.vs, bincdesc(d.vd, d.vs)) } case bincVdSmallInt: - vt = valueTypeUint - v = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1 + n.v = valueTypeUint + n.u = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1 case bincVdPosInt: - vt = valueTypeUint - v = d.decUint() + n.v = valueTypeUint + n.u = d.decUint() case bincVdNegInt: - vt = valueTypeInt - v = -(int64(d.decUint())) + n.v = valueTypeInt + n.i = -(int64(d.decUint())) case bincVdFloat: - vt = valueTypeFloat - v = d.decFloat() + n.v = valueTypeFloat + n.f = d.decFloat() case bincVdSymbol: - vt = valueTypeSymbol - v = d.decodeString() + n.v = valueTypeSymbol + n.s = d.DecodeString() case bincVdString: - vt = valueTypeString - v = d.decodeString() + n.v = valueTypeString + n.s = d.DecodeString() case bincVdByteArray: - vt = valueTypeBytes - v, _ = d.decodeBytes(nil) + decNakedReadRawBytes(d, d.d, n, d.h.RawToString) case bincVdTimestamp: - vt = valueTypeTimestamp - tt, err := decodeTime(d.r.readn(int(d.vs))) + n.v = valueTypeTime + tt, err := bincDecodeTime(d.r.readx(uint(d.vs))) if err != nil { panic(err) } - v = tt + n.t = tt case bincVdCustomExt: - vt = valueTypeExt + n.v = valueTypeExt l := d.decLen() - var re RawExt - re.Tag = d.r.readn1() - re.Data = d.r.readn(l) - v = &re - vt = valueTypeExt + n.u = uint64(d.r.readn1()) + if d.br { + n.l = d.r.readx(uint(l)) + } else { + n.l = decByteSlice(d.r, l, d.d.h.MaxInitLen, d.d.b[:]) + } case bincVdArray: - vt = valueTypeArray + n.v = valueTypeArray decodeFurther = true case bincVdMap: - vt = valueTypeMap + n.v = valueTypeMap decodeFurther = true default: - decErr("decodeNaked: Unrecognized d.vd: 0x%x", d.vd) + d.d.errorf("cannot infer value - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) } if !decodeFurther { d.bdRead = false } - return + if n.v == valueTypeUint && d.h.SignedInteger { + n.v = valueTypeInt + n.i = int64(n.u) + } } 
//------------------------------------
@@ -764,23 +980,224 @@ func (d *bincDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurthe
 // extended precision and decimal IEEE 754 floats are unsupported.
 // - Only UTF-8 strings supported.
 //   Unicode_Other Binc types (UTF16, UTF32) are currently unsupported.
+//
+// Note that these EXCEPTIONS are temporary and full support is possible and may happen soon.
 type BincHandle struct {
 	BasicHandle
+	binaryEncodingType
+	noElemSeparators
+
+	// AsSymbols defines what should be encoded as symbols.
+	//
+	// Encoding as symbols can reduce the encoded size significantly.
+	//
+	// However, during encoding, each string to be encoded as a symbol must
+	// be checked to see if it has been seen before. Consequently, encoding time
+	// will increase if using symbols, because string comparisons have a clear cost.
+	//
+	// Values:
+	// - 0: default: library uses best judgement
+	// - 1: use symbols
+	// - 2: do not use symbols
+	AsSymbols uint8
+
+	// AsSymbols: may later on introduce more options ...
+	// - m: map keys
+	// - s: struct fields
+	// - n: none
+	// - a: all: same as m, s, ...
+
+	// _ [1]uint64 // padding
+}
+
+// Name returns the name of the handle: binc
+func (h *BincHandle) Name() string { return "binc" }
+
+// SetBytesExt sets an extension
+func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
+	return h.SetExt(rt, tag, &extWrapper{ext, interfaceExtFailer{}})
+}
+
+func (h *BincHandle) newEncDriver(e *Encoder) encDriver {
+	return &bincEncDriver{e: e, h: h, w: e.w}
+}
+
+func (h *BincHandle) newDecDriver(d *Decoder) decDriver {
+	return &bincDecDriver{d: d, h: h, r: d.r, br: d.bytes}
 }
 
-func (h *BincHandle) newEncDriver(w encWriter) encDriver {
-	return &bincEncDriver{w: w}
+func (e *bincEncDriver) reset() {
+	e.w = e.e.w
+	e.s = 0
+	e.c = 0
+	e.m = nil
 }
 
-func (h *BincHandle) newDecDriver(r decReader) decDriver {
-	return &bincDecDriver{r: r}
+func (d *bincDecDriver) reset() {
+	d.r, d.br = d.d.r, d.d.bytes
+	d.s = nil
+	d.bd, d.bdRead, d.vd, d.vs = 0, false, 0, 0
 }
 
-func (_ *BincHandle) writeExt() bool {
-	return true
+// var timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
+
+// EncodeTime encodes a time.Time as a []byte, including
+// information on the instant in time and UTC offset.
+//
+// Format Description
+//
+// A timestamp is composed of 3 components:
+//
+// - secs: signed integer representing seconds since unix epoch
+// - nsecs: unsigned integer representing fractional seconds as a
+//   nanosecond offset within secs, in the range 0 <= nsecs < 1e9
+// - tz: signed integer representing timezone offset in minutes east of UTC,
+//   and a dst (daylight savings time) flag
+//
+// When encoding a timestamp, the first byte is the descriptor, which
+// defines which components are encoded and how many bytes are used to
+// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it
+// is not encoded in the byte array explicitly*.
+//
+// Descriptor 8 bits are of the form `A B C DDD EE`:
+//   A:   Is secs component encoded? 1 = true
+//   B:   Is nsecs component encoded? 1 = true
+//   C:   Is tz component encoded? 1 = true
+//   DDD: Number of extra bytes for secs (range 0-7).
+//        If A = 1, secs encoded in DDD+1 bytes.
+//        If A = 0, secs is not encoded, and is assumed to be 0.
+//        If A = 1, then we need at least 1 byte to encode secs.
+//        DDD says the number of extra bytes beyond that 1.
+//        E.g. if DDD=0, then secs is represented in 1 byte.
+//             if DDD=2, then secs is represented in 3 bytes.
+//   EE:  Number of extra bytes for nsecs (range 0-3).
+//        If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above)
+//
+// Following the descriptor bytes, subsequent bytes are:
+//
+//   secs component encoded in `DDD + 1` bytes (if A == 1)
+//   nsecs component encoded in `EE + 1` bytes (if B == 1)
+//   tz component encoded in 2 bytes (if C == 1)
+//
+// secs and nsecs components are integers encoded in a BigEndian
+// 2's-complement encoding format.
+//
+// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to
+// Least significant bit 0 are described below:
+//
+// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes).
+// Bit 15 = have_dst: set to 1 if we set the dst flag.
+// Bit 14 = dst_on: set to 1 if dst is in effect at the time, or 0 if not.
+// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format.
+//
+func bincEncodeTime(t time.Time) []byte {
+	// t := rv.Interface().(time.Time)
+	tsecs, tnsecs := t.Unix(), t.Nanosecond()
+	var (
+		bd   byte
+		btmp [8]byte
+		bs   [16]byte
+		i    int = 1
+	)
+	l := t.Location()
+	if l == time.UTC {
+		l = nil
+	}
+	if tsecs != 0 {
+		bd = bd | 0x80
+		bigen.PutUint64(btmp[:], uint64(tsecs))
+		f := pruneSignExt(btmp[:], tsecs >= 0)
+		bd = bd | (byte(7-f) << 2)
+		copy(bs[i:], btmp[f:])
+		i = i + (8 - f)
+	}
+	if tnsecs != 0 {
+		bd = bd | 0x40
+		bigen.PutUint32(btmp[:4], uint32(tnsecs))
+		f := pruneSignExt(btmp[:4], true)
+		bd = bd | byte(3-f)
+		copy(bs[i:], btmp[f:4])
+		i = i + (4 - f)
+	}
+	if l != nil {
+		bd = bd | 0x20
+		// Note that Go Libs do not give access to dst flag.
+		_, zoneOffset := t.Zone()
+		// zoneName, zoneOffset := t.Zone()
+		zoneOffset /= 60
+		z := uint16(zoneOffset)
+		bigen.PutUint16(btmp[:2], z)
+		// clear dst flags
+		bs[i] = btmp[0] & 0x3f
+		bs[i+1] = btmp[1]
+		i = i + 2
+	}
+	bs[0] = bd
+	return bs[0:i]
 }
 
-func (h *BincHandle) getBasicHandle() *BasicHandle {
-	return &h.BasicHandle
+// bincDecodeTime decodes a []byte into a time.Time.
+func bincDecodeTime(bs []byte) (tt time.Time, err error) {
+	bd := bs[0]
+	var (
+		tsec  int64
+		tnsec uint32
+		tz    uint16
+		i     byte = 1
+		i2    byte
+		n     byte
+	)
+	if bd&(1<<7) != 0 {
+		var btmp [8]byte
+		n = ((bd >> 2) & 0x7) + 1
+		i2 = i + n
+		copy(btmp[8-n:], bs[i:i2])
+		// if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it)
+		if bs[i]&(1<<7) != 0 {
+			copy(btmp[0:8-n], bsAll0xff)
+			// for j, k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff }
+		}
+		i = i2
+		tsec = int64(bigen.Uint64(btmp[:]))
+	}
+	if bd&(1<<6) != 0 {
+		var btmp [4]byte
+		n = (bd & 0x3) + 1
+		i2 = i + n
+		copy(btmp[4-n:], bs[i:i2])
+		i = i2
+		tnsec = bigen.Uint32(btmp[:])
+	}
+	if bd&(1<<5) == 0 {
+		tt = time.Unix(tsec, int64(tnsec)).UTC()
+		return
+	}
+	// In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name.
+	// However, we need name here, so it can be shown when time is printf'd.
+	// Zone name is in form: UTC-08:00.
+	// Note that Go Libs do not give access to dst flag, so we ignore dst bits
+
+	i2 = i + 2
+	tz = bigen.Uint16(bs[i:i2])
+	// i = i2
+	// sign extend sign bit into top 2 MSB (which were dst bits):
+	if tz&(1<<13) == 0 { // positive
+		tz = tz & 0x3fff // clear 2 MSBs: dst bits
+	} else { // negative
+		tz = tz | 0xc000 // set 2 MSBs: dst bits
+	}
+	tzint := int16(tz)
+	if tzint == 0 {
+		tt = time.Unix(tsec, int64(tnsec)).UTC()
+	} else {
+		// For Go Time, do not use a descriptive timezone.
+ // It's unnecessary, and makes it harder to do a reflect.DeepEqual. + // The Offset already tells what the offset should be, if not on UTC and unknown zone name. + // var zoneName = timeLocUTCName(tzint) + tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60)) + } + return } + +var _ decDriver = (*bincDecDriver)(nil) +var _ encDriver = (*bincEncDriver)(nil) diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/build.sh b/vendor/github.com/hashicorp/go-msgpack/codec/build.sh new file mode 100644 index 0000000000000..dd79c13f81a26 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/build.sh @@ -0,0 +1,267 @@ +#!/bin/bash + +# Run all the different permutations of all the tests and other things +# This helps ensure that nothing gets broken. + +_tests() { + local gover=$( go version | cut -f 3 -d ' ' ) + # note that codecgen requires fastpath, so you cannot do "codecgen notfastpath" + local a=( "" "safe" "notfastpath" "notfastpath safe" "codecgen" "codecgen safe" ) + for i in "${a[@]}" + do + echo ">>>> TAGS: $i" + local i2=${i:-default} + case $gover in + go1.[0-6]*) go vet -printfuncs "errorf" "$@" && + go test ${zargs[*]} -vet off -tags "$i" "$@" ;; + *) go vet -printfuncs "errorf" "$@" && + go test ${zargs[*]} -vet off -tags "alltests $i" -run "Suite" -coverprofile "${i2// /-}.cov.out" "$@" ;; + esac + if [[ "$?" != 0 ]]; then return 1; fi + done + echo "++++++++ TEST SUITES ALL PASSED ++++++++" +} + + +# is a generation needed? +_ng() { + local a="$1" + if [[ ! -e "$a" ]]; then echo 1; return; fi + for i in `ls -1 *.go.tmpl gen.go values_test.go` + do + if [[ "$a" -ot "$i" ]]; then echo 1; return; fi + done +} + +_prependbt() { + cat > ${2} <> ${2} + rm -f ${1} +} + +# _build generates fast-path.go and gen-helper.go. +_build() { + if ! [[ "${zforce}" || $(_ng "fast-path.generated.go") || $(_ng "gen-helper.generated.go") || $(_ng "gen.generated.go") ]]; then return 0; fi + + if [ "${zbak}" ]; then + _zts=`date '+%m%d%Y_%H%M%S'` + _gg=".generated.go" + [ -e "gen-helper${_gg}" ] && mv gen-helper${_gg} gen-helper${_gg}__${_zts}.bak + [ -e "fast-path${_gg}" ] && mv fast-path${_gg} fast-path${_gg}__${_zts}.bak + [ -e "gen${_gg}" ] && mv gen${_gg} gen${_gg}__${_zts}.bak + fi + rm -f gen-helper.generated.go fast-path.generated.go gen.generated.go \ + *safe.generated.go *_generated_test.go *.generated_ffjson_expose.go + + cat > gen.generated.go <> gen.generated.go < gen-dec-map.go.tmpl + cat >> gen.generated.go <> gen.generated.go < gen-dec-array.go.tmpl + cat >> gen.generated.go <> gen.generated.go < gen-enc-chan.go.tmpl + cat >> gen.generated.go < gen-from-tmpl.codec.generated.go < gen-from-tmpl.generated.go < " + fnameOut + " ______") +fin, err := os.Open(fnameIn) +if err != nil { panic(err) } +defer fin.Close() +fout, err := os.Create(fnameOut) +if err != nil { panic(err) } +defer fout.Close() +err = codec.GenInternalGoFile(fin, fout) +if err != nil { panic(err) } +} + +func main() { +run("fast-path.go.tmpl", "fast-path.generated.go") +run("gen-helper.go.tmpl", "gen-helper.generated.go") +run("mammoth-test.go.tmpl", "mammoth_generated_test.go") +run("mammoth2-test.go.tmpl", "mammoth2_generated_test.go") +} +EOF + + sed -e 's+// __DO_NOT_REMOVE__NEEDED_FOR_REPLACING__IMPORT_PATH__FOR_CODEC_BENCH__+import . 
"github.com/ugorji/go/codec"+' \ + shared_test.go > bench/shared_test.go + + # explicitly return 0 if this passes, else return 1 + go run -tags "notfastpath safe codecgen.exec" gen-from-tmpl.generated.go && + rm -f gen-from-tmpl.*generated.go && + return 0 + return 1 +} + +_codegenerators() { + local c5="_generated_test.go" + local c7="$PWD/codecgen" + local c8="$c7/__codecgen" + local c9="codecgen-scratch.go" + + if ! [[ $zforce || $(_ng "values_codecgen${c5}") ]]; then return 0; fi + + # Note: ensure you run the codecgen for this codebase/directory i.e. ./codecgen/codecgen + true && + echo "codecgen ... " && + if [[ $zforce || ! -f "$c8" || "$c7/gen.go" -nt "$c8" ]]; then + echo "rebuilding codecgen ... " && ( cd codecgen && go build -o $c8 ${zargs[*]} . ) + fi && + $c8 -rt codecgen -t 'codecgen generated' -o values_codecgen${c5} -d 19780 $zfin $zfin2 && + cp mammoth2_generated_test.go $c9 && + $c8 -t '!notfastpath' -o mammoth2_codecgen${c5} -d 19781 mammoth2_generated_test.go && + rm -f $c9 && + echo "generators done!" +} + +_prebuild() { + echo "prebuild: zforce: $zforce" + local d="$PWD" + zfin="test_values.generated.go" + zfin2="test_values_flex.generated.go" + zpkg="github.com/ugorji/go/codec" + # zpkg=${d##*/src/} + # zgobase=${d%%/src/*} + # rm -f *_generated_test.go + rm -f codecgen-*.go && + _build && + cp $d/values_test.go $d/$zfin && + cp $d/values_flex_test.go $d/$zfin2 && + _codegenerators && + if [[ "$(type -t _codegenerators_external )" = "function" ]]; then _codegenerators_external ; fi && + if [[ $zforce ]]; then go install ${zargs[*]} .; fi && + echo "prebuild done successfully" + rm -f $d/$zfin $d/$zfin2 + unset zfin zfin2 zpkg +} + +_make() { + zforce=1 + (cd codecgen && go install ${zargs[*]} .) && _prebuild && go install ${zargs[*]} . + unset zforce +} + +_clean() { + rm -f gen-from-tmpl.*generated.go \ + codecgen-*.go \ + test_values.generated.go test_values_flex.generated.go +} + +_release() { + local reply + read -p "Pre-release validation takes a few minutes and MUST be run from within GOPATH/src. Confirm y/n? " -n 1 -r reply + echo + if [[ ! $reply =~ ^[Yy]$ ]]; then return 1; fi + + # expects GOROOT, GOROOT_BOOTSTRAP to have been set. + if [[ -z "${GOROOT// }" || -z "${GOROOT_BOOTSTRAP// }" ]]; then return 1; fi + # (cd $GOROOT && git checkout -f master && git pull && git reset --hard) + (cd $GOROOT && git pull) + local f=`pwd`/make.release.out + cat > $f <>$f + if [[ "$i" != "master" ]]; then i="release-branch.go$i"; fi + (false || + (echo "===== BUILDING GO SDK for branch: $i ... =====" && + cd $GOROOT && + git checkout -f $i && git reset --hard && git clean -f . && + cd src && ./make.bash >>$f 2>&1 && sleep 1 ) ) && + echo "===== GO SDK BUILD DONE =====" && + _prebuild && + echo "===== PREBUILD DONE with exit: $? =====" && + _tests "$@" + if [[ "$?" 
!= 0 ]]; then return 1; fi + done + unset zforce + echo "++++++++ RELEASE TEST SUITES ALL PASSED ++++++++" +} + +_usage() { + cat < [tests, make, prebuild (force) (external), inlining diagnostics, mid-stack inlining, race detector] + -v -> verbose +EOF + if [[ "$(type -t _usage_run)" = "function" ]]; then _usage_run ; fi +} + +_main() { + if [[ -z "$1" ]]; then _usage; return 1; fi + local x + unset zforce + zargs=() + zbenchflags="" + OPTIND=1 + while getopts ":ctmnrgpfvlzdb:" flag + do + case "x$flag" in + 'xf') zforce=1 ;; + 'xv') zverbose=1 ;; + 'xl') zargs+=("-gcflags"); zargs+=("-l=4") ;; + 'xn') zargs+=("-gcflags"); zargs+=("-m=2") ;; + 'xd') zargs+=("-race") ;; + 'xb') x='b'; zbenchflags=${OPTARG} ;; + x\?) _usage; return 1 ;; + *) x=$flag ;; + esac + done + shift $((OPTIND-1)) + # echo ">>>> _main: extra args: $@" + case "x$x" in + 'xt') _tests "$@" ;; + 'xm') _make "$@" ;; + 'xr') _release "$@" ;; + 'xg') _go ;; + 'xp') _prebuild "$@" ;; + 'xc') _clean "$@" ;; + 'xz') _analyze "$@" ;; + 'xb') _bench "$@" ;; + esac + unset zforce zargs zbenchflags +} + +[ "." = `dirname $0` ] && _main "$@" + diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/cbor.go b/vendor/github.com/hashicorp/go-msgpack/codec/cbor.go new file mode 100644 index 0000000000000..7833f9d68f090 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/cbor.go @@ -0,0 +1,767 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "math" + "reflect" + "time" +) + +const ( + cborMajorUint byte = iota + cborMajorNegInt + cborMajorBytes + cborMajorText + cborMajorArray + cborMajorMap + cborMajorTag + cborMajorOther +) + +const ( + cborBdFalse byte = 0xf4 + iota + cborBdTrue + cborBdNil + cborBdUndefined + cborBdExt + cborBdFloat16 + cborBdFloat32 + cborBdFloat64 +) + +const ( + cborBdIndefiniteBytes byte = 0x5f + cborBdIndefiniteString byte = 0x7f + cborBdIndefiniteArray byte = 0x9f + cborBdIndefiniteMap byte = 0xbf + cborBdBreak byte = 0xff +) + +// These define some in-stream descriptors for +// manual encoding e.g. 
when doing explicit indefinite-length +const ( + CborStreamBytes byte = 0x5f + CborStreamString byte = 0x7f + CborStreamArray byte = 0x9f + CborStreamMap byte = 0xbf + CborStreamBreak byte = 0xff +) + +const ( + cborBaseUint byte = 0x00 + cborBaseNegInt byte = 0x20 + cborBaseBytes byte = 0x40 + cborBaseString byte = 0x60 + cborBaseArray byte = 0x80 + cborBaseMap byte = 0xa0 + cborBaseTag byte = 0xc0 + cborBaseSimple byte = 0xe0 +) + +func cbordesc(bd byte) string { + switch bd { + case cborBdNil: + return "nil" + case cborBdFalse: + return "false" + case cborBdTrue: + return "true" + case cborBdFloat16, cborBdFloat32, cborBdFloat64: + return "float" + case cborBdIndefiniteBytes: + return "bytes*" + case cborBdIndefiniteString: + return "string*" + case cborBdIndefiniteArray: + return "array*" + case cborBdIndefiniteMap: + return "map*" + default: + switch { + case bd >= cborBaseUint && bd < cborBaseNegInt: + return "(u)int" + case bd >= cborBaseNegInt && bd < cborBaseBytes: + return "int" + case bd >= cborBaseBytes && bd < cborBaseString: + return "bytes" + case bd >= cborBaseString && bd < cborBaseArray: + return "string" + case bd >= cborBaseArray && bd < cborBaseMap: + return "array" + case bd >= cborBaseMap && bd < cborBaseTag: + return "map" + case bd >= cborBaseTag && bd < cborBaseSimple: + return "ext" + default: + return "unknown" + } + } +} + +// ------------------- + +type cborEncDriver struct { + noBuiltInTypes + encDriverNoopContainerWriter + e *Encoder + w *encWriterSwitch + h *CborHandle + x [8]byte + // _ [3]uint64 // padding +} + +func (e *cborEncDriver) EncodeNil() { + e.w.writen1(cborBdNil) +} + +func (e *cborEncDriver) EncodeBool(b bool) { + if b { + e.w.writen1(cborBdTrue) + } else { + e.w.writen1(cborBdFalse) + } +} + +func (e *cborEncDriver) EncodeFloat32(f float32) { + e.w.writen1(cborBdFloat32) + bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f)) +} + +func (e *cborEncDriver) EncodeFloat64(f float64) { + e.w.writen1(cborBdFloat64) + bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f)) +} + +func (e *cborEncDriver) encUint(v uint64, bd byte) { + if v <= 0x17 { + e.w.writen1(byte(v) + bd) + } else if v <= math.MaxUint8 { + e.w.writen2(bd+0x18, uint8(v)) + } else if v <= math.MaxUint16 { + e.w.writen1(bd + 0x19) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(v)) + } else if v <= math.MaxUint32 { + e.w.writen1(bd + 0x1a) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(v)) + } else { // if v <= math.MaxUint64 { + e.w.writen1(bd + 0x1b) + bigenHelper{e.x[:8], e.w}.writeUint64(v) + } +} + +func (e *cborEncDriver) EncodeInt(v int64) { + if v < 0 { + e.encUint(uint64(-1-v), cborBaseNegInt) + } else { + e.encUint(uint64(v), cborBaseUint) + } +} + +func (e *cborEncDriver) EncodeUint(v uint64) { + e.encUint(v, cborBaseUint) +} + +func (e *cborEncDriver) encLen(bd byte, length int) { + e.encUint(uint64(length), bd) +} + +func (e *cborEncDriver) EncodeTime(t time.Time) { + if t.IsZero() { + e.EncodeNil() + } else if e.h.TimeRFC3339 { + e.encUint(0, cborBaseTag) + e.EncodeStringEnc(cUTF8, t.Format(time.RFC3339Nano)) + } else { + e.encUint(1, cborBaseTag) + t = t.UTC().Round(time.Microsecond) + sec, nsec := t.Unix(), uint64(t.Nanosecond()) + if nsec == 0 { + e.EncodeInt(sec) + } else { + e.EncodeFloat64(float64(sec) + float64(nsec)/1e9) + } + } +} + +func (e *cborEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) { + e.encUint(uint64(xtag), cborBaseTag) + if v := ext.ConvertExt(rv); v == nil { + e.EncodeNil() + } else { + en.encode(v) + } +} + 
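encUint above implements the standard CBOR "head" encoding from RFC 7049 §2.1: values up to 0x17 are packed into the low bits of the initial byte, and larger values are prefixed with 0x18-0x1b followed by a 1, 2, 4 or 8-byte big-endian argument. A self-contained sketch of the same scheme (cborHead is a hypothetical helper written for illustration; it is not taken from this file):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math"
)

// cborHead returns the head bytes for an unsigned value v under a given
// major-type base (e.g. 0x00 for uint, 0x80 for array length).
func cborHead(v uint64, base byte) []byte {
	switch {
	case v <= 0x17: // value fits directly in the 5 low bits of the head byte
		return []byte{base + byte(v)}
	case v <= math.MaxUint8: // 0x18: one-byte argument follows
		return []byte{base + 0x18, byte(v)}
	case v <= math.MaxUint16: // 0x19: two-byte big-endian argument
		b := []byte{base + 0x19, 0, 0}
		binary.BigEndian.PutUint16(b[1:], uint16(v))
		return b
	case v <= math.MaxUint32: // 0x1a: four-byte big-endian argument
		b := []byte{base + 0x1a, 0, 0, 0, 0}
		binary.BigEndian.PutUint32(b[1:], uint32(v))
		return b
	default: // 0x1b: eight-byte big-endian argument
		b := make([]byte, 9)
		b[0] = base + 0x1b
		binary.BigEndian.PutUint64(b[1:], v)
		return b
	}
}

func main() {
	fmt.Printf("% x\n", cborHead(10, 0x00))  // 0a        -> unsigned 10
	fmt.Printf("% x\n", cborHead(500, 0x00)) // 19 01 f4  -> unsigned 500
	fmt.Printf("% x\n", cborHead(3, 0x80))   // 83        -> array of length 3
}
```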
+func (e *cborEncDriver) EncodeRawExt(re *RawExt, en *Encoder) { + e.encUint(uint64(re.Tag), cborBaseTag) + // only encodes re.Value (never re.Data) + // if false && re.Data != nil { + // en.encode(re.Data) + // } else if re.Value != nil { + if re.Value != nil { + en.encode(re.Value) + } else { + e.EncodeNil() + } +} + +func (e *cborEncDriver) WriteArrayStart(length int) { + if e.h.IndefiniteLength { + e.w.writen1(cborBdIndefiniteArray) + } else { + e.encLen(cborBaseArray, length) + } +} + +func (e *cborEncDriver) WriteMapStart(length int) { + if e.h.IndefiniteLength { + e.w.writen1(cborBdIndefiniteMap) + } else { + e.encLen(cborBaseMap, length) + } +} + +func (e *cborEncDriver) WriteMapEnd() { + if e.h.IndefiniteLength { + e.w.writen1(cborBdBreak) + } +} + +func (e *cborEncDriver) WriteArrayEnd() { + if e.h.IndefiniteLength { + e.w.writen1(cborBdBreak) + } +} + +func (e *cborEncDriver) EncodeString(c charEncoding, v string) { + e.encStringBytesS(cborBaseString, v) +} + +func (e *cborEncDriver) EncodeStringEnc(c charEncoding, v string) { + e.encStringBytesS(cborBaseString, v) +} + +func (e *cborEncDriver) EncodeStringBytes(c charEncoding, v []byte) { + if v == nil { + e.EncodeNil() + } else if c == cRAW { + e.encStringBytesS(cborBaseBytes, stringView(v)) + } else { + e.encStringBytesS(cborBaseString, stringView(v)) + } +} + +func (e *cborEncDriver) EncodeStringBytesRaw(v []byte) { + if v == nil { + e.EncodeNil() + } else { + e.encStringBytesS(cborBaseBytes, stringView(v)) + } +} + +func (e *cborEncDriver) encStringBytesS(bb byte, v string) { + if e.h.IndefiniteLength { + if bb == cborBaseBytes { + e.w.writen1(cborBdIndefiniteBytes) + } else { + e.w.writen1(cborBdIndefiniteString) + } + var vlen uint = uint(len(v)) + blen := vlen / 4 + if blen == 0 { + blen = 64 + } else if blen > 1024 { + blen = 1024 + } + for i := uint(0); i < vlen; { + var v2 string + i2 := i + blen + if i2 < vlen { + v2 = v[i:i2] + } else { + v2 = v[i:] + } + e.encLen(bb, len(v2)) + e.w.writestr(v2) + i = i2 + } + e.w.writen1(cborBdBreak) + } else { + e.encLen(bb, len(v)) + e.w.writestr(v) + } +} + +// ---------------------- + +type cborDecDriver struct { + d *Decoder + h *CborHandle + r *decReaderSwitch + br bool // bytes reader + bdRead bool + bd byte + noBuiltInTypes + // decNoSeparator + decDriverNoopContainerReader + // _ [3]uint64 // padding +} + +func (d *cborDecDriver) readNextBd() { + d.bd = d.r.readn1() + d.bdRead = true +} + +func (d *cborDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + +func (d *cborDecDriver) ContainerType() (vt valueType) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == cborBdNil { + return valueTypeNil + } else if d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString) { + return valueTypeBytes + } else if d.bd == cborBdIndefiniteString || (d.bd >= cborBaseString && d.bd < cborBaseArray) { + return valueTypeString + } else if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) { + return valueTypeArray + } else if d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag) { + return valueTypeMap + } + // else { + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + // } + return valueTypeUnset +} + +func (d *cborDecDriver) TryDecodeAsNil() bool { + if !d.bdRead { + d.readNextBd() + } + // treat Nil and Undefined as nil values + if d.bd == cborBdNil || d.bd == cborBdUndefined { + d.bdRead = false + return true + } + return false +} + +func (d *cborDecDriver) 
CheckBreak() bool { + if !d.bdRead { + d.readNextBd() + } + if d.bd == cborBdBreak { + d.bdRead = false + return true + } + return false +} + +func (d *cborDecDriver) decUint() (ui uint64) { + v := d.bd & 0x1f + if v <= 0x17 { + ui = uint64(v) + } else { + if v == 0x18 { + ui = uint64(d.r.readn1()) + } else if v == 0x19 { + ui = uint64(bigen.Uint16(d.r.readx(2))) + } else if v == 0x1a { + ui = uint64(bigen.Uint32(d.r.readx(4))) + } else if v == 0x1b { + ui = uint64(bigen.Uint64(d.r.readx(8))) + } else { + d.d.errorf("invalid descriptor decoding uint: %x/%s", d.bd, cbordesc(d.bd)) + return + } + } + return +} + +func (d *cborDecDriver) decCheckInteger() (neg bool) { + if !d.bdRead { + d.readNextBd() + } + major := d.bd >> 5 + if major == cborMajorUint { + } else if major == cborMajorNegInt { + neg = true + } else { + d.d.errorf("not an integer - invalid major %v from descriptor %x/%s", + major, d.bd, cbordesc(d.bd)) + return + } + return +} + +func (d *cborDecDriver) DecodeInt64() (i int64) { + neg := d.decCheckInteger() + ui := d.decUint() + // check if this number can be converted to an int without overflow + if neg { + i = -(chkOvf.SignedIntV(ui + 1)) + } else { + i = chkOvf.SignedIntV(ui) + } + d.bdRead = false + return +} + +func (d *cborDecDriver) DecodeUint64() (ui uint64) { + if d.decCheckInteger() { + d.d.errorf("assigning negative signed value to unsigned type") + return + } + ui = d.decUint() + d.bdRead = false + return +} + +func (d *cborDecDriver) DecodeFloat64() (f float64) { + if !d.bdRead { + d.readNextBd() + } + if bd := d.bd; bd == cborBdFloat16 { + f = float64(math.Float32frombits(halfFloatToFloatBits(bigen.Uint16(d.r.readx(2))))) + } else if bd == cborBdFloat32 { + f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + } else if bd == cborBdFloat64 { + f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + } else if bd >= cborBaseUint && bd < cborBaseBytes { + f = float64(d.DecodeInt64()) + } else { + d.d.errorf("float only valid from float16/32/64 - invalid descriptor %x/%s", bd, cbordesc(bd)) + return + } + d.bdRead = false + return +} + +// bool can be decoded from bool only (single byte). +func (d *cborDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if bd := d.bd; bd == cborBdTrue { + b = true + } else if bd == cborBdFalse { + } else { + d.d.errorf("not bool - %s %x/%s", msgBadDesc, d.bd, cbordesc(d.bd)) + return + } + d.bdRead = false + return +} + +func (d *cborDecDriver) ReadMapStart() (length int) { + if !d.bdRead { + d.readNextBd() + } + d.bdRead = false + if d.bd == cborBdIndefiniteMap { + return -1 + } + return d.decLen() +} + +func (d *cborDecDriver) ReadArrayStart() (length int) { + if !d.bdRead { + d.readNextBd() + } + d.bdRead = false + if d.bd == cborBdIndefiniteArray { + return -1 + } + return d.decLen() +} + +func (d *cborDecDriver) decLen() int { + return int(d.decUint()) +} + +func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte) []byte { + d.bdRead = false + for { + if d.CheckBreak() { + break + } + if major := d.bd >> 5; major != cborMajorBytes && major != cborMajorText { + d.d.errorf("expect bytes/string major type in indefinite string/bytes;"+ + " got major %v from descriptor %x/%x", major, d.bd, cbordesc(d.bd)) + return nil + } + n := d.decLen() + oldLen := len(bs) + newLen := oldLen + n + if newLen > cap(bs) { + bs2 := make([]byte, newLen, 2*cap(bs)+n) + copy(bs2, bs) + bs = bs2 + } else { + bs = bs[:newLen] + } + d.r.readb(bs[oldLen:newLen]) + // bs = append(bs, d.r.readn()...) 
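+		// clearing bdRead makes the CheckBreak call at the top of the loop read the next chunk's initial byte (or the 0xff break code)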
+ d.bdRead = false + } + d.bdRead = false + return bs +} + +func (d *cborDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == cborBdNil || d.bd == cborBdUndefined { + d.bdRead = false + return nil + } + if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString { + d.bdRead = false + if bs == nil { + if zerocopy { + return d.decAppendIndefiniteBytes(d.d.b[:0]) + } + return d.decAppendIndefiniteBytes(zeroByteSlice) + } + return d.decAppendIndefiniteBytes(bs[:0]) + } + // check if an "array" of uint8's (see ContainerType for how to infer if an array) + if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) { + bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d) + return + } + clen := d.decLen() + d.bdRead = false + if zerocopy { + if d.br { + return d.r.readx(uint(clen)) + } else if len(bs) == 0 { + bs = d.d.b[:] + } + } + return decByteSlice(d.r, clen, d.h.MaxInitLen, bs) +} + +func (d *cborDecDriver) DecodeString() (s string) { + return string(d.DecodeBytes(d.d.b[:], true)) +} + +func (d *cborDecDriver) DecodeStringAsBytes() (s []byte) { + return d.DecodeBytes(d.d.b[:], true) +} + +func (d *cborDecDriver) DecodeTime() (t time.Time) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == cborBdNil || d.bd == cborBdUndefined { + d.bdRead = false + return + } + xtag := d.decUint() + d.bdRead = false + return d.decodeTime(xtag) +} + +func (d *cborDecDriver) decodeTime(xtag uint64) (t time.Time) { + if !d.bdRead { + d.readNextBd() + } + switch xtag { + case 0: + var err error + if t, err = time.Parse(time.RFC3339, stringView(d.DecodeStringAsBytes())); err != nil { + d.d.errorv(err) + } + case 1: + // decode an int64 or a float, and infer time.Time from there. + // for floats, round to microseconds, as that is what is guaranteed to fit well. + switch { + case d.bd == cborBdFloat16, d.bd == cborBdFloat32: + f1, f2 := math.Modf(d.DecodeFloat64()) + t = time.Unix(int64(f1), int64(f2*1e9)) + case d.bd == cborBdFloat64: + f1, f2 := math.Modf(d.DecodeFloat64()) + t = time.Unix(int64(f1), int64(f2*1e9)) + case d.bd >= cborBaseUint && d.bd < cborBaseNegInt, + d.bd >= cborBaseNegInt && d.bd < cborBaseBytes: + t = time.Unix(d.DecodeInt64(), 0) + default: + d.d.errorf("time.Time can only be decoded from a number (or RFC3339 string)") + } + default: + d.d.errorf("invalid tag for time.Time - expecting 0 or 1, got 0x%x", xtag) + } + t = t.UTC().Round(time.Microsecond) + return +} + +func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if !d.bdRead { + d.readNextBd() + } + u := d.decUint() + d.bdRead = false + realxtag = u + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + d.d.decode(&re.Value) + } else if xtag != realxtag { + d.d.errorf("Wrong extension tag. Got %b. 
Expecting: %v", realxtag, xtag)
+		return
+	} else {
+		var v interface{}
+		d.d.decode(&v)
+		ext.UpdateExt(rv, v)
+	}
+	d.bdRead = false
+	return
+}
+
+func (d *cborDecDriver) DecodeNaked() {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+
+	n := d.d.naked()
+	var decodeFurther bool
+
+	switch d.bd {
+	case cborBdNil:
+		n.v = valueTypeNil
+	case cborBdFalse:
+		n.v = valueTypeBool
+		n.b = false
+	case cborBdTrue:
+		n.v = valueTypeBool
+		n.b = true
+	case cborBdFloat16, cborBdFloat32, cborBdFloat64:
+		n.v = valueTypeFloat
+		n.f = d.DecodeFloat64()
+	case cborBdIndefiniteBytes:
+		decNakedReadRawBytes(d, d.d, n, d.h.RawToString)
+	case cborBdIndefiniteString:
+		n.v = valueTypeString
+		n.s = d.DecodeString()
+	case cborBdIndefiniteArray:
+		n.v = valueTypeArray
+		decodeFurther = true
+	case cborBdIndefiniteMap:
+		n.v = valueTypeMap
+		decodeFurther = true
+	default:
+		switch {
+		case d.bd >= cborBaseUint && d.bd < cborBaseNegInt:
+			if d.h.SignedInteger {
+				n.v = valueTypeInt
+				n.i = d.DecodeInt64()
+			} else {
+				n.v = valueTypeUint
+				n.u = d.DecodeUint64()
+			}
+		case d.bd >= cborBaseNegInt && d.bd < cborBaseBytes:
+			n.v = valueTypeInt
+			n.i = d.DecodeInt64()
+		case d.bd >= cborBaseBytes && d.bd < cborBaseString:
+			decNakedReadRawBytes(d, d.d, n, d.h.RawToString)
+		case d.bd >= cborBaseString && d.bd < cborBaseArray:
+			n.v = valueTypeString
+			n.s = d.DecodeString()
+		case d.bd >= cborBaseArray && d.bd < cborBaseMap:
+			n.v = valueTypeArray
+			decodeFurther = true
+		case d.bd >= cborBaseMap && d.bd < cborBaseTag:
+			n.v = valueTypeMap
+			decodeFurther = true
+		case d.bd >= cborBaseTag && d.bd < cborBaseSimple:
+			n.v = valueTypeExt
+			n.u = d.decUint()
+			n.l = nil
+			if n.u == 0 || n.u == 1 {
+				d.bdRead = false
+				n.v = valueTypeTime
+				n.t = d.decodeTime(n.u)
+			}
+			// d.bdRead = false
+			// d.d.decode(&re.Value) // handled by decode itself.
+			// decodeFurther = true
+		default:
+			d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd)
+			return
+		}
+	}
+
+	if !decodeFurther {
+		d.bdRead = false
+	}
+}
+
+// -------------------------
+
+// CborHandle is a Handle for the CBOR encoding format,
+// defined at http://tools.ietf.org/html/rfc7049 and documented further at http://cbor.io .
+//
+// CBOR is comprehensively supported, including support for:
+//   - indefinite-length arrays/maps/bytes/strings
+//   - (extension) tags in range 0..0xffff (0 .. 65535)
+//   - half, single and double-precision floats
+//   - all numbers (1, 2, 4 and 8-byte signed and unsigned integers)
+//   - nil, true, false, ...
+//   - arrays and maps, bytes and text strings
+//
+// None of the optional extensions (with tags) defined in the spec are supported out-of-the-box.
+// Users can implement them as needed (using SetExt), including spec-documented ones:
+//   - timestamp, BigNum, BigFloat, Decimals,
+//   - Encoded Text (e.g. URL, regexp, base64, MIME Message), etc.
+type CborHandle struct {
+	binaryEncodingType
+	noElemSeparators
+	BasicHandle
+
+	// IndefiniteLength=true means that we encode using indefinite-length mode.
+	IndefiniteLength bool
+
+	// TimeRFC3339 says to encode time.Time using RFC3339 format.
+	// If unset, we encode time.Time using seconds past epoch.
+ TimeRFC3339 bool + + // _ [1]uint64 // padding +} + +// Name returns the name of the handle: cbor +func (h *CborHandle) Name() string { return "cbor" } + +// SetInterfaceExt sets an extension +func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { + return h.SetExt(rt, tag, &extWrapper{bytesExtFailer{}, ext}) +} + +func (h *CborHandle) newEncDriver(e *Encoder) encDriver { + return &cborEncDriver{e: e, w: e.w, h: h} +} + +func (h *CborHandle) newDecDriver(d *Decoder) decDriver { + return &cborDecDriver{d: d, h: h, r: d.r, br: d.bytes} +} + +func (e *cborEncDriver) reset() { + e.w = e.e.w +} + +func (d *cborDecDriver) reset() { + d.r, d.br = d.d.r, d.d.bytes + d.bd, d.bdRead = 0, false +} + +var _ decDriver = (*cborDecDriver)(nil) +var _ encDriver = (*cborEncDriver)(nil) diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/codecgen.go b/vendor/github.com/hashicorp/go-msgpack/codec/codecgen.go new file mode 100644 index 0000000000000..cc5ecec6dbbac --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/codecgen.go @@ -0,0 +1,13 @@ +// +build codecgen generated + +package codec + +// this file is here, to set the codecgen variable to true +// when the build tag codecgen is set. +// +// this allows us do specific things e.g. skip missing fields tests, +// when running in codecgen mode. + +func init() { + codecgen = true +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/decode.go b/vendor/github.com/hashicorp/go-msgpack/codec/decode.go index 851b54ac7e776..27d362012d146 100644 --- a/vendor/github.com/hashicorp/go-msgpack/codec/decode.go +++ b/vendor/github.com/hashicorp/go-msgpack/codec/decode.go @@ -1,578 +1,2439 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. package codec import ( + "encoding" + "errors" + "fmt" "io" "reflect" - // "runtime/debug" + "runtime" + "strconv" + "time" ) // Some tagging information for error messages. const ( - msgTagDec = "codec.decoder" - msgBadDesc = "Unrecognized descriptor byte" - msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v" + msgBadDesc = "unrecognized descriptor byte" + // msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v" ) +const ( + decDefMaxDepth = 1024 // maximum depth + decDefSliceCap = 8 + decDefChanCap = 64 // should be large, as cap cannot be expanded + decScratchByteArrayLen = cacheLineSize // + (8 * 2) // - (8 * 1) +) + +var ( + errstrOnlyMapOrArrayCanDecodeIntoStruct = "only encoded map or array can be decoded into a struct" + errstrCannotDecodeIntoNil = "cannot decode into nil" + + errmsgExpandSliceOverflow = "expand slice: slice overflow" + errmsgExpandSliceCannotChange = "expand slice: cannot change" + + errDecoderNotInitialized = errors.New("Decoder not initialized") + + errDecUnreadByteNothingToRead = errors.New("cannot unread - nothing has been read") + errDecUnreadByteLastByteNotRead = errors.New("cannot unread - last byte has not been read") + errDecUnreadByteUnknown = errors.New("cannot unread - reason unknown") + errMaxDepthExceeded = errors.New("maximum decoding depth exceeded") +) + +/* + // decReader abstracts the reading source, allowing implementations that can // read from an io.Reader or directly off a byte slice with zero-copying. 
+// +// Deprecated: Use decReaderSwitch instead. type decReader interface { - readn(n int) []byte + unreadn1() + // readx will use the implementation scratch buffer if possible i.e. n < len(scratchbuf), OR + // just return a view of the []byte being decoded from. + // Ensure you call detachZeroCopyBytes later if this needs to be sent outside codec control. + readx(n int) []byte readb([]byte) readn1() uint8 - readUint16() uint16 - readUint32() uint32 - readUint64() uint64 + numread() uint // number of bytes read + track() + stopTrack() []byte + + // skip will skip any byte that matches, and return the first non-matching byte + skip(accept *bitset256) (token byte) + // readTo will read any byte that matches, stopping once no-longer matching. + readTo(in []byte, accept *bitset256) (out []byte) + // readUntil will read, only stopping once it matches the 'stop' byte. + readUntil(in []byte, stop byte) (out []byte) } +*/ + type decDriver interface { - initReadNext() - tryDecodeAsNil() bool - currentEncodedType() valueType - isBuiltinType(rt uintptr) bool - decodeBuiltin(rt uintptr, v interface{}) - //decodeNaked: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types). - decodeNaked() (v interface{}, vt valueType, decodeFurther bool) - decodeInt(bitsize uint8) (i int64) - decodeUint(bitsize uint8) (ui uint64) - decodeFloat(chkOverflow32 bool) (f float64) - decodeBool() (b bool) - // decodeString can also decode symbols - decodeString() (s string) - decodeBytes(bs []byte) (bsOut []byte, changed bool) - decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) - readMapLen() int - readArrayLen() int + // this will check if the next token is a break. + CheckBreak() bool + // TryDecodeAsNil tries to decode as nil. + // Note: TryDecodeAsNil should be careful not to share any temporary []byte with + // the rest of the decDriver. This is because sometimes, we optimize by holding onto + // a transient []byte, and ensuring the only other call we make to the decDriver + // during that time is maybe a TryDecodeAsNil() call. + TryDecodeAsNil() bool + // ContainerType returns one of: Bytes, String, Nil, Slice or Map. Return unSet if not known. + ContainerType() (vt valueType) + // IsBuiltinType(rt uintptr) bool + + // DecodeNaked will decode primitives (number, bool, string, []byte) and RawExt. + // For maps and arrays, it will not do the decoding in-band, but will signal + // the decoder, so that is done later, by setting the decNaked.valueType field. + // + // Note: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types). + // for extensions, DecodeNaked must read the tag and the []byte if it exists. + // if the []byte is not read, then kInterfaceNaked will treat it as a Handle + // that stores the subsequent value in-band, and complete reading the RawExt. + // + // extensions should also use readx to decode them, for efficiency. + // kInterface will extract the detached byte slice if it has to pass it outside its realm. + DecodeNaked() + + // Deprecated: use DecodeInt64 and DecodeUint64 instead + // DecodeInt(bitsize uint8) (i int64) + // DecodeUint(bitsize uint8) (ui uint64) + + DecodeInt64() (i int64) + DecodeUint64() (ui uint64) + + DecodeFloat64() (f float64) + DecodeBool() (b bool) + // DecodeString can also decode symbols. + // It looks redundant as DecodeBytes is available. + // However, some codecs (e.g. binc) support symbols and can + // return a pre-stored string value, meaning that it can bypass + // the cost of []byte->string conversion. 
+	DecodeString() (s string)
+	DecodeStringAsBytes() (v []byte)
+
+	// DecodeBytes may be called directly, without going through reflection.
+	// Consequently, it must be designed to handle possible nil.
+	DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte)
+	// DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte)
+
+	// decodeExt will decode into a *RawExt or into an extension.
+	DecodeExt(v interface{}, xtag uint64, ext Ext) (realxtag uint64)
+	// decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte)
+
+	DecodeTime() (t time.Time)
+
+	ReadArrayStart() int
+	ReadArrayElem()
+	ReadArrayEnd()
+	ReadMapStart() int
+	ReadMapElemKey()
+	ReadMapElemValue()
+	ReadMapEnd()
+
+	reset()
+	uncacheRead()
+}
+
+type decodeError struct {
+	codecError
+	pos int
+}
+
+func (d decodeError) Error() string {
+	return fmt.Sprintf("%s decode error [pos %d]: %v", d.name, d.pos, d.err)
 }
 
+type decDriverNoopContainerReader struct{}
+
+func (x decDriverNoopContainerReader) ReadArrayStart() (v int) { return }
+func (x decDriverNoopContainerReader) ReadArrayElem() {}
+func (x decDriverNoopContainerReader) ReadArrayEnd() {}
+func (x decDriverNoopContainerReader) ReadMapStart() (v int) { return }
+func (x decDriverNoopContainerReader) ReadMapElemKey() {}
+func (x decDriverNoopContainerReader) ReadMapElemValue() {}
+func (x decDriverNoopContainerReader) ReadMapEnd() {}
+func (x decDriverNoopContainerReader) CheckBreak() (v bool) { return }
+
+// func (x decNoSeparator) uncacheRead() {}
+
+// DecodeOptions captures configuration options during decode.
 type DecodeOptions struct {
-	// An instance of MapType is used during schema-less decoding of a map in the stream.
-	// If nil, we use map[interface{}]interface{}
+	// MapType specifies type to use during schema-less decoding of a map in the stream.
+	// If nil (unset), we default to map[string]interface{} iff json handle and MapStringAsKey=true,
+	// else map[interface{}]interface{}.
 	MapType reflect.Type
-	// An instance of SliceType is used during schema-less decoding of an array in the stream.
-	// If nil, we use []interface{}
+
+	// SliceType specifies type to use during schema-less decoding of an array in the stream.
+	// If nil (unset), we default to []interface{} for all formats.
 	SliceType reflect.Type
-	// ErrorIfNoField controls whether an error is returned when decoding a map
+
+	// MaxInitLen defines the maximum initial length that we "make" a collection
+	// (string, slice, map, chan). If 0 or negative, we default to a sensible value
+	// based on the size of an element in the collection.
+	//
+	// For example, when decoding, a stream may say that it has 2^64 elements.
+	// We should not automatically provision a slice of that size, to prevent Out-Of-Memory crash.
+	// Instead, we provision up to MaxInitLen, fill that up, and start appending after that.
+	MaxInitLen int
+
+	// ReaderBufferSize is the size of the buffer used when reading.
+	//
+	// if > 0, we use a smart buffer internally for performance purposes.
+	ReaderBufferSize int
+
+	// MaxDepth defines the maximum depth when decoding nested
+	// maps and slices. If 0 or negative, we default to a suitably large number (currently 1024).
+	MaxDepth int16
+
+	// If ErrorIfNoField, return an error when decoding a map
 	// from a codec stream into a struct, and no matching struct field is found.
 	ErrorIfNoField bool
+
+	// If ErrorIfNoArrayExpand, return an error when decoding a slice/array that cannot be expanded.
+ // For example, the stream contains an array of 8 items, but you are decoding into a [4]T array, + // or you are decoding into a slice of length 4 which is non-addressable (and so cannot be set). + ErrorIfNoArrayExpand bool + + // If SignedInteger, use the int64 during schema-less decoding of unsigned values (not uint64). + SignedInteger bool + + // MapValueReset controls how we decode into a map value. + // + // By default, we MAY retrieve the mapping for a key, and then decode into that. + // However, especially with big maps, that retrieval may be expensive and unnecessary + // if the stream already contains all that is necessary to recreate the value. + // + // If true, we will never retrieve the previous mapping, + // but rather decode into a new value and set that in the map. + // + // If false, we will retrieve the previous mapping if necessary e.g. + // the previous mapping is a pointer, or is a struct or array with pre-set state, + // or is an interface. + MapValueReset bool + + // SliceElementReset: on decoding a slice, reset the element to a zero value first. + // + // concern: if the slice already contained some garbage, we will decode into that garbage. + SliceElementReset bool + + // InterfaceReset controls how we decode into an interface. + // + // By default, when we see a field that is an interface{...}, + // or a map with interface{...} value, we will attempt decoding into the + // "contained" value. + // + // However, this prevents us from reading a string into an interface{} + // that formerly contained a number. + // + // If true, we will decode into a new "blank" value, and set that in the interface. + // If false, we will decode into whatever is contained in the interface. + InterfaceReset bool + + // InternString controls interning of strings during decoding. + // + // Some handles, e.g. json, typically will read map keys as strings. + // If the set of keys are finite, it may help reduce allocation to + // look them up from a map (than to allocate them afresh). + // + // Note: Handles will be smart when using the intern functionality. + // Every string should not be interned. + // An excellent use-case for interning is struct field names, + // or map keys where key type is string. + InternString bool + + // PreferArrayOverSlice controls whether to decode to an array or a slice. + // + // This only impacts decoding into a nil interface{}. + // Consequently, it has no effect on codecgen. + // + // *Note*: This only applies if using go1.5 and above, + // as it requires reflect.ArrayOf support which was absent before go1.5. + PreferArrayOverSlice bool + + // DeleteOnNilMapValue controls how to decode a nil value in the stream. + // + // If true, we will delete the mapping of the key. + // Else, just set the mapping to the zero value of the type. + DeleteOnNilMapValue bool + + // RawToString controls how raw bytes in a stream are decoded into a nil interface{}. + // By default, they are decoded as []byte, but can be decoded as string (if configured). + RawToString bool } -// ------------------------------------ +// ------------------------------------------------ + +type unreadByteStatus uint8 + +// unreadByteStatus goes from +// undefined (when initialized) -- (read) --> canUnread -- (unread) --> canRead ... 
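+// (i.e. a byte can be unread only if it was the most recent thing read, and only one byte can be outstanding)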
+const ( + unreadByteUndefined unreadByteStatus = iota + unreadByteCanRead + unreadByteCanUnread +) + +type ioDecReaderCommon struct { + r io.Reader // the reader passed in + + n uint // num read -// ioDecReader is a decReader that reads off an io.Reader + l byte // last byte + ls unreadByteStatus // last byte status + trb bool // tracking bytes turned on + _ bool + b [4]byte // tiny buffer for reading single bytes + + tr []byte // tracking bytes read +} + +func (z *ioDecReaderCommon) reset(r io.Reader) { + z.r = r + z.ls = unreadByteUndefined + z.l, z.n = 0, 0 + z.trb = false + if z.tr != nil { + z.tr = z.tr[:0] + } +} + +func (z *ioDecReaderCommon) numread() uint { + return z.n +} + +func (z *ioDecReaderCommon) track() { + if z.tr != nil { + z.tr = z.tr[:0] + } + z.trb = true +} + +func (z *ioDecReaderCommon) stopTrack() (bs []byte) { + z.trb = false + return z.tr +} + +// ------------------------------------------ + +// ioDecReader is a decReader that reads off an io.Reader. +// +// It also has a fallback implementation of ByteScanner if needed. type ioDecReader struct { - r io.Reader - br io.ByteReader - x [8]byte //temp byte array re-used internally for efficiency + ioDecReaderCommon + + rr io.Reader + br io.ByteScanner + + x [scratchByteArrayLen]byte // for: get struct field name, swallow valueTypeBytes, etc + _ [1]uint64 // padding } -func (z *ioDecReader) readn(n int) (bs []byte) { - if n <= 0 { +func (z *ioDecReader) reset(r io.Reader) { + z.ioDecReaderCommon.reset(r) + + var ok bool + z.rr = r + z.br, ok = r.(io.ByteScanner) + if !ok { + z.br = z + z.rr = z + } +} + +func (z *ioDecReader) Read(p []byte) (n int, err error) { + if len(p) == 0 { return } - bs = make([]byte, n) - if _, err := io.ReadAtLeast(z.r, bs, n); err != nil { + var firstByte bool + if z.ls == unreadByteCanRead { + z.ls = unreadByteCanUnread + p[0] = z.l + if len(p) == 1 { + n = 1 + return + } + firstByte = true + p = p[1:] + } + n, err = z.r.Read(p) + if n > 0 { + if err == io.EOF && n == len(p) { + err = nil // read was successful, so postpone EOF (till next time) + } + z.l = p[n-1] + z.ls = unreadByteCanUnread + } + if firstByte { + n++ + } + return +} + +func (z *ioDecReader) ReadByte() (c byte, err error) { + n, err := z.Read(z.b[:1]) + if n == 1 { + c = z.b[0] + if err == io.EOF { + err = nil // read was successful, so postpone EOF (till next time) + } + } + return +} + +func (z *ioDecReader) UnreadByte() (err error) { + switch z.ls { + case unreadByteCanUnread: + z.ls = unreadByteCanRead + case unreadByteCanRead: + err = errDecUnreadByteLastByteNotRead + case unreadByteUndefined: + err = errDecUnreadByteNothingToRead + default: + err = errDecUnreadByteUnknown + } + return +} + +func (z *ioDecReader) readx(n uint) (bs []byte) { + if n == 0 { + return + } + if n < uint(len(z.x)) { + bs = z.x[:n] + } else { + bs = make([]byte, n) + } + if _, err := decReadFull(z.rr, bs); err != nil { panic(err) } + z.n += uint(len(bs)) + if z.trb { + z.tr = append(z.tr, bs...) + } return } func (z *ioDecReader) readb(bs []byte) { - if _, err := io.ReadAtLeast(z.r, bs, len(bs)); err != nil { + if len(bs) == 0 { + return + } + if _, err := decReadFull(z.rr, bs); err != nil { panic(err) } + z.n += uint(len(bs)) + if z.trb { + z.tr = append(z.tr, bs...) 
+ } } -func (z *ioDecReader) readn1() uint8 { - if z.br != nil { - b, err := z.br.ReadByte() - if err != nil { - panic(err) +func (z *ioDecReader) readn1eof() (b uint8, eof bool) { + b, err := z.br.ReadByte() + if err == nil { + z.n++ + if z.trb { + z.tr = append(z.tr, b) } - return b + } else if err == io.EOF { + eof = true + } else { + panic(err) } - z.readb(z.x[:1]) - return z.x[0] + return +} + +func (z *ioDecReader) readn1() (b uint8) { + b, err := z.br.ReadByte() + if err == nil { + z.n++ + if z.trb { + z.tr = append(z.tr, b) + } + return + } + panic(err) +} + +func (z *ioDecReader) skip(accept *bitset256) (token byte) { + var eof bool + // for { + // token, eof = z.readn1eof() + // if eof { + // return + // } + // if accept.isset(token) { + // continue + // } + // return + // } +LOOP: + token, eof = z.readn1eof() + if eof { + return + } + if accept.isset(token) { + goto LOOP + } + return } -func (z *ioDecReader) readUint16() uint16 { - z.readb(z.x[:2]) - return bigen.Uint16(z.x[:2]) +func (z *ioDecReader) readTo(in []byte, accept *bitset256) []byte { + // out = in + + // for { + // token, eof := z.readn1eof() + // if eof { + // return + // } + // if accept.isset(token) { + // out = append(out, token) + // } else { + // z.unreadn1() + // return + // } + // } +LOOP: + token, eof := z.readn1eof() + if eof { + return in + } + if accept.isset(token) { + // out = append(out, token) + in = append(in, token) + goto LOOP + } + z.unreadn1() + return in } -func (z *ioDecReader) readUint32() uint32 { - z.readb(z.x[:4]) - return bigen.Uint32(z.x[:4]) +func (z *ioDecReader) readUntil(in []byte, stop byte) (out []byte) { + out = in + // for { + // token, eof := z.readn1eof() + // if eof { + // panic(io.EOF) + // } + // out = append(out, token) + // if token == stop { + // return + // } + // } +LOOP: + token, eof := z.readn1eof() + if eof { + panic(io.EOF) + } + out = append(out, token) + if token == stop { + return + } + goto LOOP } -func (z *ioDecReader) readUint64() uint64 { - z.readb(z.x[:8]) - return bigen.Uint64(z.x[:8]) +//go:noinline +func (z *ioDecReader) unreadn1() { + err := z.br.UnreadByte() + if err != nil { + panic(err) + } + z.n-- + if z.trb { + if l := len(z.tr) - 1; l >= 0 { + z.tr = z.tr[:l] + } + } } // ------------------------------------ -// bytesDecReader is a decReader that reads off a byte slice with zero copying -type bytesDecReader struct { - b []byte // data - c int // cursor - a int // available +type bufioDecReader struct { + ioDecReaderCommon + + c uint // cursor + buf []byte + + bytesBufPooler + + // err error + + // Extensions can call Decode() within a current Decode() call. + // We need to know when the top level Decode() call returns, + // so we can decide whether to Release() or not. + calls uint16 // what depth in mustDecode are we in now. + + _ [6]uint8 // padding + + _ [1]uint64 // padding } -func (z *bytesDecReader) consume(n int) (oldcursor int) { - if z.a == 0 { - panic(io.EOF) +func (z *bufioDecReader) reset(r io.Reader, bufsize int) { + z.ioDecReaderCommon.reset(r) + z.c = 0 + z.calls = 0 + if cap(z.buf) >= bufsize { + z.buf = z.buf[:0] + } else { + z.buf = z.bytesBufPooler.get(bufsize)[:0] + // z.buf = make([]byte, 0, bufsize) } - if n > z.a { - decErr("Trying to read %v bytes. 
Only %v available", n, z.a) +} + +func (z *bufioDecReader) release() { + z.buf = nil + z.bytesBufPooler.end() +} + +func (z *bufioDecReader) readb(p []byte) { + var n = uint(copy(p, z.buf[z.c:])) + z.n += n + z.c += n + if len(p) == int(n) { + if z.trb { + z.tr = append(z.tr, p...) // cost=9 + } + } else { + z.readbFill(p, n) } - // z.checkAvailable(n) - oldcursor = z.c - z.c = oldcursor + n - z.a = z.a - n - return } -func (z *bytesDecReader) readn(n int) (bs []byte) { - if n <= 0 { +//go:noinline - fallback when z.buf is consumed +func (z *bufioDecReader) readbFill(p0 []byte, n uint) { + // at this point, there's nothing in z.buf to read (z.buf is fully consumed) + p := p0[n:] + var n2 uint + var err error + if len(p) > cap(z.buf) { + n2, err = decReadFull(z.r, p) + if err != nil { + panic(err) + } + n += n2 + z.n += n2 + // always keep last byte in z.buf + z.buf = z.buf[:1] + z.buf[0] = p[len(p)-1] + z.c = 1 + if z.trb { + z.tr = append(z.tr, p0[:n]...) + } return } - c0 := z.consume(n) - bs = z.b[c0:z.c] - return + // z.c is now 0, and len(p) <= cap(z.buf) +LOOP: + // for len(p) > 0 && z.err == nil { + if len(p) > 0 { + z.buf = z.buf[0:cap(z.buf)] + var n1 int + n1, err = z.r.Read(z.buf) + n2 = uint(n1) + if n2 == 0 && err != nil { + panic(err) + } + z.buf = z.buf[:n2] + n2 = uint(copy(p, z.buf)) + z.c = n2 + n += n2 + z.n += n2 + p = p[n2:] + goto LOOP + } + if z.c == 0 { + z.buf = z.buf[:1] + z.buf[0] = p[len(p)-1] + z.c = 1 + } + if z.trb { + z.tr = append(z.tr, p0[:n]...) + } } -func (z *bytesDecReader) readb(bs []byte) { - copy(bs, z.readn(len(bs))) +func (z *bufioDecReader) readn1() (b byte) { + // fast-path, so we elide calling into Read() most of the time + if z.c < uint(len(z.buf)) { + b = z.buf[z.c] + z.c++ + z.n++ + if z.trb { + z.tr = append(z.tr, b) + } + } else { // meaning z.c == len(z.buf) or greater ... so need to fill + z.readbFill(z.b[:1], 0) + b = z.b[0] + } + return } -func (z *bytesDecReader) readn1() uint8 { - c0 := z.consume(1) - return z.b[c0] +func (z *bufioDecReader) unreadn1() { + if z.c == 0 { + panic(errDecUnreadByteNothingToRead) + } + z.c-- + z.n-- + if z.trb { + z.tr = z.tr[:len(z.tr)-1] + } } -// Use binaryEncoding helper for 4 and 8 bits, but inline it for 2 bits -// creating temp slice variable and copying it to helper function is expensive -// for just 2 bits. +func (z *bufioDecReader) readx(n uint) (bs []byte) { + if n == 0 { + // return + } else if z.c+n <= uint(len(z.buf)) { + bs = z.buf[z.c : z.c+n] + z.n += n + z.c += n + if z.trb { + z.tr = append(z.tr, bs...) + } + } else { + bs = make([]byte, n) + // n no longer used - can reuse + n = uint(copy(bs, z.buf[z.c:])) + z.n += n + z.c += n + z.readbFill(bs, n) + } + return +} -func (z *bytesDecReader) readUint16() uint16 { - c0 := z.consume(2) - return uint16(z.b[c0+1]) | uint16(z.b[c0])<<8 +//go:noinline - track called by Decoder.nextValueBytes() (called by jsonUnmarshal,rawBytes) +func (z *bufioDecReader) doTrack(y uint) { + z.tr = append(z.tr, z.buf[z.c:y]...) // cost=14??? } -func (z *bytesDecReader) readUint32() uint32 { - c0 := z.consume(4) - return bigen.Uint32(z.b[c0:z.c]) +func (z *bufioDecReader) skipLoopFn(i uint) { + z.n += (i - z.c) - 1 + i++ + if z.trb { + // z.tr = append(z.tr, z.buf[z.c:i]...) 
+ z.doTrack(i) + } + z.c = i } -func (z *bytesDecReader) readUint64() uint64 { - c0 := z.consume(8) - return bigen.Uint64(z.b[c0:z.c]) +func (z *bufioDecReader) skip(accept *bitset256) (token byte) { + // token, _ = z.search(nil, accept, 0, 1); return + + // for i := z.c; i < len(z.buf); i++ { + // if token = z.buf[i]; !accept.isset(token) { + // z.skipLoopFn(i) + // return + // } + // } + + i := z.c +LOOP: + if i < uint(len(z.buf)) { + // inline z.skipLoopFn(i) and refactor, so cost is within inline budget + token = z.buf[i] + i++ + if accept.isset(token) { + goto LOOP + } + z.n += i - 2 - z.c + if z.trb { + z.doTrack(i) + } + z.c = i + return + } + return z.skipFill(accept) } -// ------------------------------------ +func (z *bufioDecReader) skipFill(accept *bitset256) (token byte) { + z.n += uint(len(z.buf)) - z.c + if z.trb { + z.tr = append(z.tr, z.buf[z.c:]...) + } + var n2 int + var err error + for { + z.c = 0 + z.buf = z.buf[0:cap(z.buf)] + n2, err = z.r.Read(z.buf) + if n2 == 0 && err != nil { + panic(err) + } + z.buf = z.buf[:n2] + var i int + for i, token = range z.buf { + if !accept.isset(token) { + z.skipLoopFn(uint(i)) + return + } + } + // for i := 0; i < n2; i++ { + // if token = z.buf[i]; !accept.isset(token) { + // z.skipLoopFn(i) + // return + // } + // } + z.n += uint(n2) + if z.trb { + z.tr = append(z.tr, z.buf...) + } + } +} -// decFnInfo has methods for registering handling decoding of a specific type -// based on some characteristics (builtin, extension, reflect Kind, etc) -type decFnInfo struct { - ti *typeInfo - d *Decoder - dd decDriver - xfFn func(reflect.Value, []byte) error - xfTag byte - array bool +func (z *bufioDecReader) readToLoopFn(i uint, out0 []byte) (out []byte) { + // out0 is never nil + z.n += (i - z.c) - 1 + out = append(out0, z.buf[z.c:i]...) + if z.trb { + z.doTrack(i) + } + z.c = i + return } -func (f *decFnInfo) builtin(rv reflect.Value) { - f.dd.decodeBuiltin(f.ti.rtid, rv.Addr().Interface()) +func (z *bufioDecReader) readTo(in []byte, accept *bitset256) (out []byte) { + // _, out = z.search(in, accept, 0, 2); return + + // for i := z.c; i < len(z.buf); i++ { + // if !accept.isset(z.buf[i]) { + // return z.readToLoopFn(i, nil) + // } + // } + + i := z.c +LOOP: + if i < uint(len(z.buf)) { + if !accept.isset(z.buf[i]) { + // return z.readToLoopFn(i, nil) + // inline readToLoopFn here (for performance) + z.n += (i - z.c) - 1 + out = z.buf[z.c:i] + if z.trb { + z.doTrack(i) + } + z.c = i + return + } + i++ + goto LOOP + } + return z.readToFill(in, accept) } -func (f *decFnInfo) rawExt(rv reflect.Value) { - xtag, xbs := f.dd.decodeExt(false, 0) - rv.Field(0).SetUint(uint64(xtag)) - rv.Field(1).SetBytes(xbs) +func (z *bufioDecReader) readToFill(in []byte, accept *bitset256) (out []byte) { + z.n += uint(len(z.buf)) - z.c + out = append(in, z.buf[z.c:]...) + if z.trb { + z.tr = append(z.tr, z.buf[z.c:]...) + } + var n2 int + var err error + for { + z.c = 0 + z.buf = z.buf[0:cap(z.buf)] + n2, err = z.r.Read(z.buf) + if n2 == 0 && err != nil { + if err == io.EOF { + return // readTo should read until it matches or end is reached + } + panic(err) + } + z.buf = z.buf[:n2] + for i, token := range z.buf { + if !accept.isset(token) { + return z.readToLoopFn(uint(i), out) + } + } + // for i := 0; i < n2; i++ { + // if !accept.isset(z.buf[i]) { + // return z.readToLoopFn(i, out) + // } + // } + out = append(out, z.buf...) + z.n += uint(n2) + if z.trb { + z.tr = append(z.tr, z.buf...) 
+ } + } } -func (f *decFnInfo) ext(rv reflect.Value) { - _, xbs := f.dd.decodeExt(true, f.xfTag) - if fnerr := f.xfFn(rv, xbs); fnerr != nil { - panic(fnerr) +func (z *bufioDecReader) readUntilLoopFn(i uint, out0 []byte) (out []byte) { + z.n += (i - z.c) - 1 + i++ + out = append(out0, z.buf[z.c:i]...) + if z.trb { + // z.tr = append(z.tr, z.buf[z.c:i]...) + z.doTrack(i) } + z.c = i + return } -func (f *decFnInfo) binaryMarshal(rv reflect.Value) { - var bm binaryUnmarshaler - if f.ti.unmIndir == -1 { - bm = rv.Addr().Interface().(binaryUnmarshaler) - } else if f.ti.unmIndir == 0 { - bm = rv.Interface().(binaryUnmarshaler) - } else { - for j, k := int8(0), f.ti.unmIndir; j < k; j++ { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) +func (z *bufioDecReader) readUntil(in []byte, stop byte) (out []byte) { + // _, out = z.search(in, nil, stop, 4); return + + // for i := z.c; i < len(z.buf); i++ { + // if z.buf[i] == stop { + // return z.readUntilLoopFn(i, nil) + // } + // } + + i := z.c +LOOP: + if i < uint(len(z.buf)) { + if z.buf[i] == stop { + // inline readUntilLoopFn + // return z.readUntilLoopFn(i, nil) + z.n += (i - z.c) - 1 + i++ + out = z.buf[z.c:i] + if z.trb { + z.doTrack(i) } - rv = rv.Elem() + z.c = i + return } - bm = rv.Interface().(binaryUnmarshaler) - } - xbs, _ := f.dd.decodeBytes(nil) - if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil { - panic(fnerr) + i++ + goto LOOP } + return z.readUntilFill(in, stop) } -func (f *decFnInfo) kErr(rv reflect.Value) { - decErr("Unhandled value for kind: %v: %s", rv.Kind(), msgBadDesc) +func (z *bufioDecReader) readUntilFill(in []byte, stop byte) (out []byte) { + z.n += uint(len(z.buf)) - z.c + out = append(in, z.buf[z.c:]...) + if z.trb { + z.tr = append(z.tr, z.buf[z.c:]...) + } + var n1 int + var n2 uint + var err error + for { + z.c = 0 + z.buf = z.buf[0:cap(z.buf)] + n1, err = z.r.Read(z.buf) + n2 = uint(n1) + if n2 == 0 && err != nil { + panic(err) + } + z.buf = z.buf[:n2] + for i, token := range z.buf { + if token == stop { + return z.readUntilLoopFn(uint(i), out) + } + } + // for i := 0; i < n2; i++ { + // if z.buf[i] == stop { + // return z.readUntilLoopFn(i, out) + // } + // } + out = append(out, z.buf...) + z.n += n2 + if z.trb { + z.tr = append(z.tr, z.buf...) + } + } } -func (f *decFnInfo) kString(rv reflect.Value) { - rv.SetString(f.dd.decodeString()) +// ------------------------------------ + +var errBytesDecReaderCannotUnread = errors.New("cannot unread last byte read") + +// bytesDecReader is a decReader that reads off a byte slice with zero copying +type bytesDecReader struct { + b []byte // data + c uint // cursor + t uint // track start + // a int // available } -func (f *decFnInfo) kBool(rv reflect.Value) { - rv.SetBool(f.dd.decodeBool()) +func (z *bytesDecReader) reset(in []byte) { + z.b = in + // z.a = len(in) + z.c = 0 + z.t = 0 } -func (f *decFnInfo) kInt(rv reflect.Value) { - rv.SetInt(f.dd.decodeInt(intBitsize)) +func (z *bytesDecReader) numread() uint { + return z.c } -func (f *decFnInfo) kInt64(rv reflect.Value) { - rv.SetInt(f.dd.decodeInt(64)) +func (z *bytesDecReader) unreadn1() { + if z.c == 0 || len(z.b) == 0 { + panic(errBytesDecReaderCannotUnread) + } + z.c-- + // z.a++ } -func (f *decFnInfo) kInt32(rv reflect.Value) { - rv.SetInt(f.dd.decodeInt(32)) +func (z *bytesDecReader) readx(n uint) (bs []byte) { + // slicing from a non-constant start position is more expensive, + // as more computation is required to decipher the pointer start position. 
+ // However, we do it only once, and it's better than reslicing both z.b and return value. + + // if n <= 0 { + // } else if z.a == 0 { + // panic(io.EOF) + // } else if n > z.a { + // panic(io.ErrUnexpectedEOF) + // } else { + // c0 := z.c + // z.c = c0 + n + // z.a = z.a - n + // bs = z.b[c0:z.c] + // } + // return + + if n != 0 { + z.c += n + if z.c > uint(len(z.b)) { + z.c = uint(len(z.b)) + panic(io.EOF) + } + bs = z.b[z.c-n : z.c] + } + return + + // if n == 0 { + // } else if z.c+n > uint(len(z.b)) { + // z.c = uint(len(z.b)) + // panic(io.EOF) + // } else { + // z.c += n + // bs = z.b[z.c-n : z.c] + // } + // return + + // if n == 0 { + // return + // } + // if z.c == uint(len(z.b)) { + // panic(io.EOF) + // } + // if z.c+n > uint(len(z.b)) { + // panic(io.ErrUnexpectedEOF) + // } + // // z.a -= n + // z.c += n + // return z.b[z.c-n : z.c] } -func (f *decFnInfo) kInt8(rv reflect.Value) { - rv.SetInt(f.dd.decodeInt(8)) +func (z *bytesDecReader) readb(bs []byte) { + copy(bs, z.readx(uint(len(bs)))) } -func (f *decFnInfo) kInt16(rv reflect.Value) { - rv.SetInt(f.dd.decodeInt(16)) +func (z *bytesDecReader) readn1() (v uint8) { + if z.c == uint(len(z.b)) { + panic(io.EOF) + } + v = z.b[z.c] + z.c++ + // z.a-- + return } -func (f *decFnInfo) kFloat32(rv reflect.Value) { - rv.SetFloat(f.dd.decodeFloat(true)) +// func (z *bytesDecReader) readn1eof() (v uint8, eof bool) { +// if z.a == 0 { +// eof = true +// return +// } +// v = z.b[z.c] +// z.c++ +// z.a-- +// return +// } + +func (z *bytesDecReader) skip(accept *bitset256) (token byte) { + i := z.c + // if i == len(z.b) { + // goto END + // // panic(io.EOF) + // } + + // Replace loop with goto construct, so that this can be inlined + // for i := z.c; i < blen; i++ { + // if !accept.isset(z.b[i]) { + // token = z.b[i] + // i++ + // z.a -= (i - z.c) + // z.c = i + // return + // } + // } + + // i := z.c +LOOP: + if i < uint(len(z.b)) { + token = z.b[i] + i++ + if accept.isset(token) { + goto LOOP + } + // z.a -= (i - z.c) + z.c = i + return + } + // END: + panic(io.EOF) + // // z.a = 0 + // z.c = blen + // return } -func (f *decFnInfo) kFloat64(rv reflect.Value) { - rv.SetFloat(f.dd.decodeFloat(false)) +func (z *bytesDecReader) readTo(_ []byte, accept *bitset256) (out []byte) { + return z.readToNoInput(accept) } -func (f *decFnInfo) kUint8(rv reflect.Value) { - rv.SetUint(f.dd.decodeUint(8)) +func (z *bytesDecReader) readToNoInput(accept *bitset256) (out []byte) { + i := z.c + if i == uint(len(z.b)) { + panic(io.EOF) + } + + // Replace loop with goto construct, so that this can be inlined + // for i := z.c; i < blen; i++ { + // if !accept.isset(z.b[i]) { + // out = z.b[z.c:i] + // z.a -= (i - z.c) + // z.c = i + // return + // } + // } + // out = z.b[z.c:] + // z.a, z.c = 0, blen + // return + + // i := z.c + // LOOP: + // if i < blen { + // if accept.isset(z.b[i]) { + // i++ + // goto LOOP + // } + // out = z.b[z.c:i] + // z.a -= (i - z.c) + // z.c = i + // return + // } + // out = z.b[z.c:] + // // z.a, z.c = 0, blen + // z.a = 0 + // z.c = blen + // return + + // c := i +LOOP: + if i < uint(len(z.b)) { + if accept.isset(z.b[i]) { + i++ + goto LOOP + } + } + + out = z.b[z.c:i] + // z.a -= (i - z.c) + z.c = i + return // z.b[c:i] + // z.c, i = i, z.c + // return z.b[i:z.c] } -func (f *decFnInfo) kUint64(rv reflect.Value) { - rv.SetUint(f.dd.decodeUint(64)) +func (z *bytesDecReader) readUntil(_ []byte, stop byte) (out []byte) { + return z.readUntilNoInput(stop) } -func (f *decFnInfo) kUint(rv reflect.Value) { - 
rv.SetUint(f.dd.decodeUint(uintBitsize))
+func (z *bytesDecReader) readUntilNoInput(stop byte) (out []byte) {
+	i := z.c
+	// if i == len(z.b) {
+	// 	panic(io.EOF)
+	// }
+
+	// Replace loop with goto construct, so that this can be inlined
+	// for i := z.c; i < blen; i++ {
+	// 	if z.b[i] == stop {
+	// 		i++
+	// 		out = z.b[z.c:i]
+	// 		z.a -= (i - z.c)
+	// 		z.c = i
+	// 		return
+	// 	}
+	// }
+LOOP:
+	if i < uint(len(z.b)) {
+		if z.b[i] == stop {
+			i++
+			out = z.b[z.c:i]
+			// z.a -= (i - z.c)
+			z.c = i
+			return
+		}
+		i++
+		goto LOOP
+	}
+	// z.a = 0
+	// z.c = blen
+	panic(io.EOF)
 }
 
-func (f *decFnInfo) kUint32(rv reflect.Value) {
-	rv.SetUint(f.dd.decodeUint(32))
+func (z *bytesDecReader) track() {
+	z.t = z.c
 }
 
-func (f *decFnInfo) kUint16(rv reflect.Value) {
-	rv.SetUint(f.dd.decodeUint(16))
+func (z *bytesDecReader) stopTrack() (bs []byte) {
+	return z.b[z.t:z.c]
 }
 
-// func (f *decFnInfo) kPtr(rv reflect.Value) {
-// 	debugf(">>>>>>> ??? decode kPtr called - shouldn't get called")
-// 	if rv.IsNil() {
-// 		rv.Set(reflect.New(rv.Type().Elem()))
-// 	}
-// 	f.d.decodeValue(rv.Elem())
+// ----------------------------------------
+
+// func (d *Decoder) builtin(f *codecFnInfo, rv reflect.Value) {
+// 	d.d.DecodeBuiltin(f.ti.rtid, rv2i(rv))
 // }
 
-func (f *decFnInfo) kInterface(rv reflect.Value) {
-	// debugf("\t===> kInterface")
-	if !rv.IsNil() {
-		f.d.decodeValue(rv.Elem())
-		return
+func (d *Decoder) rawExt(f *codecFnInfo, rv reflect.Value) {
+	d.d.DecodeExt(rv2i(rv), 0, nil)
+}
+
+func (d *Decoder) ext(f *codecFnInfo, rv reflect.Value) {
+	d.d.DecodeExt(rv2i(rv), f.xfTag, f.xfFn)
+}
+
+func (d *Decoder) selferUnmarshal(f *codecFnInfo, rv reflect.Value) {
+	rv2i(rv).(Selfer).CodecDecodeSelf(d)
+}
+
+func (d *Decoder) binaryUnmarshal(f *codecFnInfo, rv reflect.Value) {
+	bm := rv2i(rv).(encoding.BinaryUnmarshaler)
+	xbs := d.d.DecodeBytes(nil, true)
+	if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil {
+		panic(fnerr)
 	}
+}
+
+func (d *Decoder) textUnmarshal(f *codecFnInfo, rv reflect.Value) {
+	tm := rv2i(rv).(encoding.TextUnmarshaler)
+	fnerr := tm.UnmarshalText(d.d.DecodeStringAsBytes())
+	if fnerr != nil {
+		panic(fnerr)
+	}
+}
+
+func (d *Decoder) jsonUnmarshal(f *codecFnInfo, rv reflect.Value) {
+	tm := rv2i(rv).(jsonUnmarshaler)
+	// bs := d.d.DecodeBytes(d.b[:], true, true)
+	// grab the bytes to be read, as UnmarshalJSON needs the full JSON value in order to unmarshal it itself.
+	fnerr := tm.UnmarshalJSON(d.nextValueBytes())
+	if fnerr != nil {
+		panic(fnerr)
+	}
+}
+
+func (d *Decoder) kErr(f *codecFnInfo, rv reflect.Value) {
+	d.errorf("no decoding function defined for kind %v", rv.Kind())
+}
+
+// var kIntfCtr uint64
+
+func (d *Decoder) kInterfaceNaked(f *codecFnInfo) (rvn reflect.Value) {
 	// nil interface:
-	// use some hieristics to set the nil interface to an
-	// appropriate value based on the first byte read (byte descriptor bd)
-	v, vt, decodeFurther := f.dd.decodeNaked()
-	if vt == valueTypeNil {
+	// use some heuristics to decode it appropriately
+	// based on the detected next value in the stream.
+	n := d.naked()
+	d.d.DecodeNaked()
+	if n.v == valueTypeNil {
		return
	}
-	// Cannot decode into nil interface with methods (e.g. error, io.Reader, etc)
-	// if non-nil value in stream.
-	if num := f.ti.rt.NumMethod(); num > 0 {
-		decErr("decodeValue: Cannot decode non-nil codec value into nil %v (%v methods)",
-			f.ti.rt, num)
+	// We cannot decode a non-nil stream value into a nil interface that has methods (e.g. io.Reader).
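+	// A hypothetical caller-side sketch of that failure (data and h are
+	// assumptions for illustration):
+	//
+	//	var r io.Reader // nil interface with methods
+	//	err := NewDecoderBytes(data, h).Decode(&r) // errors: no concrete type can be chosen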
+ if f.ti.numMeth > 0 { + d.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth) + return } - var rvn reflect.Value - var useRvn bool - switch vt { + // var useRvn bool + switch n.v { case valueTypeMap: - if f.d.h.MapType == nil { - var m2 map[interface{}]interface{} - v = &m2 + // if json, default to a map type with string keys + mtid := d.mtid + if mtid == 0 { + if d.jsms { + mtid = mapStrIntfTypId + } else { + mtid = mapIntfIntfTypId + } + } + if mtid == mapIntfIntfTypId { + var v2 map[interface{}]interface{} + d.decode(&v2) + rvn = reflect.ValueOf(&v2).Elem() + } else if mtid == mapStrIntfTypId { // for json performance + var v2 map[string]interface{} + d.decode(&v2) + rvn = reflect.ValueOf(&v2).Elem() } else { - rvn = reflect.New(f.d.h.MapType).Elem() - useRvn = true + if d.mtr { + rvn = reflect.New(d.h.MapType) + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + rvn = reflect.New(d.h.MapType).Elem() + d.decodeValue(rvn, nil, true) + } } case valueTypeArray: - if f.d.h.SliceType == nil { - var m2 []interface{} - v = &m2 + if d.stid == 0 || d.stid == intfSliceTypId { + var v2 []interface{} + d.decode(&v2) + rvn = reflect.ValueOf(&v2).Elem() + if reflectArrayOfSupported && d.stid == 0 && d.h.PreferArrayOverSlice { + rvn2 := reflect.New(reflectArrayOf(rvn.Len(), intfTyp)).Elem() + reflect.Copy(rvn2, rvn) + rvn = rvn2 + } } else { - rvn = reflect.New(f.d.h.SliceType).Elem() - useRvn = true + if d.str { + rvn = reflect.New(d.h.SliceType) + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + rvn = reflect.New(d.h.SliceType).Elem() + d.decodeValue(rvn, nil, true) + } } case valueTypeExt: - re := v.(*RawExt) - var bfn func(reflect.Value, []byte) error - rvn, bfn = f.d.h.getDecodeExtForTag(re.Tag) + var v interface{} + tag, bytes := n.u, n.l // calling decode below might taint the values + if bytes == nil { + d.decode(&v) + } + bfn := d.h.getExtForTag(tag) if bfn == nil { - rvn = reflect.ValueOf(*re) - } else if fnerr := bfn(rvn, re.Data); fnerr != nil { - panic(fnerr) + var re RawExt + re.Tag = tag + re.Data = detachZeroCopyBytes(d.bytes, nil, bytes) + re.Value = v + rvn = reflect.ValueOf(&re).Elem() + } else { + rvnA := reflect.New(bfn.rt) + if bytes != nil { + bfn.ext.ReadExt(rv2i(rvnA), bytes) + } else { + bfn.ext.UpdateExt(rv2i(rvnA), v) + } + rvn = rvnA.Elem() } - rv.Set(rvn) - return + case valueTypeNil: + // no-op + case valueTypeInt: + rvn = n.ri() + case valueTypeUint: + rvn = n.ru() + case valueTypeFloat: + rvn = n.rf() + case valueTypeBool: + rvn = n.rb() + case valueTypeString, valueTypeSymbol: + rvn = n.rs() + case valueTypeBytes: + rvn = n.rl() + case valueTypeTime: + rvn = n.rt() + default: + panicv.errorf("kInterfaceNaked: unexpected valueType: %d", n.v) } - if decodeFurther { - if useRvn { - f.d.decodeValue(rvn) - } else if v != nil { - // this v is a pointer, so we need to dereference it when done - f.d.decode(v) - rvn = reflect.ValueOf(v).Elem() - useRvn = true + return +} + +func (d *Decoder) kInterface(f *codecFnInfo, rv reflect.Value) { + // Note: + // A consequence of how kInterface works, is that + // if an interface already contains something, we try + // to decode into what was there before. + // We do not replace with a generic value (as got from decodeNaked). + + // every interface passed here MUST be settable. 
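+	// A hypothetical sketch of that behavior (T is an illustrative type):
+	//
+	//	var v interface{} = &T{} // interface already holds a *T
+	//	err := d.Decode(&v)      // decodes into the existing *T, not a generic value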
+ var rvn reflect.Value + if rv.IsNil() || d.h.InterfaceReset { + // check if mapping to a type: if so, initialize it and move on + rvn = d.h.intf2impl(f.ti.rtid) + if rvn.IsValid() { + rv.Set(rvn) + } else { + rvn = d.kInterfaceNaked(f) + if rvn.IsValid() { + rv.Set(rvn) + } else if d.h.InterfaceReset { + // reset to zero value based on current type in there. + rv.Set(reflect.Zero(rv.Elem().Type())) + } + return } + } else { + // now we have a non-nil interface value, meaning it contains a type + rvn = rv.Elem() } - if useRvn { - rv.Set(rvn) - } else if v != nil { - rv.Set(reflect.ValueOf(v)) + if d.d.TryDecodeAsNil() { + rv.Set(reflect.Zero(rvn.Type())) + return + } + + // Note: interface{} is settable, but underlying type may not be. + // Consequently, we MAY have to create a decodable value out of the underlying value, + // decode into it, and reset the interface itself. + // fmt.Printf(">>>> kInterface: rvn type: %v, rv type: %v\n", rvn.Type(), rv.Type()) + + rvn2, canDecode := isDecodeable(rvn) + if canDecode { + d.decodeValue(rvn2, nil, true) + return + } + + rvn2 = reflect.New(rvn.Type()).Elem() + rvn2.Set(rvn) + d.decodeValue(rvn2, nil, true) + rv.Set(rvn2) +} + +func decStructFieldKey(dd decDriver, keyType valueType, b *[decScratchByteArrayLen]byte) (rvkencname []byte) { + // use if-else-if, not switch (which compiles to binary-search) + // since keyType is typically valueTypeString, branch prediction is pretty good. + + if keyType == valueTypeString { + rvkencname = dd.DecodeStringAsBytes() + } else if keyType == valueTypeInt { + rvkencname = strconv.AppendInt(b[:0], dd.DecodeInt64(), 10) + } else if keyType == valueTypeUint { + rvkencname = strconv.AppendUint(b[:0], dd.DecodeUint64(), 10) + } else if keyType == valueTypeFloat { + rvkencname = strconv.AppendFloat(b[:0], dd.DecodeFloat64(), 'f', -1, 64) + } else { + rvkencname = dd.DecodeStringAsBytes() } + return rvkencname } -func (f *decFnInfo) kStruct(rv reflect.Value) { +func (d *Decoder) kStruct(f *codecFnInfo, rv reflect.Value) { fti := f.ti - if currEncodedType := f.dd.currentEncodedType(); currEncodedType == valueTypeMap { - containerLen := f.dd.readMapLen() + dd := d.d + elemsep := d.esep + sfn := structFieldNode{v: rv, update: true} + ctyp := dd.ContainerType() + var mf MissingFielder + if fti.mf { + mf = rv2i(rv).(MissingFielder) + } else if fti.mfp { + mf = rv2i(rv.Addr()).(MissingFielder) + } + if ctyp == valueTypeMap { + containerLen := dd.ReadMapStart() if containerLen == 0 { + dd.ReadMapEnd() return } - tisfi := fti.sfi - for j := 0; j < containerLen; j++ { - // var rvkencname string - // ddecode(&rvkencname) - f.dd.initReadNext() - rvkencname := f.dd.decodeString() - // rvksi := ti.getForEncName(rvkencname) + d.depthIncr() + tisfi := fti.sfiSort + hasLen := containerLen >= 0 + + var rvkencname []byte + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if elemsep { + dd.ReadMapElemKey() + } + rvkencname = decStructFieldKey(dd, fti.keyType, &d.b) + if elemsep { + dd.ReadMapElemValue() + } if k := fti.indexForEncName(rvkencname); k > -1 { - sfik := tisfi[k] - if sfik.i != -1 { - f.d.decodeValue(rv.Field(int(sfik.i))) + si := tisfi[k] + if dd.TryDecodeAsNil() { + si.setToZeroValue(rv) } else { - f.d.decEmbeddedField(rv, sfik.is) + d.decodeValue(sfn.field(si), nil, true) } - // f.d.decodeValue(ti.field(k, rv)) - } else { - if f.d.h.ErrorIfNoField { - decErr("No matching struct field found when decoding stream map with key: %v", - rvkencname) - } else { - var nilintf0 interface{} - 
f.d.decodeValue(reflect.ValueOf(&nilintf0).Elem()) + } else if mf != nil { + // store rvkencname in new []byte, as it previously shares Decoder.b, which is used in decode + name2 := rvkencname + rvkencname = make([]byte, len(rvkencname)) + copy(rvkencname, name2) + + var f interface{} + // xdebugf("kStruct: mf != nil: before decode: rvkencname: %s", rvkencname) + d.decode(&f) + // xdebugf("kStruct: mf != nil: after decode: rvkencname: %s", rvkencname) + if !mf.CodecMissingField(rvkencname, f) && d.h.ErrorIfNoField { + d.errorf("no matching struct field found when decoding stream map with key: %s ", + stringView(rvkencname)) } + } else { + d.structFieldNotFound(-1, stringView(rvkencname)) } + // keepAlive4StringView(rvkencnameB) // not needed, as reference is outside loop } - } else if currEncodedType == valueTypeArray { - containerLen := f.dd.readArrayLen() + dd.ReadMapEnd() + d.depthDecr() + } else if ctyp == valueTypeArray { + containerLen := dd.ReadArrayStart() if containerLen == 0 { + dd.ReadArrayEnd() return } - for j, si := range fti.sfip { - if j == containerLen { + d.depthIncr() + // Not much gain from doing it two ways for array. + // Arrays are not used as much for structs. + hasLen := containerLen >= 0 + var checkbreak bool + for j, si := range fti.sfiSrc { + if hasLen && j == containerLen { break } - if si.i != -1 { - f.d.decodeValue(rv.Field(int(si.i))) + if !hasLen && dd.CheckBreak() { + checkbreak = true + break + } + if elemsep { + dd.ReadArrayElem() + } + if dd.TryDecodeAsNil() { + si.setToZeroValue(rv) } else { - f.d.decEmbeddedField(rv, si.is) + d.decodeValue(sfn.field(si), nil, true) } } - if containerLen > len(fti.sfip) { + if (hasLen && containerLen > len(fti.sfiSrc)) || (!hasLen && !checkbreak) { // read remaining values and throw away - for j := len(fti.sfip); j < containerLen; j++ { - var nilintf0 interface{} - f.d.decodeValue(reflect.ValueOf(&nilintf0).Elem()) + for j := len(fti.sfiSrc); ; j++ { + if (hasLen && j == containerLen) || (!hasLen && dd.CheckBreak()) { + break + } + if elemsep { + dd.ReadArrayElem() + } + d.structFieldNotFound(j, "") } } + dd.ReadArrayEnd() + d.depthDecr() } else { - decErr("Only encoded map or array can be decoded into a struct. (valueType: %x)", - currEncodedType) + d.errorstr(errstrOnlyMapOrArrayCanDecodeIntoStruct) + return } } -func (f *decFnInfo) kSlice(rv reflect.Value) { +func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) { // A slice can be set from a map or array in stream. - currEncodedType := f.dd.currentEncodedType() + // This way, the order can be kept (as order is lost with map). 
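+	// A minimal caller-side sketch (jh is an illustrative JsonHandle):
+	//
+	//	var v []int
+	//	err := NewDecoderBytes([]byte("[1,2,3]"), &jh).Decode(&v) // v == []int{1, 2, 3}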
+ ti := f.ti + if f.seq == seqTypeChan && ti.chandir&uint8(reflect.SendDir) == 0 { + d.errorf("receive-only channel cannot be decoded") + } + dd := d.d + rtelem0 := ti.elem + ctyp := dd.ContainerType() + if ctyp == valueTypeBytes || ctyp == valueTypeString { + // you can only decode bytes or string in the stream into a slice or array of bytes + if !(ti.rtid == uint8SliceTypId || rtelem0.Kind() == reflect.Uint8) { + d.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt) + } + if f.seq == seqTypeChan { + bs2 := dd.DecodeBytes(nil, true) + irv := rv2i(rv) + ch, ok := irv.(chan<- byte) + if !ok { + ch = irv.(chan byte) + } + for _, b := range bs2 { + ch <- b + } + } else { + rvbs := rv.Bytes() + bs2 := dd.DecodeBytes(rvbs, false) + // if rvbs == nil && bs2 != nil || rvbs != nil && bs2 == nil || len(bs2) != len(rvbs) { + if !(len(bs2) > 0 && len(bs2) == len(rvbs) && &bs2[0] == &rvbs[0]) { + if rv.CanSet() { + rv.SetBytes(bs2) + } else if len(rvbs) > 0 && len(bs2) > 0 { + copy(rvbs, bs2) + } + } + } + return + } - switch currEncodedType { - case valueTypeBytes, valueTypeString: - if f.ti.rtid == uint8SliceTypId || f.ti.rt.Elem().Kind() == reflect.Uint8 { - if bs2, changed2 := f.dd.decodeBytes(rv.Bytes()); changed2 { - rv.SetBytes(bs2) + // array := f.seq == seqTypeChan + + slh, containerLenS := d.decSliceHelperStart() // only expects valueType(Array|Map) + + // an array can never return a nil slice. so no need to check f.array here. + if containerLenS == 0 { + if rv.CanSet() { + if f.seq == seqTypeSlice { + if rv.IsNil() { + rv.Set(reflect.MakeSlice(ti.rt, 0, 0)) + } else { + rv.SetLen(0) + } + } else if f.seq == seqTypeChan { + if rv.IsNil() { + rv.Set(reflect.MakeChan(ti.rt, 0)) + } } - return } + slh.End() + return } - if shortCircuitReflectToFastPath && rv.CanAddr() { - switch f.ti.rtid { - case intfSliceTypId: - f.d.decSliceIntf(rv.Addr().Interface().(*[]interface{}), currEncodedType, f.array) - return - case uint64SliceTypId: - f.d.decSliceUint64(rv.Addr().Interface().(*[]uint64), currEncodedType, f.array) - return - case int64SliceTypId: - f.d.decSliceInt64(rv.Addr().Interface().(*[]int64), currEncodedType, f.array) - return - case strSliceTypId: - f.d.decSliceStr(rv.Addr().Interface().(*[]string), currEncodedType, f.array) - return + d.depthIncr() + + rtelem0Size := int(rtelem0.Size()) + rtElem0Kind := rtelem0.Kind() + rtelem0Mut := !isImmutableKind(rtElem0Kind) + rtelem := rtelem0 + rtelemkind := rtelem.Kind() + for rtelemkind == reflect.Ptr { + rtelem = rtelem.Elem() + rtelemkind = rtelem.Kind() + } + + var fn *codecFn + + var rvCanset = rv.CanSet() + var rvChanged bool + var rv0 = rv + var rv9 reflect.Value + + rvlen := rv.Len() + rvcap := rv.Cap() + hasLen := containerLenS > 0 + if hasLen && f.seq == seqTypeSlice { + if containerLenS > rvcap { + oldRvlenGtZero := rvlen > 0 + rvlen = decInferLen(containerLenS, d.h.MaxInitLen, int(rtelem0.Size())) + if rvlen <= rvcap { + if rvCanset { + rv.SetLen(rvlen) + } + } else if rvCanset { + rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) + rvcap = rvlen + rvChanged = true + } else { + d.errorf("cannot decode into non-settable slice") + } + if rvChanged && oldRvlenGtZero && !isImmutableKind(rtelem0.Kind()) { + reflect.Copy(rv, rv0) // only copy up to length NOT cap i.e. 
rv0.Slice(0, rvcap) + } + } else if containerLenS != rvlen { + rvlen = containerLenS + if rvCanset { + rv.SetLen(rvlen) + } + // else { + // rv = rv.Slice(0, rvlen) + // rvChanged = true + // d.errorf("cannot decode into non-settable slice") + // } } } - containerLen, containerLenS := decContLens(f.dd, currEncodedType) + // consider creating new element once, and just decoding into it. + var rtelem0Zero reflect.Value + var rtelem0ZeroValid bool + var decodeAsNil bool + var j int + + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && (f.seq == seqTypeSlice || f.seq == seqTypeChan) && rv.IsNil() { + if hasLen { + rvlen = decInferLen(containerLenS, d.h.MaxInitLen, rtelem0Size) + } else if f.seq == seqTypeSlice { + rvlen = decDefSliceCap + } else { + rvlen = decDefChanCap + } + if rvCanset { + if f.seq == seqTypeSlice { + rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) + rvChanged = true + } else { // chan + rv = reflect.MakeChan(ti.rt, rvlen) + rvChanged = true + } + } else { + d.errorf("cannot decode into non-settable slice") + } + } + slh.ElemContainerState(j) + decodeAsNil = dd.TryDecodeAsNil() + if f.seq == seqTypeChan { + if decodeAsNil { + rv.Send(reflect.Zero(rtelem0)) + continue + } + if rtelem0Mut || !rv9.IsValid() { // || (rtElem0Kind == reflect.Ptr && rv9.IsNil()) { + rv9 = reflect.New(rtelem0).Elem() + } + if fn == nil { + fn = d.h.fn(rtelem, true, true) + } + d.decodeValue(rv9, fn, true) + rv.Send(rv9) + } else { + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= rvlen { + if f.seq == seqTypeArray { + d.arrayCannotExpand(rvlen, j+1) + decodeIntoBlank = true + } else { // if f.seq == seqTypeSlice + // rv = reflect.Append(rv, reflect.Zero(rtelem0)) // append logic + varargs + var rvcap2 int + var rvErrmsg2 string + rv9, rvcap2, rvChanged, rvErrmsg2 = + expandSliceRV(rv, ti.rt, rvCanset, rtelem0Size, 1, rvlen, rvcap) + if rvErrmsg2 != "" { + d.errorf(rvErrmsg2) + } + rvlen++ + if rvChanged { + rv = rv9 + rvcap = rvcap2 + } + } + } + if decodeIntoBlank { + if !decodeAsNil { + d.swallow() + } + } else { + rv9 = rv.Index(j) + if d.h.SliceElementReset || decodeAsNil { + if !rtelem0ZeroValid { + rtelem0ZeroValid = true + rtelem0Zero = reflect.Zero(rtelem0) + } + rv9.Set(rtelem0Zero) + if decodeAsNil { + continue + } + } - // an array can never return a nil slice. so no need to check f.array here. 
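+				// The element's decode function is looked up once, cached in fn,
+				// and reused for every remaining element of the container.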
+ if fn == nil { + fn = d.h.fn(rtelem, true, true) + } + d.decodeValue(rv9, fn, true) + } + } + } + if f.seq == seqTypeSlice { + if j < rvlen { + if rv.CanSet() { + rv.SetLen(j) + } else if rvCanset { + rv = rv.Slice(0, j) + rvChanged = true + } // else { d.errorf("kSlice: cannot change non-settable slice") } + rvlen = j + } else if j == 0 && rv.IsNil() { + if rvCanset { + rv = reflect.MakeSlice(ti.rt, 0, 0) + rvChanged = true + } // else { d.errorf("kSlice: cannot change non-settable slice") } + } + } + slh.End() + + if rvChanged { // infers rvCanset=true, so it can be reset + rv0.Set(rv) + } + + d.depthDecr() +} +// func (d *Decoder) kArray(f *codecFnInfo, rv reflect.Value) { +// // d.decodeValueFn(rv.Slice(0, rv.Len())) +// f.kSlice(rv.Slice(0, rv.Len())) +// } + +func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) { + dd := d.d + containerLen := dd.ReadMapStart() + elemsep := d.esep + ti := f.ti if rv.IsNil() { - rv.Set(reflect.MakeSlice(f.ti.rt, containerLenS, containerLenS)) + rvlen := decInferLen(containerLen, d.h.MaxInitLen, int(ti.key.Size()+ti.elem.Size())) + rv.Set(makeMapReflect(ti.rt, rvlen)) + } + + if containerLen == 0 { + dd.ReadMapEnd() + return + } + + d.depthIncr() + + ktype, vtype := ti.key, ti.elem + ktypeId := rt2id(ktype) + vtypeKind := vtype.Kind() + + var keyFn, valFn *codecFn + var ktypeLo, vtypeLo reflect.Type + + for ktypeLo = ktype; ktypeLo.Kind() == reflect.Ptr; ktypeLo = ktypeLo.Elem() { + } + + for vtypeLo = vtype; vtypeLo.Kind() == reflect.Ptr; vtypeLo = vtypeLo.Elem() { + } + + var mapGet, mapSet bool + rvvImmut := isImmutableKind(vtypeKind) + if !d.h.MapValueReset { + // if pointer, mapGet = true + // if interface, mapGet = true if !DecodeNakedAlways (else false) + // if builtin, mapGet = false + // else mapGet = true + if vtypeKind == reflect.Ptr { + mapGet = true + } else if vtypeKind == reflect.Interface { + if !d.h.InterfaceReset { + mapGet = true + } + } else if !rvvImmut { + mapGet = true + } + } + + var rvk, rvkp, rvv, rvz reflect.Value + rvkMut := !isImmutableKind(ktype.Kind()) // if ktype is immutable, then re-use the same rvk. + ktypeIsString := ktypeId == stringTypId + ktypeIsIntf := ktypeId == intfTypId + hasLen := containerLen > 0 + var kstrbs []byte + + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if rvkMut || !rvkp.IsValid() { + rvkp = reflect.New(ktype) + rvk = rvkp.Elem() + } + if elemsep { + dd.ReadMapElemKey() + } + // if false && dd.TryDecodeAsNil() { // nil cannot be a map key, so disregard this block + // // Previously, if a nil key, we just ignored the mapped value and continued. + // // However, that makes the result of encoding and then decoding map[intf]intf{nil:nil} + // // to be an empty map. + // // Instead, we treat a nil key as the zero value of the type. + // rvk.Set(reflect.Zero(ktype)) + // } else if ktypeIsString { + if ktypeIsString { + kstrbs = dd.DecodeStringAsBytes() + rvk.SetString(stringView(kstrbs)) + // NOTE: if doing an insert, you MUST use a real string (not stringview) + } else { + if keyFn == nil { + keyFn = d.h.fn(ktypeLo, true, true) + } + d.decodeValue(rvk, keyFn, true) + } + // special case if a byte array. + if ktypeIsIntf { + if rvk2 := rvk.Elem(); rvk2.IsValid() { + if rvk2.Type() == uint8SliceTyp { + rvk = reflect.ValueOf(d.string(rvk2.Bytes())) + } else { + rvk = rvk2 + } + } + } + + if elemsep { + dd.ReadMapElemValue() + } + + // Brittle, but OK per TryDecodeAsNil() contract. + // i.e. 
TryDecodeAsNil never shares slices with other decDriver procedures
+		if dd.TryDecodeAsNil() {
+			if ktypeIsString {
+				rvk.SetString(d.string(kstrbs))
+			}
+			if d.h.DeleteOnNilMapValue {
+				rv.SetMapIndex(rvk, reflect.Value{})
+			} else {
+				rv.SetMapIndex(rvk, reflect.Zero(vtype))
+			}
+			continue
+		}
+
+		mapSet = true // set to false if you do a get, and it's a non-nil pointer
+		if mapGet {
+			// mapGet true only in case where kind=Ptr|Interface or kind is otherwise mutable.
+			rvv = rv.MapIndex(rvk)
+			if !rvv.IsValid() {
+				rvv = reflect.New(vtype).Elem()
+			} else if vtypeKind == reflect.Ptr {
+				if rvv.IsNil() {
+					rvv = reflect.New(vtype).Elem()
+				} else {
+					mapSet = false
+				}
+			} else if vtypeKind == reflect.Interface {
+				// not addressable, and thus not settable.
+				// we MUST create a settable/addressable variant
+				rvv2 := reflect.New(rvv.Type()).Elem()
+				if !rvv.IsNil() {
+					rvv2.Set(rvv)
+				}
+				rvv = rvv2
+			}
+			// else it is ~mutable, and we can just decode into it directly
+		} else if rvvImmut {
+			if !rvz.IsValid() {
+				rvz = reflect.New(vtype).Elem()
+			}
+			rvv = rvz
+		} else {
+			rvv = reflect.New(vtype).Elem()
+		}
+
+		// We MUST be done with the stringview of the key, before decoding the value
+		// so that we don't bastardize the reused byte array.
+		if mapSet && ktypeIsString {
+			rvk.SetString(d.string(kstrbs))
+		}
+		if valFn == nil {
+			valFn = d.h.fn(vtypeLo, true, true)
+		}
+		d.decodeValue(rvv, valFn, true)
+		// d.decodeValueFn(rvv, valFn)
+		if mapSet {
+			rv.SetMapIndex(rvk, rvv)
+		}
+		// if ktypeIsString {
+		// 	// keepAlive4StringView(kstrbs) // not needed, as reference is outside loop
+		// }
+	}
+
+	dd.ReadMapEnd()
+
+	d.depthDecr()
+}
+
+// decNaked is used to keep track of the primitives decoded.
+// Without it, we would have to decode each primitive and wrap it
+// in an interface{}, causing an allocation.
+// In this model, the primitives are decoded in a "pseudo-atomic" fashion,
+// so we can rest assured that no other decoding happens while these
+// primitives are being decoded.
+//
+// maps and arrays are not handled by this mechanism.
+// However, RawExt is, and we accommodate extensions that decode
+// RawExt from DecodeNaked, but need to decode the value subsequently.
+// kInterfaceNaked and swallow, which call DecodeNaked, handle this caveat.
+//
+// However, decNaked also keeps some arrays of default maps and slices
+// used in DecodeNaked. This way, we can get a pointer to it
+// without causing a new heap allocation.
+//
+// kInterfaceNaked will ensure that there is no allocation for the common
+// uses.
+
+type decNaked struct {
+	// r RawExt // used for RawExt, uint, []byte.
+
+	// primitives below
+	u uint64
+	i int64
+	f float64
+	l []byte
+	s string
+
+	// ---- cpu cache line boundary?
+	t time.Time
+	b bool
+
+	// state
+	v valueType
+	_ [6]bool // padding
+
+	// ru, ri, rf, rl, rs, rb, rt reflect.Value // mapping to the primitives above
+	//
+	// _ [3]uint64 // padding
+}
+
+// func (n *decNaked) init() {
+// 	n.ru = reflect.ValueOf(&n.u).Elem()
+// 	n.ri = reflect.ValueOf(&n.i).Elem()
+// 	n.rf = reflect.ValueOf(&n.f).Elem()
+// 	n.rl = reflect.ValueOf(&n.l).Elem()
+// 	n.rs = reflect.ValueOf(&n.s).Elem()
+// 	n.rt = reflect.ValueOf(&n.t).Elem()
+// 	n.rb = reflect.ValueOf(&n.b).Elem()
+// 	// n.rr[] = reflect.ValueOf(&n.)
+// }
+
+// type decNakedPooler struct {
+// 	n   *decNaked
+// 	nsp *sync.Pool
+// }
+
+// // naked must be called before each call to .DecodeNaked, as they will use it.
+// func (d *decNakedPooler) naked() *decNaked { +// if d.n == nil { +// // consider one of: +// // - get from sync.Pool (if GC is frequent, there's no value here) +// // - new alloc (safest. only init'ed if it a naked decode will be done) +// // - field in Decoder (makes the Decoder struct very big) +// // To support using a decoder where a DecodeNaked is not needed, +// // we prefer #1 or #2. +// // d.n = new(decNaked) // &d.nv // new(decNaked) // grab from a sync.Pool +// // d.n.init() +// var v interface{} +// d.nsp, v = pool.decNaked() +// d.n = v.(*decNaked) +// } +// return d.n +// } + +// func (d *decNakedPooler) end() { +// if d.n != nil { +// // if n != nil, then nsp != nil (they are always set together) +// d.nsp.Put(d.n) +// d.n, d.nsp = nil, nil +// } +// } + +// type rtid2rv struct { +// rtid uintptr +// rv reflect.Value +// } + +// -------------- + +type decReaderSwitch struct { + rb bytesDecReader + // ---- cpu cache line boundary? + ri *ioDecReader + bi *bufioDecReader + + mtr, str bool // whether maptype or slicetype are known types + + be bool // is binary encoding + js bool // is json handle + jsms bool // is json handle, and MapKeyAsString + esep bool // has elem separators + + // typ entryType + bytes bool // is bytes reader + bufio bool // is this a bufioDecReader? +} + +// numread, track and stopTrack are always inlined, as they just check int fields, etc. + +/* +func (z *decReaderSwitch) numread() int { + switch z.typ { + case entryTypeBytes: + return z.rb.numread() + case entryTypeIo: + return z.ri.numread() + default: + return z.bi.numread() + } +} +func (z *decReaderSwitch) track() { + switch z.typ { + case entryTypeBytes: + z.rb.track() + case entryTypeIo: + z.ri.track() + default: + z.bi.track() + } +} +func (z *decReaderSwitch) stopTrack() []byte { + switch z.typ { + case entryTypeBytes: + return z.rb.stopTrack() + case entryTypeIo: + return z.ri.stopTrack() + default: + return z.bi.stopTrack() + } +} + +func (z *decReaderSwitch) unreadn1() { + switch z.typ { + case entryTypeBytes: + z.rb.unreadn1() + case entryTypeIo: + z.ri.unreadn1() + default: + z.bi.unreadn1() + } +} +func (z *decReaderSwitch) readx(n int) []byte { + switch z.typ { + case entryTypeBytes: + return z.rb.readx(n) + case entryTypeIo: + return z.ri.readx(n) + default: + return z.bi.readx(n) + } +} +func (z *decReaderSwitch) readb(s []byte) { + switch z.typ { + case entryTypeBytes: + z.rb.readb(s) + case entryTypeIo: + z.ri.readb(s) + default: + z.bi.readb(s) + } +} +func (z *decReaderSwitch) readn1() uint8 { + switch z.typ { + case entryTypeBytes: + return z.rb.readn1() + case entryTypeIo: + return z.ri.readn1() + default: + return z.bi.readn1() + } +} +func (z *decReaderSwitch) skip(accept *bitset256) (token byte) { + switch z.typ { + case entryTypeBytes: + return z.rb.skip(accept) + case entryTypeIo: + return z.ri.skip(accept) + default: + return z.bi.skip(accept) + } +} +func (z *decReaderSwitch) readTo(in []byte, accept *bitset256) (out []byte) { + switch z.typ { + case entryTypeBytes: + return z.rb.readTo(in, accept) + case entryTypeIo: + return z.ri.readTo(in, accept) + default: + return z.bi.readTo(in, accept) + } +} +func (z *decReaderSwitch) readUntil(in []byte, stop byte) (out []byte) { + switch z.typ { + case entryTypeBytes: + return z.rb.readUntil(in, stop) + case entryTypeIo: + return z.ri.readUntil(in, stop) + default: + return z.bi.readUntil(in, stop) + } +} + +*/ + +// the if/else-if/else block is expensive to inline. 
+// Each node of this construct costs a lot and dominates the budget. +// Best to only do an if fast-path else block (so fast-path is inlined). +// This is irrespective of inlineExtraCallCost set in $GOROOT/src/cmd/compile/internal/gc/inl.go +// +// In decReaderSwitch methods below, we delegate all IO functions into their own methods. +// This allows for the inlining of the common path when z.bytes=true. +// Go 1.12+ supports inlining methods with up to 1 inlined function (or 2 if no other constructs). + +func (z *decReaderSwitch) numread() uint { + if z.bytes { + return z.rb.numread() + } else if z.bufio { + return z.bi.numread() + } else { + return z.ri.numread() + } +} +func (z *decReaderSwitch) track() { + if z.bytes { + z.rb.track() + } else if z.bufio { + z.bi.track() + } else { + z.ri.track() + } +} +func (z *decReaderSwitch) stopTrack() []byte { + if z.bytes { + return z.rb.stopTrack() + } else if z.bufio { + return z.bi.stopTrack() + } else { + return z.ri.stopTrack() + } +} + +// func (z *decReaderSwitch) unreadn1() { +// if z.bytes { +// z.rb.unreadn1() +// } else { +// z.unreadn1IO() +// } +// } +// func (z *decReaderSwitch) unreadn1IO() { +// if z.bufio { +// z.bi.unreadn1() +// } else { +// z.ri.unreadn1() +// } +// } + +func (z *decReaderSwitch) unreadn1() { + if z.bytes { + z.rb.unreadn1() + } else if z.bufio { + z.bi.unreadn1() + } else { + z.ri.unreadn1() // not inlined + } +} + +func (z *decReaderSwitch) readx(n uint) []byte { + if z.bytes { + return z.rb.readx(n) + } + return z.readxIO(n) +} +func (z *decReaderSwitch) readxIO(n uint) []byte { + if z.bufio { + return z.bi.readx(n) + } + return z.ri.readx(n) +} + +func (z *decReaderSwitch) readb(s []byte) { + if z.bytes { + z.rb.readb(s) + } else { + z.readbIO(s) + } +} + +//go:noinline - fallback for io, ensures z.bytes path is inlined +func (z *decReaderSwitch) readbIO(s []byte) { + if z.bufio { + z.bi.readb(s) + } else { + z.ri.readb(s) + } +} + +func (z *decReaderSwitch) readn1() uint8 { + if z.bytes { + return z.rb.readn1() + } + return z.readn1IO() +} +func (z *decReaderSwitch) readn1IO() uint8 { + if z.bufio { + return z.bi.readn1() } + return z.ri.readn1() +} - if containerLen == 0 { - return +func (z *decReaderSwitch) skip(accept *bitset256) (token byte) { + if z.bytes { + return z.rb.skip(accept) } - - if rvcap, rvlen := rv.Len(), rv.Cap(); containerLenS > rvcap { - if f.array { // !rv.CanSet() - decErr(msgDecCannotExpandArr, rvcap, containerLenS) - } - rvn := reflect.MakeSlice(f.ti.rt, containerLenS, containerLenS) - if rvlen > 0 { - reflect.Copy(rvn, rv) - } - rv.Set(rvn) - } else if containerLenS > rvlen { - rv.SetLen(containerLenS) + return z.skipIO(accept) +} +func (z *decReaderSwitch) skipIO(accept *bitset256) (token byte) { + if z.bufio { + return z.bi.skip(accept) } + return z.ri.skip(accept) +} - for j := 0; j < containerLenS; j++ { - f.d.decodeValue(rv.Index(j)) +func (z *decReaderSwitch) readTo(in []byte, accept *bitset256) (out []byte) { + if z.bytes { + return z.rb.readToNoInput(accept) // z.rb.readTo(in, accept) } + return z.readToIO(in, accept) } -func (f *decFnInfo) kArray(rv reflect.Value) { - // f.d.decodeValue(rv.Slice(0, rv.Len())) - f.kSlice(rv.Slice(0, rv.Len())) +//go:noinline - fallback for io, ensures z.bytes path is inlined +func (z *decReaderSwitch) readToIO(in []byte, accept *bitset256) (out []byte) { + if z.bufio { + return z.bi.readTo(in, accept) + } + return z.ri.readTo(in, accept) +} +func (z *decReaderSwitch) readUntil(in []byte, stop byte) (out []byte) { + if z.bytes { + return 
z.rb.readUntilNoInput(stop)
+	}
+	return z.readUntilIO(in, stop)
+}
 
-func (f *decFnInfo) kMap(rv reflect.Value) {
-	if shortCircuitReflectToFastPath && rv.CanAddr() {
-		switch f.ti.rtid {
-		case mapStrIntfTypId:
-			f.d.decMapStrIntf(rv.Addr().Interface().(*map[string]interface{}))
-			return
-		case mapIntfIntfTypId:
-			f.d.decMapIntfIntf(rv.Addr().Interface().(*map[interface{}]interface{}))
-			return
-		case mapInt64IntfTypId:
-			f.d.decMapInt64Intf(rv.Addr().Interface().(*map[int64]interface{}))
-			return
-		case mapUint64IntfTypId:
-			f.d.decMapUint64Intf(rv.Addr().Interface().(*map[uint64]interface{}))
-			return
-		}
+func (z *decReaderSwitch) readUntilIO(in []byte, stop byte) (out []byte) {
+	if z.bufio {
+		return z.bi.readUntil(in, stop)
 	}
+	return z.ri.readUntil(in, stop)
+}
 
-	containerLen := f.dd.readMapLen()
+// Decoder reads and decodes an object from an input stream in a supported format.
+//
+// Decoder is NOT safe for concurrent use, i.e. a Decoder cannot be used
+// concurrently in multiple goroutines.
+//
+// However, as Decoder could be allocation heavy to initialize, a Reset method is provided
+// so its state can be reused to decode new input streams repeatedly.
+// This is the idiomatic way to use it.
+type Decoder struct {
+	panicHdl
+	// hopefully, reduce dereferencing cost by laying the decReader inside the Decoder.
+	// Try to put things that go together to fit within a cache line (8 words).
 
-	if rv.IsNil() {
-		rv.Set(reflect.MakeMap(f.ti.rt))
-	}
+	d decDriver
 
-	if containerLen == 0 {
-		return
-	}
+	// NOTE: Decoder shouldn't call its read methods,
+	// as the handler MAY need to do some coordination.
+	r *decReaderSwitch
 
-	ktype, vtype := f.ti.rt.Key(), f.ti.rt.Elem()
-	ktypeId := reflect.ValueOf(ktype).Pointer()
-	for j := 0; j < containerLen; j++ {
-		rvk := reflect.New(ktype).Elem()
-		f.d.decodeValue(rvk)
+	// bi *bufioDecReader
+	// cache the mapTypeId and sliceTypeId for faster comparisons
+	mtid uintptr
+	stid uintptr
 
-		// special case if a byte array.
-		// if ktype == intfTyp {
-		if ktypeId == intfTypId {
-			rvk = rvk.Elem()
-			if rvk.Type() == uint8SliceTyp {
-				rvk = reflect.ValueOf(string(rvk.Bytes()))
-			}
-		}
-		rvv := rv.MapIndex(rvk)
-		if !rvv.IsValid() || !rvv.CanSet() {
-			rvv = reflect.New(vtype).Elem()
-		}
+	hh Handle
+	h  *BasicHandle
 
-		f.d.decodeValue(rvv)
-		rv.SetMapIndex(rvk, rvv)
-	}
-}
+	// ---- cpu cache line boundary?
+	decReaderSwitch
 
-// ----------------------------------------
+	// ---- cpu cache line boundary?
+	n decNaked
 
-type decFn struct {
-	i *decFnInfo
-	f func(*decFnInfo, reflect.Value)
-}
+	// cr containerStateRecv
+	err error
 
-// A Decoder reads and decodes an object from an input stream in the codec format.
-type Decoder struct {
-	r decReader
-	d decDriver
-	h *BasicHandle
-	f map[uintptr]decFn
-	x []uintptr
-	s []decFn
+	depth    int16
+	maxdepth int16
+
+	_ [4]uint8 // padding
+
+	is map[string]string // used for interning strings
+
+	// ---- cpu cache line boundary?
+	b [decScratchByteArrayLen]byte // scratch buffer, used by Decoder and xxxEncDrivers
+
+	// padding - false sharing help // modify 232 if Decoder struct changes.
+	// _ [cacheLineSize - 232%cacheLineSize]byte
 }
 
 // NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader.
 //
-// For efficiency, Users are encouraged to pass in a memory buffered writer
-// (eg bufio.Reader, bytes.Buffer).
+// For efficiency, users are encouraged to configure ReaderBufferSize on the handle
+// OR pass in a memory buffered reader (eg bufio.Reader, bytes.Buffer).
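+//
+// A minimal usage sketch (the JsonHandle and conn reader are assumptions for
+// illustration; any Handle and io.Reader work):
+//
+//	var h JsonHandle
+//	h.ReaderBufferSize = 4096 // buffered reads handled internally
+//	d := NewDecoder(conn, &h)
+//	var v map[string]interface{}
+//	err := d.Decode(&v)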
func NewDecoder(r io.Reader, h Handle) *Decoder { - z := ioDecReader{ - r: r, - } - z.br, _ = r.(io.ByteReader) - return &Decoder{r: &z, d: h.newDecDriver(&z), h: h.getBasicHandle()} + d := newDecoder(h) + d.Reset(r) + return d } // NewDecoderBytes returns a Decoder which efficiently decodes directly // from a byte slice with zero copying. func NewDecoderBytes(in []byte, h Handle) *Decoder { - z := bytesDecReader{ - b: in, - a: len(in), + d := newDecoder(h) + d.ResetBytes(in) + return d +} + +// var defaultDecNaked decNaked + +func newDecoder(h Handle) *Decoder { + d := &Decoder{h: basicHandle(h), err: errDecoderNotInitialized} + d.bytes = true + if useFinalizers { + runtime.SetFinalizer(d, (*Decoder).finalize) + // xdebugf(">>>> new(Decoder) with finalizer") + } + d.r = &d.decReaderSwitch + d.hh = h + d.be = h.isBinary() + // NOTE: do not initialize d.n here. It is lazily initialized in d.naked() + var jh *JsonHandle + jh, d.js = h.(*JsonHandle) + if d.js { + d.jsms = jh.MapKeyAsString + } + d.esep = d.hh.hasElemSeparators() + if d.h.InternString { + d.is = make(map[string]string, 32) + } + d.d = h.newDecDriver(d) + // d.cr, _ = d.d.(containerStateRecv) + return d +} + +func (d *Decoder) resetCommon() { + // d.r = &d.decReaderSwitch + d.d.reset() + d.err = nil + d.depth = 0 + d.maxdepth = d.h.MaxDepth + if d.maxdepth <= 0 { + d.maxdepth = decDefMaxDepth + } + // reset all things which were cached from the Handle, but could change + d.mtid, d.stid = 0, 0 + d.mtr, d.str = false, false + if d.h.MapType != nil { + d.mtid = rt2id(d.h.MapType) + d.mtr = fastpathAV.index(d.mtid) != -1 + } + if d.h.SliceType != nil { + d.stid = rt2id(d.h.SliceType) + d.str = fastpathAV.index(d.stid) != -1 + } +} + +// Reset the Decoder with a new Reader to decode from, +// clearing all state from last run(s). +func (d *Decoder) Reset(r io.Reader) { + if r == nil { + return + } + d.bytes = false + // d.typ = entryTypeUnset + if d.h.ReaderBufferSize > 0 { + if d.bi == nil { + d.bi = new(bufioDecReader) + } + d.bi.reset(r, d.h.ReaderBufferSize) + // d.r = d.bi + // d.typ = entryTypeBufio + d.bufio = true + } else { + // d.ri.x = &d.b + // d.s = d.sa[:0] + if d.ri == nil { + d.ri = new(ioDecReader) + } + d.ri.reset(r) + // d.r = d.ri + // d.typ = entryTypeIo + d.bufio = false + } + d.resetCommon() +} + +// ResetBytes resets the Decoder with a new []byte to decode from, +// clearing all state from last run(s). +func (d *Decoder) ResetBytes(in []byte) { + if in == nil { + return } - return &Decoder{r: &z, d: h.newDecDriver(&z), h: h.getBasicHandle()} + d.bytes = true + d.bufio = false + // d.typ = entryTypeBytes + d.rb.reset(in) + // d.r = &d.rb + d.resetCommon() +} + +func (d *Decoder) naked() *decNaked { + return &d.n } // Decode decodes the stream from reader and stores the result in the @@ -609,7 +2470,8 @@ func NewDecoderBytes(in []byte, h Handle) *Decoder { // - Else decode it based on its reflect.Kind // // There are some special rules when decoding into containers (slice/array/map/struct). -// Decode will typically use the stream contents to UPDATE the container. +// Decode will typically use the stream contents to UPDATE the container i.e. the values +// in these containers will not be zero'ed before decoding. // - A map can be decoded from a stream map, by updating matching keys. // - A slice can be decoded from a stream array, // by updating the first n elements, where n is length of the stream. 
@@ -619,430 +2481,629 @@ func NewDecoderBytes(in []byte, h Handle) *Decoder {
 // - A struct can be decoded from a stream array,
 //   by updating fields as they occur in the struct (by index).
 //
-// When decoding a stream map or array with length of 0 into a nil map or slice,
+// This in-place update maintains consistency in the decoding philosophy (i.e. we ALWAYS update
+// in place by default). However, the consequence of this is that values in slices or maps
+// which are not zero'ed beforehand will have part of the prior values in place after decode
+// if the stream doesn't contain an update for those parts.
+//
+// This in-place update can be disabled by configuring the MapValueReset and SliceElementReset
+// decode options available on every handle.
+//
+// Furthermore, when decoding a stream map or array with length of 0 into a nil map or slice,
 // we reset the destination map or slice to a zero-length value.
 //
 // However, when decoding a stream nil, we reset the destination container
 // to its "zero" value (e.g. nil for slice/map, etc).
 //
+// Note: we allow nil values in the stream anywhere except for map keys.
+// A nil value in the encoded stream where a map key is expected is treated as an error.
 func (d *Decoder) Decode(v interface{}) (err error) {
-	defer panicToErr(&err)
-	d.decode(v)
+	// Tried to use a closure, as the runtime optimizes defer with no params.
+	// This seemed to be causing weird issues (like circular reference found, unexpected panic, etc).
+	// Also, see https://github.com/golang/go/issues/14939#issuecomment-417836139
+	// defer func() { d.deferred(&err) }()
+	// { x, y := d, &err; defer func() { x.deferred(y) }() }
+	if d.err != nil {
+		return d.err
+	}
+	if recoverPanicToErr {
+		defer func() {
+			if x := recover(); x != nil {
+				panicValToErr(d, x, &d.err)
+				err = d.err
+			}
+		}()
+	}
+
+	// defer d.deferred(&err)
+	d.mustDecode(v)
 	return
 }
 
-func (d *Decoder) decode(iv interface{}) {
-	d.d.initReadNext()
+// MustDecode is like Decode, but panics if unable to Decode.
+// This provides insight into the code location that triggered the error.
+func (d *Decoder) MustDecode(v interface{}) {
+	if d.err != nil {
+		panic(d.err)
+	}
+	d.mustDecode(v)
+}
 
-	switch v := iv.(type) {
-	case nil:
-		decErr("Cannot decode into nil.")
+// mustDecode is like Decode, but panics if unable to Decode.
+// This provides insight into the code location that triggered the error.
+func (d *Decoder) mustDecode(v interface{}) {
+	// TODO: Top-level: ensure that v is a pointer and not nil.
+	if d.d.TryDecodeAsNil() {
+		setZero(v)
+		return
+	}
+	if d.bi == nil {
+		d.decode(v)
+		return
+	}
 
-	case reflect.Value:
-		d.chkPtrValue(v)
-		d.decodeValue(v.Elem())
+	d.bi.calls++
+	d.decode(v)
+	// xprintf.(">>>>>>>> >>>>>>>> num decFns: %v\n", d.cf.sn)
+	d.bi.calls--
+	if !d.h.ExplicitRelease && d.bi.calls == 0 {
+		d.bi.release()
+	}
+}
+
+// func (d *Decoder) deferred(err1 *error) {
+// 	if recoverPanicToErr {
+// 		if x := recover(); x != nil {
+// 			panicValToErr(d, x, err1)
+// 			panicValToErr(d, x, &d.err)
+// 		}
+// 	}
+// }
+
+//go:noinline -- as it is run by finalizer
+func (d *Decoder) finalize() {
+	// xdebugf("finalizing Decoder")
+	d.Release()
+}
+
+// Release releases shared (pooled) resources.
+//
+// It is important to call Release() when done with a Decoder, so those resources
+// are released instantly for use by subsequently created Decoders.
+//
+// By default, Release() is automatically called unless the option ExplicitRelease is set.
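+//
+// A hypothetical sketch of explicit lifecycle management (r and h are
+// assumptions; ExplicitRelease lives on the handle's BasicHandle options):
+//
+//	h.ExplicitRelease = true // opt out of automatic release
+//	d := NewDecoder(r, h)
+//	defer d.Release() // return pooled buffers for reuse when done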
+func (d *Decoder) Release() { + if d.bi != nil { + d.bi.release() + } + // d.decNakedPooler.end() +} + +// // this is not a smart swallow, as it allocates objects and does unnecessary work. +// func (d *Decoder) swallowViaHammer() { +// var blank interface{} +// d.decodeValueNoFn(reflect.ValueOf(&blank).Elem()) +// } + +func (d *Decoder) swallow() { + // smarter decode that just swallows the content + dd := d.d + if dd.TryDecodeAsNil() { + return + } + elemsep := d.esep + switch dd.ContainerType() { + case valueTypeMap: + containerLen := dd.ReadMapStart() + d.depthIncr() + hasLen := containerLen >= 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + // if clenGtEqualZero {if j >= containerLen {break} } else if dd.CheckBreak() {break} + if elemsep { + dd.ReadMapElemKey() + } + d.swallow() + if elemsep { + dd.ReadMapElemValue() + } + d.swallow() + } + dd.ReadMapEnd() + d.depthDecr() + case valueTypeArray: + containerLen := dd.ReadArrayStart() + d.depthIncr() + hasLen := containerLen >= 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if elemsep { + dd.ReadArrayElem() + } + d.swallow() + } + dd.ReadArrayEnd() + d.depthDecr() + case valueTypeBytes: + dd.DecodeBytes(d.b[:], true) + case valueTypeString: + dd.DecodeStringAsBytes() + default: + // these are all primitives, which we can get from decodeNaked + // if RawExt using Value, complete the processing. + n := d.naked() + dd.DecodeNaked() + if n.v == valueTypeExt && n.l == nil { + var v2 interface{} + d.decode(&v2) + } + } +} +func setZero(iv interface{}) { + if iv == nil || definitelyNil(iv) { + return + } + var canDecode bool + switch v := iv.(type) { case *string: - *v = d.d.decodeString() + *v = "" case *bool: - *v = d.d.decodeBool() + *v = false case *int: - *v = int(d.d.decodeInt(intBitsize)) + *v = 0 case *int8: - *v = int8(d.d.decodeInt(8)) + *v = 0 case *int16: - *v = int16(d.d.decodeInt(16)) + *v = 0 case *int32: - *v = int32(d.d.decodeInt(32)) + *v = 0 case *int64: - *v = d.d.decodeInt(64) + *v = 0 case *uint: - *v = uint(d.d.decodeUint(uintBitsize)) + *v = 0 case *uint8: - *v = uint8(d.d.decodeUint(8)) + *v = 0 case *uint16: - *v = uint16(d.d.decodeUint(16)) + *v = 0 case *uint32: - *v = uint32(d.d.decodeUint(32)) + *v = 0 case *uint64: - *v = d.d.decodeUint(64) + *v = 0 case *float32: - *v = float32(d.d.decodeFloat(true)) + *v = 0 case *float64: - *v = d.d.decodeFloat(false) - case *[]byte: - *v, _ = d.d.decodeBytes(*v) - - case *[]interface{}: - d.decSliceIntf(v, valueTypeInvalid, false) - case *[]uint64: - d.decSliceUint64(v, valueTypeInvalid, false) - case *[]int64: - d.decSliceInt64(v, valueTypeInvalid, false) - case *[]string: - d.decSliceStr(v, valueTypeInvalid, false) - case *map[string]interface{}: - d.decMapStrIntf(v) - case *map[interface{}]interface{}: - d.decMapIntfIntf(v) - case *map[uint64]interface{}: - d.decMapUint64Intf(v) - case *map[int64]interface{}: - d.decMapInt64Intf(v) - - case *interface{}: - d.decodeValue(reflect.ValueOf(iv).Elem()) - + *v = 0 + case *[]uint8: + *v = nil + case *Raw: + *v = nil + case *time.Time: + *v = time.Time{} + case reflect.Value: + if v, canDecode = isDecodeable(v); canDecode && v.CanSet() { + v.Set(reflect.Zero(v.Type())) + } // TODO: else drain if chan, clear if map, set all to nil if slice??? 
default: - rv := reflect.ValueOf(iv) - d.chkPtrValue(rv) - d.decodeValue(rv.Elem()) + if !fastpathDecodeSetZeroTypeSwitch(iv) { + v := reflect.ValueOf(iv) + if v, canDecode = isDecodeable(v); canDecode && v.CanSet() { + v.Set(reflect.Zero(v.Type())) + } // TODO: else drain if chan, clear if map, set all to nil if slice??? + } } } -func (d *Decoder) decodeValue(rv reflect.Value) { - d.d.initReadNext() +func (d *Decoder) decode(iv interface{}) { + // a switch with only concrete types can be optimized. + // consequently, we deal with nil and interfaces outside the switch. - if d.d.tryDecodeAsNil() { - // If value in stream is nil, set the dereferenced value to its "zero" value (if settable) - if rv.Kind() == reflect.Ptr { - if !rv.IsNil() { - rv.Set(reflect.Zero(rv.Type())) - } - return - } - // for rv.Kind() == reflect.Ptr { - // rv = rv.Elem() - // } - if rv.IsValid() { // rv.CanSet() // always settable, except it's invalid - rv.Set(reflect.Zero(rv.Type())) - } + if iv == nil { + d.errorstr(errstrCannotDecodeIntoNil) return } - // If stream is not containing a nil value, then we can deref to the base - // non-pointer value, and decode into that. - for rv.Kind() == reflect.Ptr { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) + switch v := iv.(type) { + // case nil: + // case Selfer: + case reflect.Value: + v = d.ensureDecodeable(v) + d.decodeValue(v, nil, true) + + case *string: + *v = d.d.DecodeString() + case *bool: + *v = d.d.DecodeBool() + case *int: + *v = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + case *int8: + *v = int8(chkOvf.IntV(d.d.DecodeInt64(), 8)) + case *int16: + *v = int16(chkOvf.IntV(d.d.DecodeInt64(), 16)) + case *int32: + *v = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + case *int64: + *v = d.d.DecodeInt64() + case *uint: + *v = uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) + case *uint8: + *v = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + case *uint16: + *v = uint16(chkOvf.UintV(d.d.DecodeUint64(), 16)) + case *uint32: + *v = uint32(chkOvf.UintV(d.d.DecodeUint64(), 32)) + case *uint64: + *v = d.d.DecodeUint64() + case *float32: + f64 := d.d.DecodeFloat64() + if chkOvf.Float32(f64) { + d.errorf("float32 overflow: %v", f64) } - rv = rv.Elem() - } + *v = float32(f64) + case *float64: + *v = d.d.DecodeFloat64() + case *[]uint8: + *v = d.d.DecodeBytes(*v, false) + case []uint8: + b := d.d.DecodeBytes(v, false) + if !(len(b) > 0 && len(b) == len(v) && &b[0] == &v[0]) { + copy(v, b) + } + case *time.Time: + *v = d.d.DecodeTime() + case *Raw: + *v = d.rawBytes() - rt := rv.Type() - rtid := reflect.ValueOf(rt).Pointer() + case *interface{}: + d.decodeValue(reflect.ValueOf(iv).Elem(), nil, true) + // d.decodeValueNotNil(reflect.ValueOf(iv).Elem()) - // retrieve or register a focus'ed function for this type - // to eliminate need to do the retrieval multiple times + default: + if v, ok := iv.(Selfer); ok { + v.CodecDecodeSelf(d) + } else if !fastpathDecodeTypeSwitch(iv, d) { + v := reflect.ValueOf(iv) + v = d.ensureDecodeable(v) + d.decodeValue(v, nil, false) + // d.decodeValueFallback(v) + } + } +} - // if d.f == nil && d.s == nil { debugf("---->Creating new dec f map for type: %v\n", rt) } - var fn decFn - var ok bool - if useMapForCodecCache { - fn, ok = d.f[rtid] - } else { - for i, v := range d.x { - if v == rtid { - fn, ok = d.s[i], true +func (d *Decoder) decodeValue(rv reflect.Value, fn *codecFn, chkAll bool) { + // If stream is not containing a nil value, then we can deref to the base + // non-pointer value, and decode into that. 
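+	// For example (hypothetical): decoding into a **T whose value is nil walks
+	// the pointer chain, allocating intermediates, then decodes into the final T:
+	//
+	//	var pp **T           // T is an illustrative type; pp is nil
+	//	err := d.Decode(&pp) // allocates *T and T as needed, then fills T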
+ var rvp reflect.Value + var rvpValid bool + if rv.Kind() == reflect.Ptr { + rvpValid = true + for { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + rvp = rv + rv = rv.Elem() + if rv.Kind() != reflect.Ptr { break } } } - if !ok { - // debugf("\tCreating new dec fn for type: %v\n", rt) - fi := decFnInfo{ti: getTypeInfo(rtid, rt), d: d, dd: d.d} - fn.i = &fi - // An extension can be registered for any type, regardless of the Kind - // (e.g. type BitSet int64, type MyStruct { / * unexported fields * / }, type X []int, etc. - // - // We can't check if it's an extension byte here first, because the user may have - // registered a pointer or non-pointer type, meaning we may have to recurse first - // before matching a mapped type, even though the extension byte is already detected. - // - // NOTE: if decoding into a nil interface{}, we return a non-nil - // value except even if the container registers a length of 0. - if rtid == rawExtTypId { - fn.f = (*decFnInfo).rawExt - } else if d.d.isBuiltinType(rtid) { - fn.f = (*decFnInfo).builtin - } else if xfTag, xfFn := d.h.getDecodeExt(rtid); xfFn != nil { - fi.xfTag, fi.xfFn = xfTag, xfFn - fn.f = (*decFnInfo).ext - } else if supportBinaryMarshal && fi.ti.unm { - fn.f = (*decFnInfo).binaryMarshal + + if fn == nil { + // always pass checkCodecSelfer=true, in case T or ****T is passed, where *T is a Selfer + fn = d.h.fn(rv.Type(), chkAll, true) // chkAll, chkAll) + } + if fn.i.addrD { + if rvpValid { + fn.fd(d, &fn.i, rvp) + } else if rv.CanAddr() { + fn.fd(d, &fn.i, rv.Addr()) + } else if !fn.i.addrF { + fn.fd(d, &fn.i, rv) } else { - switch rk := rt.Kind(); rk { - case reflect.String: - fn.f = (*decFnInfo).kString - case reflect.Bool: - fn.f = (*decFnInfo).kBool - case reflect.Int: - fn.f = (*decFnInfo).kInt - case reflect.Int64: - fn.f = (*decFnInfo).kInt64 - case reflect.Int32: - fn.f = (*decFnInfo).kInt32 - case reflect.Int8: - fn.f = (*decFnInfo).kInt8 - case reflect.Int16: - fn.f = (*decFnInfo).kInt16 - case reflect.Float32: - fn.f = (*decFnInfo).kFloat32 - case reflect.Float64: - fn.f = (*decFnInfo).kFloat64 - case reflect.Uint8: - fn.f = (*decFnInfo).kUint8 - case reflect.Uint64: - fn.f = (*decFnInfo).kUint64 - case reflect.Uint: - fn.f = (*decFnInfo).kUint - case reflect.Uint32: - fn.f = (*decFnInfo).kUint32 - case reflect.Uint16: - fn.f = (*decFnInfo).kUint16 - // case reflect.Ptr: - // fn.f = (*decFnInfo).kPtr - case reflect.Interface: - fn.f = (*decFnInfo).kInterface - case reflect.Struct: - fn.f = (*decFnInfo).kStruct - case reflect.Slice: - fn.f = (*decFnInfo).kSlice - case reflect.Array: - fi.array = true - fn.f = (*decFnInfo).kArray - case reflect.Map: - fn.f = (*decFnInfo).kMap - default: - fn.f = (*decFnInfo).kErr - } + d.errorf("cannot decode into a non-pointer value") } - if useMapForCodecCache { - if d.f == nil { - d.f = make(map[uintptr]decFn, 16) - } - d.f[rtid] = fn - } else { - d.s = append(d.s, fn) - d.x = append(d.x, rtid) + } else { + fn.fd(d, &fn.i, rv) + } + // return rv +} + +func (d *Decoder) structFieldNotFound(index int, rvkencname string) { + // NOTE: rvkencname may be a stringView, so don't pass it to another function. 
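+	// Depending on ErrorIfNoField, either error out below or swallow the
+	// unmatched value so the rest of the stream stays in sync.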
+	if d.h.ErrorIfNoField {
+		if index >= 0 {
+			d.errorf("no matching struct field found when decoding stream array at index %v", index)
+			return
+		} else if rvkencname != "" {
+			d.errorf("no matching struct field found when decoding stream map with key " + rvkencname)
+			return
 		}
 	}
+	d.swallow()
+}
 
-	fn.f(fn.i, rv)
+func (d *Decoder) arrayCannotExpand(sliceLen, streamLen int) {
+	if d.h.ErrorIfNoArrayExpand {
+		d.errorf("cannot expand array len during decode from %v to %v", sliceLen, streamLen)
+	}
+}
 
+func isDecodeable(rv reflect.Value) (rv2 reflect.Value, canDecode bool) {
+	switch rv.Kind() {
+	case reflect.Array:
+		return rv, rv.CanAddr()
+	case reflect.Ptr:
+		if !rv.IsNil() {
+			return rv.Elem(), true
+		}
+	case reflect.Slice, reflect.Chan, reflect.Map:
+		if !rv.IsNil() {
+			return rv, true
+		}
+	}
 	return
 }
 
-func (d *Decoder) chkPtrValue(rv reflect.Value) {
-	// We can only decode into a non-nil pointer
-	if rv.Kind() == reflect.Ptr && !rv.IsNil() {
+func (d *Decoder) ensureDecodeable(rv reflect.Value) (rv2 reflect.Value) {
+	// decode can take any reflect.Value that is inherently addressable, i.e.
+	// - array
+	// - non-nil chan (we will SEND to it)
+	// - non-nil slice (we will set its elements)
+	// - non-nil map (we will put into it)
+	// - non-nil pointer (we can "update" it)
+	rv2, canDecode := isDecodeable(rv)
+	if canDecode {
 		return
 	}
 	if !rv.IsValid() {
-		decErr("Cannot decode into a zero (ie invalid) reflect.Value")
+		d.errorstr(errstrCannotDecodeIntoNil)
+		return
 	}
 	if !rv.CanInterface() {
-		decErr("Cannot decode into a value without an interface: %v", rv)
+		d.errorf("cannot decode into a value without an interface: %v", rv)
+		return
 	}
-	rvi := rv.Interface()
-	decErr("Cannot decode into non-pointer or nil pointer. Got: %v, %T, %v",
-		rv.Kind(), rvi, rvi)
+	rvi := rv2i(rv)
+	rvk := rv.Kind()
+	d.errorf("cannot decode into value of kind: %v, type: %T, %v", rvk, rvi, rvi)
+	return
 }
 
-func (d *Decoder) decEmbeddedField(rv reflect.Value, index []int) {
-	// d.decodeValue(rv.FieldByIndex(index))
-	// nil pointers may be here; so reproduce FieldByIndex logic + enhancements
-	for _, j := range index {
-		if rv.Kind() == reflect.Ptr {
-			if rv.IsNil() {
-				rv.Set(reflect.New(rv.Type().Elem()))
-			}
-			// If a pointer, it must be a pointer to struct (based on typeInfo contract)
-			rv = rv.Elem()
-		}
-		rv = rv.Field(j)
+func (d *Decoder) depthIncr() {
+	d.depth++
+	if d.depth >= d.maxdepth {
+		panic(errMaxDepthExceeded)
+	}
+}
+
+func (d *Decoder) depthDecr() {
+	d.depth--
+}
+
+// Possibly get an interned version of a string
+//
+// This should mostly be used for map keys, where the key type is string.
+// This is because keys of a map/struct are typically reused across many objects.
+func (d *Decoder) string(v []byte) (s string) {
+	if d.is == nil {
+		return string(v) // don't return stringView, as we need a real string here.
+	}
+	s, ok := d.is[string(v)] // no allocation here, per go implementation
+	if !ok {
+		s = string(v) // new allocation here
+		d.is[s] = s
 	}
-	d.decodeValue(rv)
+	return s
+}
+
+// nextValueBytes returns the next value in the stream as a set of bytes.
+func (d *Decoder) nextValueBytes() (bs []byte) {
+	d.d.uncacheRead()
+	d.r.track()
+	d.swallow()
+	bs = d.r.stopTrack()
+	return
+}
+
+func (d *Decoder) rawBytes() []byte {
+	// ensure that this is not a view into the bytes
+	// i.e. make new copy always.
+ bs := d.nextValueBytes() + bs2 := make([]byte, len(bs)) + copy(bs2, bs) + return bs2 +} + +func (d *Decoder) wrapErr(v interface{}, err *error) { + *err = decodeError{codecError: codecError{name: d.hh.Name(), err: v}, pos: int(d.r.numread())} +} + +// NumBytesRead returns the number of bytes read +func (d *Decoder) NumBytesRead() int { + return int(d.r.numread()) } // -------------------------------------------------- -// short circuit functions for common maps and slices - -func (d *Decoder) decSliceIntf(v *[]interface{}, currEncodedType valueType, doNotReset bool) { - _, containerLenS := decContLens(d.d, currEncodedType) - s := *v - if s == nil { - s = make([]interface{}, containerLenS, containerLenS) - } else if containerLenS > cap(s) { - if doNotReset { - decErr(msgDecCannotExpandArr, cap(s), containerLenS) - } - s = make([]interface{}, containerLenS, containerLenS) - copy(s, *v) - } else if containerLenS > len(s) { - s = s[:containerLenS] - } - for j := 0; j < containerLenS; j++ { - d.decode(&s[j]) - } - *v = s -} - -func (d *Decoder) decSliceInt64(v *[]int64, currEncodedType valueType, doNotReset bool) { - _, containerLenS := decContLens(d.d, currEncodedType) - s := *v - if s == nil { - s = make([]int64, containerLenS, containerLenS) - } else if containerLenS > cap(s) { - if doNotReset { - decErr(msgDecCannotExpandArr, cap(s), containerLenS) - } - s = make([]int64, containerLenS, containerLenS) - copy(s, *v) - } else if containerLenS > len(s) { - s = s[:containerLenS] - } - for j := 0; j < containerLenS; j++ { - // d.decode(&s[j]) - d.d.initReadNext() - s[j] = d.d.decodeInt(intBitsize) - } - *v = s -} - -func (d *Decoder) decSliceUint64(v *[]uint64, currEncodedType valueType, doNotReset bool) { - _, containerLenS := decContLens(d.d, currEncodedType) - s := *v - if s == nil { - s = make([]uint64, containerLenS, containerLenS) - } else if containerLenS > cap(s) { - if doNotReset { - decErr(msgDecCannotExpandArr, cap(s), containerLenS) - } - s = make([]uint64, containerLenS, containerLenS) - copy(s, *v) - } else if containerLenS > len(s) { - s = s[:containerLenS] - } - for j := 0; j < containerLenS; j++ { - // d.decode(&s[j]) - d.d.initReadNext() - s[j] = d.d.decodeUint(intBitsize) - } - *v = s -} - -func (d *Decoder) decSliceStr(v *[]string, currEncodedType valueType, doNotReset bool) { - _, containerLenS := decContLens(d.d, currEncodedType) - s := *v - if s == nil { - s = make([]string, containerLenS, containerLenS) - } else if containerLenS > cap(s) { - if doNotReset { - decErr(msgDecCannotExpandArr, cap(s), containerLenS) - } - s = make([]string, containerLenS, containerLenS) - copy(s, *v) - } else if containerLenS > len(s) { - s = s[:containerLenS] - } - for j := 0; j < containerLenS; j++ { - // d.decode(&s[j]) - d.d.initReadNext() - s[j] = d.d.decodeString() - } - *v = s -} - -func (d *Decoder) decMapIntfIntf(v *map[interface{}]interface{}) { - containerLen := d.d.readMapLen() - m := *v - if m == nil { - m = make(map[interface{}]interface{}, containerLen) - *v = m - } - for j := 0; j < containerLen; j++ { - var mk interface{} - d.decode(&mk) - // special case if a byte array. - if bv, bok := mk.([]byte); bok { - mk = string(bv) - } - mv := m[mk] - d.decode(&mv) - m[mk] = mv +// decSliceHelper assists when decoding into a slice, from a map or an array in the stream. +// A slice can be set from a map or array in stream. This supports the MapBySlice interface. 
+type decSliceHelper struct { + d *Decoder + // ct valueType + array bool +} + +func (d *Decoder) decSliceHelperStart() (x decSliceHelper, clen int) { + dd := d.d + ctyp := dd.ContainerType() + switch ctyp { + case valueTypeArray: + x.array = true + clen = dd.ReadArrayStart() + case valueTypeMap: + clen = dd.ReadMapStart() * 2 + default: + d.errorf("only encoded map or array can be decoded into a slice (%d)", ctyp) } + // x.ct = ctyp + x.d = d + return } -func (d *Decoder) decMapInt64Intf(v *map[int64]interface{}) { - containerLen := d.d.readMapLen() - m := *v - if m == nil { - m = make(map[int64]interface{}, containerLen) - *v = m +func (x decSliceHelper) End() { + if x.array { + x.d.d.ReadArrayEnd() + } else { + x.d.d.ReadMapEnd() } - for j := 0; j < containerLen; j++ { - d.d.initReadNext() - mk := d.d.decodeInt(intBitsize) - mv := m[mk] - d.decode(&mv) - m[mk] = mv +} + +func (x decSliceHelper) ElemContainerState(index int) { + if x.array { + x.d.d.ReadArrayElem() + } else if index%2 == 0 { + x.d.d.ReadMapElemKey() + } else { + x.d.d.ReadMapElemValue() } } -func (d *Decoder) decMapUint64Intf(v *map[uint64]interface{}) { - containerLen := d.d.readMapLen() - m := *v - if m == nil { - m = make(map[uint64]interface{}, containerLen) - *v = m +func decByteSlice(r *decReaderSwitch, clen, maxInitLen int, bs []byte) (bsOut []byte) { + if clen == 0 { + return zeroByteSlice } - for j := 0; j < containerLen; j++ { - d.d.initReadNext() - mk := d.d.decodeUint(intBitsize) - mv := m[mk] - d.decode(&mv) - m[mk] = mv + if len(bs) == clen { + bsOut = bs + r.readb(bsOut) + } else if cap(bs) >= clen { + bsOut = bs[:clen] + r.readb(bsOut) + } else { + // bsOut = make([]byte, clen) + len2 := decInferLen(clen, maxInitLen, 1) + bsOut = make([]byte, len2) + r.readb(bsOut) + for len2 < clen { + len3 := decInferLen(clen-len2, maxInitLen, 1) + bs3 := bsOut + bsOut = make([]byte, len2+len3) + copy(bsOut, bs3) + r.readb(bsOut[len2:]) + len2 += len3 + } } + return } -func (d *Decoder) decMapStrIntf(v *map[string]interface{}) { - containerLen := d.d.readMapLen() - m := *v - if m == nil { - m = make(map[string]interface{}, containerLen) - *v = m - } - for j := 0; j < containerLen; j++ { - d.d.initReadNext() - mk := d.d.decodeString() - mv := m[mk] - d.decode(&mv) - m[mk] = mv +// func decByteSliceZeroCopy(r decReader, clen, maxInitLen int, bs []byte) (bsOut []byte) { +// if _, ok := r.(*bytesDecReader); ok && clen <= maxInitLen { +// return r.readx(clen) +// } +// return decByteSlice(r, clen, maxInitLen, bs) +// } + +func detachZeroCopyBytes(isBytesReader bool, dest []byte, in []byte) (out []byte) { + if xlen := len(in); xlen > 0 { + if isBytesReader || xlen <= scratchByteArrayLen { + if cap(dest) >= xlen { + out = dest[:xlen] + } else { + out = make([]byte, xlen) + } + copy(out, in) + return + } } + return in } -// ---------------------------------------- +// decInferLen will infer a sensible length, given the following: +// - clen: length wanted. +// - maxlen: max length to be returned. +// if <= 0, it is unset, and we infer it based on the unit size +// - unit: number of bytes for each element of the collection +func decInferLen(clen, maxlen, unit int) (rvlen int) { + // handle when maxlen is not set i.e. <= 0 + if clen <= 0 { + return + } + if unit == 0 { + return clen + } + if maxlen <= 0 { + // no maxlen defined. Use maximum of 256K memory, with a floor of 4K items. 
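+		// For example, decoding into a []int64 (unit == 8) caps the initial
+		// allocation at 256*1024/8 = 32768 elements, so a very large length
+		// prefix in the stream cannot force a huge up-front allocation.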
+ // maxlen = 256 * 1024 / unit + // if maxlen < (4 * 1024) { + // maxlen = 4 * 1024 + // } + if unit < (256 / 4) { + maxlen = 256 * 1024 / unit + } else { + maxlen = 4 * 1024 + } + } + if clen > maxlen { + rvlen = maxlen + } else { + rvlen = clen + } + return +} -func decContLens(dd decDriver, currEncodedType valueType) (containerLen, containerLenS int) { - if currEncodedType == valueTypeInvalid { - currEncodedType = dd.currentEncodedType() +func expandSliceRV(s reflect.Value, st reflect.Type, canChange bool, stElemSize, num, slen, scap int) ( + s2 reflect.Value, scap2 int, changed bool, err string) { + l1 := slen + num // new slice length + if l1 < slen { + err = errmsgExpandSliceOverflow + return } - switch currEncodedType { - case valueTypeArray: - containerLen = dd.readArrayLen() - containerLenS = containerLen - case valueTypeMap: - containerLen = dd.readMapLen() - containerLenS = containerLen * 2 - default: - decErr("Only encoded map or array can be decoded into a slice. (valueType: %0x)", - currEncodedType) + if l1 <= scap { + if s.CanSet() { + s.SetLen(l1) + } else if canChange { + s2 = s.Slice(0, l1) + scap2 = scap + changed = true + } else { + err = errmsgExpandSliceCannotChange + return + } + return + } + if !canChange { + err = errmsgExpandSliceCannotChange + return } + scap2 = growCap(scap, stElemSize, num) + s2 = reflect.MakeSlice(st, l1, scap2) + changed = true + reflect.Copy(s2, s) return } -func decErr(format string, params ...interface{}) { - doPanic(msgTagDec, format, params...) +func decReadFull(r io.Reader, bs []byte) (n uint, err error) { + var nn int + for n < uint(len(bs)) && err == nil { + nn, err = r.Read(bs[n:]) + if nn > 0 { + if err == io.EOF { + // leave EOF for next time + err = nil + } + n += uint(nn) + } + } + // xdebugf("decReadFull: len(bs): %v, n: %v, err: %v", len(bs), n, err) + // do not do this - it serves no purpose + // if n != len(bs) && err == io.EOF { err = io.ErrUnexpectedEOF } + return +} + +func decNakedReadRawBytes(dr decDriver, d *Decoder, n *decNaked, rawToString bool) { + if rawToString { + n.v = valueTypeString + n.s = string(dr.DecodeBytes(d.b[:], true)) + } else { + n.v = valueTypeBytes + n.l = dr.DecodeBytes(nil, false) + } } diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/doc.go b/vendor/github.com/hashicorp/go-msgpack/codec/doc.go new file mode 100644 index 0000000000000..5c5df9cfda1a0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/doc.go @@ -0,0 +1,245 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +/* +Package codec provides a +High Performance, Feature-Rich Idiomatic Go 1.4+ codec/encoding library +for binc, msgpack, cbor, json. + +Supported Serialization formats are: + + - msgpack: https://github.com/msgpack/msgpack + - binc: http://github.com/ugorji/binc + - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049 + - json: http://json.org http://tools.ietf.org/html/rfc7159 + - simple: + +This package will carefully use 'package unsafe' for performance reasons in specific places. +You can build without unsafe use by passing the safe or appengine tag +i.e. 'go install -tags=safe ...'. Note that unsafe is only supported for the last 4 +go releases e.g. current go release is go 1.12, so we support unsafe use only from +go 1.9+ . This is because supporting unsafe requires knowledge of implementation details. + +For detailed usage information, read the primer at http://ugorji.net/blog/go-codec-primer . 
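+
+For quick orientation, a minimal msgpack round-trip using only the exported
+API described in the rest of this document might look like this (a sketch):
+
+	var mh codec.MsgpackHandle
+
+	var buf []byte
+	if err := codec.NewEncoderBytes(&buf, &mh).Encode(map[string]int{"a": 1}); err != nil {
+		// handle error
+	}
+
+	var out map[string]int
+	if err := codec.NewDecoderBytes(buf, &mh).Decode(&out); err != nil {
+		// handle error
+	}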
+
+The idiomatic Go support is as seen in other encoding packages in
+the standard library (ie json, xml, gob, etc).
+
+Rich Feature Set includes:
+
+  - Simple but extremely powerful and feature-rich API
+  - Support for go1.4 and above, while selectively using newer APIs for later releases
+  - Excellent code coverage ( > 90% )
+  - Very High Performance.
+    Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X.
+  - Carefully selected use of 'unsafe' for targeted performance gains.
+    A 100% safe mode exists where 'unsafe' is not used at all.
+  - Lock-free (sans mutex) concurrency for scaling to 100's of cores
+  - In-place updates during decode, with option to zero value in maps and slices prior to decode
+  - Coerce types where appropriate
+    e.g. decode an int in the stream into a float, decode numbers from formatted strings, etc
+  - Corner Cases:
+    Overflows, nil maps/slices, nil values in streams are handled correctly
+  - Standard field renaming via tags
+  - Support for omitting empty fields during an encoding
+  - Encoding from any value and decoding into pointer to any value
+    (struct, slice, map, primitives, pointers, interface{}, etc)
+  - Extensions to support efficient encoding/decoding of any named types
+  - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
+  - Support IsZero() bool to determine if a value is a zero value.
+    Analogous to time.Time.IsZero() bool.
+  - Decoding without a schema (into an interface{}).
+    Includes Options to configure what specific map or slice type to use
+    when decoding an encoded list or map into a nil interface{}
+  - Mapping a non-interface type to an interface, so we can decode appropriately
+    into any interface type with a correctly configured non-interface value.
+  - Encode a struct as an array, and decode struct from an array in the data stream
+  - Option to encode struct keys as numbers (instead of strings)
+    (to support structured streams with fields encoded as numeric codes)
+  - Comprehensive support for anonymous fields
+  - Fast (no-reflection) encoding/decoding of common maps and slices
+  - Code-generation for faster performance.
+  - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats
+  - Support indefinite-length formats to enable true streaming
+    (for formats which support it e.g. json, cbor)
+  - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes.
+    This mostly applies to maps, where iteration order is non-deterministic.
+  - NIL in the data stream is decoded as the zero value
+  - Never silently skip data when decoding.
+    User decides whether to return an error or silently skip data when keys or indexes
+    in the data stream do not map to fields in the struct.
+  - Detect and error when encoding a cyclic reference (instead of stack overflow shutdown)
+  - Encode/Decode from/to chan types (for iterative streaming support)
+  - Drop-in replacement for encoding/json. `json:` key in struct tag supported.
+  - Provides a RPC Server and Client Codec for net/rpc communication protocol.
+  - Handle unique idiosyncrasies of codecs e.g.
+    - For messagepack, configure how ambiguities in handling raw bytes are resolved
+    - For messagepack, provide rpc server/client codec to support
+      msgpack-rpc protocol defined at:
+      https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
+
+Extension Support
+
+Users can register a function to handle the encoding or decoding of
+their custom types.
+
+There are no restrictions on what the custom type can be. Some examples:
+
+	type BitSet []int
+	type BitSet64 uint64
+	type UUID string
+	type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
+	type GifImage struct { ... }
+
+As an illustration, MyStructWithUnexportedFields would normally be
+encoded as an empty map because it has no exported fields, while UUID
+would be encoded as a string. However, with extension support, you can
+encode any of these however you like.
+
+Custom Encoding and Decoding
+
+This package maintains symmetry in the encoding and decoding halves.
+We determine how to encode or decode by walking this decision tree
+
+  - is type a codec.Selfer?
+  - is there an extension registered for the type?
+  - is format binary, and is type an encoding.BinaryMarshaler and BinaryUnmarshaler?
+  - is format specifically json, and is type an encoding/json.Marshaler and Unmarshaler?
+  - is format text-based, and type an encoding.TextMarshaler and TextUnmarshaler?
+  - else we use a pair of functions based on the "kind" of the type e.g. map, slice, int64, etc
+
+This symmetry is important to reduce chances of issues happening because the
+encoding and decoding sides are out of sync e.g. decoded via very specific
+encoding.TextUnmarshaler but encoded via kind-specific generalized mode.
+
+Consequently, if a type only defines one-half of the symmetry
+(e.g. it implements UnmarshalJSON() but not MarshalJSON() ),
+then that type doesn't satisfy the check and we will continue walking down the
+decision tree.
+
+RPC
+
+RPC Client and Server Codecs are implemented, so the codecs can be used
+with the standard net/rpc package.
+
+Usage
+
+The Handle is SAFE for concurrent READ, but NOT SAFE for concurrent modification.
+
+The Encoder and Decoder are NOT safe for concurrent use.
+
+Consequently, the usage model is basically:
+
+  - Create and initialize the Handle before any use.
+    Once created, DO NOT modify it.
+  - Multiple Encoders or Decoders can now use the Handle concurrently.
+    They only read information off the Handle (never write).
+  - However, each Encoder or Decoder MUST NOT be used concurrently
+  - To re-use an Encoder/Decoder, call Reset(...) on it first.
+    This allows you to use state maintained on the Encoder/Decoder.
+
+Sample usage model:
+
+	// create and configure Handle
+	var (
+		bh codec.BincHandle
+		mh codec.MsgpackHandle
+		ch codec.CborHandle
+	)
+
+	mh.MapType = reflect.TypeOf(map[string]interface{}(nil))
+
+	// configure extensions
+	// e.g. for msgpack, define functions and enable Time support for tag 1
+	// mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt)
+
+	// create and use decoder/encoder
+	var (
+		r io.Reader
+		w io.Writer
+		b []byte
+		h = &bh // or mh to use msgpack
+	)
+
+	dec = codec.NewDecoder(r, h)
+	dec = codec.NewDecoderBytes(b, h)
+	err = dec.Decode(&v)
+
+	enc = codec.NewEncoder(w, h)
+	enc = codec.NewEncoderBytes(&b, h)
+	err = enc.Encode(v)
+
+	//RPC Server
+	go func() {
+		for {
+			conn, err := listener.Accept()
+			rpcCodec := codec.GoRpc.ServerCodec(conn, h)
+			//OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h)
+			rpc.ServeCodec(rpcCodec)
+		}
+	}()
+
+	//RPC Communication (client side)
+	conn, err = net.Dial("tcp", "localhost:5555")
+	rpcCodec := codec.GoRpc.ClientCodec(conn, h)
+	//OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
+	client := rpc.NewClientWithCodec(rpcCodec)
+
+Running Tests
+
+To run tests, use the following:
+
+	go test
+
+To run the full suite of tests, use the following:
+
+	go test -tags alltests -run Suite
+
+You can use the tag 'safe' to run tests or build in safe mode. e.g.
+
+	go test -tags safe -run Json
+	go test -tags "alltests safe" -run Suite
+
+Running Benchmarks
+
+	cd bench
+	go test -bench . -benchmem -benchtime 1s
+
+Please see http://github.com/ugorji/go-codec-bench .
+
+Managing Binary Size
+
+This package could add up to 10MB to the size of your binaries.
+
+This is because we include an auto-generated file: `fast-path.generated.go`
+to help with performance when encoding/decoding slices and maps of
+built-in numeric, boolean, string and interface{} types.
+
+You can override this by building (or running tests and benchmarks)
+with the tag: `notfastpath`.
+
+	go install -tags notfastpath
+	go build -tags notfastpath
+	go test -tags notfastpath
+
+Be aware that, at least in our representative microbenchmarks for cbor (for example),
+the fast path yields up to a 33% increase in decoding speed and a 50% increase in
+encoding speed. YMMV.
+
+Caveats
+
+Struct fields matching the following are ignored during encoding and decoding
+  - struct tag value set to -
+  - func, complex numbers, unsafe pointers
+  - unexported and not embedded
+  - unexported and embedded and not struct kind
+  - unexported and embedded pointers (from go1.10)
+
+Every other field in a struct will be encoded/decoded.
+
+Embedded fields are encoded as if they exist in the top-level struct,
+with some caveats. See Encode documentation.
+
+*/
+package codec
+
diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/encode.go b/vendor/github.com/hashicorp/go-msgpack/codec/encode.go
index 4914be0c748bf..9d8d1164a4b2c 100644
--- a/vendor/github.com/hashicorp/go-msgpack/codec/encode.go
+++ b/vendor/github.com/hashicorp/go-msgpack/codec/encode.go
@@ -1,626 +1,1267 @@
-// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
-// Use of this source code is governed by a BSD-style license found in the LICENSE file.
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
 
 package codec
 
 import (
+	"encoding"
+	"errors"
+	"fmt"
 	"io"
 	"reflect"
+	"runtime"
+	"sort"
+	"strconv"
+	"time"
 )
 
-const (
-	// Some tagging information for error messages.
-	msgTagEnc         = "codec.encoder"
-	defEncByteBufSize = 1 << 6 // 4:16, 6:64, 8:256, 10:1024
-	// maxTimeSecs32 = math.MaxInt32 / 60 / 24 / 366
-)
-
-// AsSymbolFlag defines what should be encoded as symbols.
-type AsSymbolFlag uint8
-
-const (
-	// AsSymbolDefault is default.
- // Currently, this means only encode struct field names as symbols. - // The default is subject to change. - AsSymbolDefault AsSymbolFlag = iota - - // AsSymbolAll means encode anything which could be a symbol as a symbol. - AsSymbolAll = 0xfe +// defEncByteBufSize is the default size of []byte used +// for bufio buffer or []byte (when nil passed) +const defEncByteBufSize = 1 << 10 // 4:16, 6:64, 8:256, 10:1024 - // AsSymbolNone means do not encode anything as a symbol. - AsSymbolNone = 1 << iota +var errEncoderNotInitialized = errors.New("Encoder not initialized") - // AsSymbolMapStringKeys means encode keys in map[string]XXX as symbols. - AsSymbolMapStringKeysFlag +/* - // AsSymbolStructFieldName means encode struct field names as symbols. - AsSymbolStructFieldNameFlag -) - -// encWriter abstracting writing to a byte array or to an io.Writer. +// encWriter abstracts writing to a byte array or to an io.Writer. +// +// +// Deprecated: Use encWriterSwitch instead. type encWriter interface { - writeUint16(uint16) - writeUint32(uint32) - writeUint64(uint64) writeb([]byte) writestr(string) writen1(byte) writen2(byte, byte) - atEndOfEncode() + end() } +*/ + // encDriver abstracts the actual codec (binc vs msgpack, etc) type encDriver interface { - isBuiltinType(rt uintptr) bool - encodeBuiltin(rt uintptr, v interface{}) - encodeNil() - encodeInt(i int64) - encodeUint(i uint64) - encodeBool(b bool) - encodeFloat32(f float32) - encodeFloat64(f float64) - encodeExtPreamble(xtag byte, length int) - encodeArrayPreamble(length int) - encodeMapPreamble(length int) - encodeString(c charEncoding, v string) - encodeSymbol(v string) - encodeStringBytes(c charEncoding, v []byte) - //TODO + EncodeNil() + EncodeInt(i int64) + EncodeUint(i uint64) + EncodeBool(b bool) + EncodeFloat32(f float32) + EncodeFloat64(f float64) + // encodeExtPreamble(xtag byte, length int) + EncodeRawExt(re *RawExt, e *Encoder) + EncodeExt(v interface{}, xtag uint64, ext Ext, e *Encoder) + // Deprecated: use EncodeStringEnc instead + EncodeString(c charEncoding, v string) + // Deprecated: use EncodeStringBytesRaw instead + EncodeStringBytes(c charEncoding, v []byte) + EncodeStringEnc(c charEncoding, v string) // c cannot be cRAW + // EncodeSymbol(v string) + EncodeStringBytesRaw(v []byte) + EncodeTime(time.Time) //encBignum(f *big.Int) //encStringRunes(c charEncoding, v []rune) + WriteArrayStart(length int) + WriteArrayElem() + WriteArrayEnd() + WriteMapStart(length int) + WriteMapElemKey() + WriteMapElemValue() + WriteMapEnd() + + reset() + atEndOfEncode() } -type ioEncWriterWriter interface { - WriteByte(c byte) error - WriteString(s string) (n int, err error) - Write(p []byte) (n int, err error) +type encDriverAsis interface { + EncodeAsis(v []byte) } -type ioEncStringWriter interface { - WriteString(s string) (n int, err error) +type encodeError struct { + codecError } +func (e encodeError) Error() string { + return fmt.Sprintf("%s encode error: %v", e.name, e.err) +} + +type encDriverNoopContainerWriter struct{} + +func (encDriverNoopContainerWriter) WriteArrayStart(length int) {} +func (encDriverNoopContainerWriter) WriteArrayElem() {} +func (encDriverNoopContainerWriter) WriteArrayEnd() {} +func (encDriverNoopContainerWriter) WriteMapStart(length int) {} +func (encDriverNoopContainerWriter) WriteMapElemKey() {} +func (encDriverNoopContainerWriter) WriteMapElemValue() {} +func (encDriverNoopContainerWriter) WriteMapEnd() {} +func (encDriverNoopContainerWriter) atEndOfEncode() {} + +type encDriverTrackContainerWriter struct { 
+	c containerState
+}
+
+func (e *encDriverTrackContainerWriter) WriteArrayStart(length int) { e.c = containerArrayStart }
+func (e *encDriverTrackContainerWriter) WriteArrayElem()            { e.c = containerArrayElem }
+func (e *encDriverTrackContainerWriter) WriteArrayEnd()             { e.c = containerArrayEnd }
+func (e *encDriverTrackContainerWriter) WriteMapStart(length int)   { e.c = containerMapStart }
+func (e *encDriverTrackContainerWriter) WriteMapElemKey()           { e.c = containerMapKey }
+func (e *encDriverTrackContainerWriter) WriteMapElemValue()         { e.c = containerMapValue }
+func (e *encDriverTrackContainerWriter) WriteMapEnd()               { e.c = containerMapEnd }
+func (e *encDriverTrackContainerWriter) atEndOfEncode()             {}
+
+// type ioEncWriterWriter interface {
+// 	WriteByte(c byte) error
+// 	WriteString(s string) (n int, err error)
+// 	Write(p []byte) (n int, err error)
+// }
+
+// EncodeOptions captures configuration options during encode.
 type EncodeOptions struct {
-	// Encode a struct as an array, and not as a map.
+	// WriterBufferSize is the size of the buffer used when writing.
+	//
+	// if > 0, we use a smart buffer internally for performance purposes.
+	WriterBufferSize int
+
+	// ChanRecvTimeout is the timeout used when selecting from a chan.
+	//
+	// Configuring this controls how we receive from a chan during the encoding process.
+	//   - If ==0, we only consume the elements currently available in the chan.
+	//   - If <0, we consume until the chan is closed.
+	//   - If >0, we consume until this timeout.
+	ChanRecvTimeout time.Duration
+
+	// StructToArray specifies to encode a struct as an array, and not as a map
 	StructToArray bool
 
-	// AsSymbols defines what should be encoded as symbols.
+	// Canonical representation means that encoding a value will always result in the same
+	// sequence of bytes.
 	//
-	// Encoding as symbols can reduce the encoded size significantly.
+	// This only affects maps, as the iteration order for maps is random.
 	//
-	// However, during decoding, each string to be encoded as a symbol must
-	// be checked to see if it has been seen before. Consequently, encoding time
-	// will increase if using symbols, because string comparisons has a clear cost.
+	// The implementation MAY use the natural sort order for the map keys if possible:
 	//
-	// Sample values:
-	//   AsSymbolNone
-	//   AsSymbolAll
-	//   AsSymbolMapStringKeys
-	//   AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag
-	AsSymbols AsSymbolFlag
-}
+	//   - If there is a natural sort order (ie for number, bool, string or []byte keys),
+	//     then the map keys are first sorted in natural order and then written
+	//     with corresponding map values to the stream.
+	//   - If there is no natural sort order, then the map keys will first be
+	//     encoded into []byte, and then sorted,
+	//     before writing the sorted keys and the corresponding map values to the stream.
+	//
+	Canonical bool
 
-// ---------------------------------------------
+	// CheckCircularRef controls whether we check for circular references
+	// and error fast during an encode.
+	//
+	// If enabled, an error is received if a pointer to a struct
+	// references itself either directly or through one of its fields (iteratively).
+	//
+	// This is opt-in, as there may be a performance hit to checking circular references.
+	CheckCircularRef bool
 
-type simpleIoEncWriterWriter struct {
-	w  io.Writer
-	bw io.ByteWriter
-	sw ioEncStringWriter
-}
+	// RecursiveEmptyCheck controls whether we descend into interfaces, structs and pointers
+	// when checking if a value is empty.
+	//
+	// Note that this may make OmitEmpty more expensive, as it incurs a lot more reflect calls.
+	RecursiveEmptyCheck bool
 
-func (o *simpleIoEncWriterWriter) WriteByte(c byte) (err error) {
-	if o.bw != nil {
-		return o.bw.WriteByte(c)
-	}
-	_, err = o.w.Write([]byte{c})
-	return
-}
+	// Raw controls whether we encode Raw values.
+	// This is a "dangerous" option and must be explicitly set.
+	// If set, we blindly encode Raw values as-is, without checking
+	// if they are a correct representation of a value in that format.
+	// If unset, we error out.
+	Raw bool
 
-func (o *simpleIoEncWriterWriter) WriteString(s string) (n int, err error) {
-	if o.sw != nil {
-		return o.sw.WriteString(s)
-	}
-	return o.w.Write([]byte(s))
+	// StringToRaw controls how strings are encoded.
+	//
+	// As a go string is just an (immutable) sequence of bytes,
+	// it can be encoded either as raw bytes or as a UTF string.
+	//
+	// By default, strings are encoded as UTF-8,
+	// but can be treated as []byte during an encode.
+	//
+	// Note that things which we know (by definition) to be UTF-8
+	// are ALWAYS encoded as UTF-8 strings.
+	// These include encoding.TextMarshaler, time.Format calls, struct field names, etc.
+	StringToRaw bool
+
+	// // AsSymbols defines what should be encoded as symbols.
+	// //
+	// // Encoding as symbols can reduce the encoded size significantly.
+	// //
+	// // However, during decoding, each string to be encoded as a symbol must
+	// // be checked to see if it has been seen before. Consequently, encoding time
+	// // will increase if using symbols, because string comparisons have a clear cost.
+	// //
+	// // Sample values:
+	// //   AsSymbolNone
+	// //   AsSymbolAll
+	// //   AsSymbolMapStringKeys
+	// //   AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag
+	// AsSymbols AsSymbolFlag
 }
 
-func (o *simpleIoEncWriterWriter) Write(p []byte) (n int, err error) {
-	return o.w.Write(p)
-}
+// ---------------------------------------------
+
+/*
 
-// ----------------------------------------
+type ioEncStringWriter interface {
+	WriteString(s string) (n int, err error)
+}
 
 // ioEncWriter implements encWriter and can write to an io.Writer implementation
 type ioEncWriter struct {
-	w ioEncWriterWriter
-	x [8]byte // temp byte array re-used internally for efficiency
+	w  io.Writer
+	ww io.Writer
+	bw io.ByteWriter
+	sw ioEncStringWriter
+	fw ioFlusher
+	b  [8]byte
 }
 
-func (z *ioEncWriter) writeUint16(v uint16) {
-	bigen.PutUint16(z.x[:2], v)
-	z.writeb(z.x[:2])
+func (z *ioEncWriter) reset(w io.Writer) {
+	z.w = w
+	var ok bool
+	if z.bw, ok = w.(io.ByteWriter); !ok {
+		z.bw = z
+	}
+	if z.sw, ok = w.(ioEncStringWriter); !ok {
+		z.sw = z
+	}
+	z.fw, _ = w.(ioFlusher)
+	z.ww = w
 }
 
-func (z *ioEncWriter) writeUint32(v uint32) {
-	bigen.PutUint32(z.x[:4], v)
-	z.writeb(z.x[:4])
+func (z *ioEncWriter) WriteByte(b byte) (err error) {
+	z.b[0] = b
+	_, err = z.w.Write(z.b[:1])
+	return
 }
 
-func (z *ioEncWriter) writeUint64(v uint64) {
-	bigen.PutUint64(z.x[:8], v)
-	z.writeb(z.x[:8])
+func (z *ioEncWriter) WriteString(s string) (n int, err error) {
+	return z.w.Write(bytesView(s))
 }
 
 func (z *ioEncWriter) writeb(bs []byte) {
-	if len(bs) == 0 {
-		return
-	}
-	n, err := z.w.Write(bs)
-	if err != nil {
+	if _, err := z.ww.Write(bs); err != nil {
 		panic(err)
 	}
-	if n != len(bs) {
-		encErr("write: Incorrect num bytes written. Expecting: %v, Wrote: %v", len(bs), n)
-	}
 }
 
 func (z *ioEncWriter) writestr(s string) {
-	n, err := z.w.WriteString(s)
-	if err != nil {
+	if _, err := z.sw.WriteString(s); err != nil {
 		panic(err)
 	}
-	if n != len(s) {
-		encErr("write: Incorrect num bytes written. Expecting: %v, Wrote: %v", len(s), n)
-	}
 }
 
 func (z *ioEncWriter) writen1(b byte) {
-	if err := z.w.WriteByte(b); err != nil {
+	if err := z.bw.WriteByte(b); err != nil {
 		panic(err)
 	}
 }
 
-func (z *ioEncWriter) writen2(b1 byte, b2 byte) {
-	z.writen1(b1)
-	z.writen1(b2)
+func (z *ioEncWriter) writen2(b1, b2 byte) {
+	var err error
+	if err = z.bw.WriteByte(b1); err == nil {
+		if err = z.bw.WriteByte(b2); err == nil {
+			return
+		}
+	}
+	panic(err)
 }
 
-func (z *ioEncWriter) atEndOfEncode() {}
+// func (z *ioEncWriter) writen5(b1, b2, b3, b4, b5 byte) {
+// 	z.b[0], z.b[1], z.b[2], z.b[3], z.b[4] = b1, b2, b3, b4, b5
+// 	if _, err := z.ww.Write(z.b[:5]); err != nil {
+// 		panic(err)
+// 	}
+// }
 
-// ----------------------------------------
+//go:noinline - so *encWriterSwitch.XXX has the bytesEncAppender.XXX inlined
+func (z *ioEncWriter) end() {
+	if z.fw != nil {
+		if err := z.fw.Flush(); err != nil {
+			panic(err)
+		}
+	}
+}
 
-// bytesEncWriter implements encWriter and can write to an byte slice.
-// It is used by Marshal function.
-type bytesEncWriter struct {
-	b   []byte
-	c   int     // cursor
-	out *[]byte // write out on atEndOfEncode
+*/
+
+// ---------------------------------------------
+
+// bufioEncWriter
+type bufioEncWriter struct {
+	buf []byte
+	w   io.Writer
+	n   int
+	sz  int // buf size
+
+	// Extensions can call Encode() within a current Encode() call.
+	// We need to know when the top level Encode() call returns,
+	// so we can decide whether to Release() or not.
+	calls uint16 // what depth in mustEncode are we in now.
+
+	_ [6]uint8 // padding
+
+	bytesBufPooler
+
+	_ [1]uint64 // padding
+	// a int
+	// b [4]byte
+	// err
 }
 
-func (z *bytesEncWriter) writeUint16(v uint16) {
-	c := z.grow(2)
-	z.b[c] = byte(v >> 8)
-	z.b[c+1] = byte(v)
+func (z *bufioEncWriter) reset(w io.Writer, bufsize int) {
+	z.w = w
+	z.n = 0
+	z.calls = 0
+	if bufsize <= 0 {
+		bufsize = defEncByteBufSize
+	}
+	z.sz = bufsize
+	if cap(z.buf) >= bufsize {
+		z.buf = z.buf[:cap(z.buf)]
+	} else {
+		z.buf = z.bytesBufPooler.get(bufsize)
+		// z.buf = make([]byte, bufsize)
+	}
 }
 
-func (z *bytesEncWriter) writeUint32(v uint32) {
-	c := z.grow(4)
-	z.b[c] = byte(v >> 24)
-	z.b[c+1] = byte(v >> 16)
-	z.b[c+2] = byte(v >> 8)
-	z.b[c+3] = byte(v)
+func (z *bufioEncWriter) release() {
+	z.buf = nil
+	z.bytesBufPooler.end()
 }
 
-func (z *bytesEncWriter) writeUint64(v uint64) {
-	c := z.grow(8)
-	z.b[c] = byte(v >> 56)
-	z.b[c+1] = byte(v >> 48)
-	z.b[c+2] = byte(v >> 40)
-	z.b[c+3] = byte(v >> 32)
-	z.b[c+4] = byte(v >> 24)
-	z.b[c+5] = byte(v >> 16)
-	z.b[c+6] = byte(v >> 8)
-	z.b[c+7] = byte(v)
+//go:noinline - flush only called intermittently
+func (z *bufioEncWriter) flushErr() (err error) {
+	n, err := z.w.Write(z.buf[:z.n])
+	z.n -= n
+	if z.n > 0 && err == nil {
+		err = io.ErrShortWrite
+	}
+	if n > 0 && z.n > 0 {
+		copy(z.buf, z.buf[n:z.n+n])
+	}
+	return err
 }
 
-func (z *bytesEncWriter) writeb(s []byte) {
-	if len(s) == 0 {
-		return
+func (z *bufioEncWriter) flush() {
+	if err := z.flushErr(); err != nil {
+		panic(err)
 	}
-	c := z.grow(len(s))
-	copy(z.b[c:], s)
 }
 
-func (z *bytesEncWriter) writestr(s string) {
-	c := z.grow(len(s))
-	copy(z.b[c:], s)
+func (z *bufioEncWriter) writeb(s []byte) {
+LOOP:
+	a := len(z.buf) - z.n
+	if len(s) > a {
+		z.n += copy(z.buf[z.n:], s[:a])
+		s = s[a:]
+		z.flush()
+		goto LOOP
+	}
+	z.n += copy(z.buf[z.n:], s)
 }
 
-func (z *bytesEncWriter) writen1(b1 byte) {
-	c := z.grow(1)
-	z.b[c] = b1
+func (z *bufioEncWriter) writestr(s string) {
+	// z.writeb(bytesView(s)) // inlined below
+LOOP:
+	a := len(z.buf) - z.n
+	if len(s) > a {
+		z.n += copy(z.buf[z.n:], s[:a])
+		s = s[a:]
+		z.flush()
+		goto LOOP
+	}
+	z.n += copy(z.buf[z.n:], s)
 }
 
-func (z *bytesEncWriter) writen2(b1 byte, b2 byte) {
-	c := z.grow(2)
-	z.b[c] = b1
-	z.b[c+1] = b2
+func (z *bufioEncWriter) writen1(b1 byte) {
+	if 1 > len(z.buf)-z.n {
+		z.flush()
+	}
+	z.buf[z.n] = b1
+	z.n++
 }
 
-func (z *bytesEncWriter) atEndOfEncode() {
-	*(z.out) = z.b[:z.c]
+func (z *bufioEncWriter) writen2(b1, b2 byte) {
+	if 2 > len(z.buf)-z.n {
+		z.flush()
+	}
+	z.buf[z.n+1] = b2
+	z.buf[z.n] = b1
+	z.n += 2
 }
 
-func (z *bytesEncWriter) grow(n int) (oldcursor int) {
-	oldcursor = z.c
-	z.c = oldcursor + n
-	if z.c > cap(z.b) {
-		// Tried using appendslice logic: (if cap < 1024, *2, else *1.25).
-		// However, it was too expensive, causing too many iterations of copy.
-		// Using bytes.Buffer model was much better (2*cap + n)
-		bs := make([]byte, 2*cap(z.b)+n)
-		copy(bs, z.b[:oldcursor])
-		z.b = bs
-	} else if z.c > len(z.b) {
-		z.b = z.b[:cap(z.b)]
+func (z *bufioEncWriter) endErr() (err error) {
+	if z.n > 0 {
+		err = z.flushErr()
 	}
 	return
 }
 
 // ---------------------------------------------
 
-type encFnInfo struct {
-	ti *typeInfo
-	e  *Encoder
-	ee encDriver
-	xfFn func(reflect.Value) ([]byte, error)
-	xfTag byte
+// bytesEncAppender implements encWriter and can write to a byte slice.
+type bytesEncAppender struct { + b []byte + out *[]byte } -func (f *encFnInfo) builtin(rv reflect.Value) { - f.ee.encodeBuiltin(f.ti.rtid, rv.Interface()) +func (z *bytesEncAppender) writeb(s []byte) { + z.b = append(z.b, s...) } - -func (f *encFnInfo) rawExt(rv reflect.Value) { - f.e.encRawExt(rv.Interface().(RawExt)) +func (z *bytesEncAppender) writestr(s string) { + z.b = append(z.b, s...) } - -func (f *encFnInfo) ext(rv reflect.Value) { - bs, fnerr := f.xfFn(rv) - if fnerr != nil { - panic(fnerr) - } - if bs == nil { - f.ee.encodeNil() - return - } - if f.e.hh.writeExt() { - f.ee.encodeExtPreamble(f.xfTag, len(bs)) - f.e.w.writeb(bs) - } else { - f.ee.encodeStringBytes(c_RAW, bs) - } - +func (z *bytesEncAppender) writen1(b1 byte) { + z.b = append(z.b, b1) } - -func (f *encFnInfo) binaryMarshal(rv reflect.Value) { - var bm binaryMarshaler - if f.ti.mIndir == 0 { - bm = rv.Interface().(binaryMarshaler) - } else if f.ti.mIndir == -1 { - bm = rv.Addr().Interface().(binaryMarshaler) - } else { - for j, k := int8(0), f.ti.mIndir; j < k; j++ { - if rv.IsNil() { - f.ee.encodeNil() - return - } - rv = rv.Elem() - } - bm = rv.Interface().(binaryMarshaler) - } - // debugf(">>>> binaryMarshaler: %T", rv.Interface()) - bs, fnerr := bm.MarshalBinary() - if fnerr != nil { - panic(fnerr) - } - if bs == nil { - f.ee.encodeNil() - } else { - f.ee.encodeStringBytes(c_RAW, bs) - } +func (z *bytesEncAppender) writen2(b1, b2 byte) { + z.b = append(z.b, b1, b2) } +func (z *bytesEncAppender) endErr() error { + *(z.out) = z.b + return nil +} +func (z *bytesEncAppender) reset(in []byte, out *[]byte) { + z.b = in[:0] + z.out = out +} + +// --------------------------------------------- -func (f *encFnInfo) kBool(rv reflect.Value) { - f.ee.encodeBool(rv.Bool()) +func (e *Encoder) rawExt(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeRawExt(rv2i(rv).(*RawExt), e) } -func (f *encFnInfo) kString(rv reflect.Value) { - f.ee.encodeString(c_UTF8, rv.String()) +func (e *Encoder) ext(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeExt(rv2i(rv), f.xfTag, f.xfFn, e) } -func (f *encFnInfo) kFloat64(rv reflect.Value) { - f.ee.encodeFloat64(rv.Float()) +func (e *Encoder) selferMarshal(f *codecFnInfo, rv reflect.Value) { + rv2i(rv).(Selfer).CodecEncodeSelf(e) } -func (f *encFnInfo) kFloat32(rv reflect.Value) { - f.ee.encodeFloat32(float32(rv.Float())) +func (e *Encoder) binaryMarshal(f *codecFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(encoding.BinaryMarshaler).MarshalBinary() + e.marshalRaw(bs, fnerr) } -func (f *encFnInfo) kInt(rv reflect.Value) { - f.ee.encodeInt(rv.Int()) +func (e *Encoder) textMarshal(f *codecFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(encoding.TextMarshaler).MarshalText() + e.marshalUtf8(bs, fnerr) } -func (f *encFnInfo) kUint(rv reflect.Value) { - f.ee.encodeUint(rv.Uint()) +func (e *Encoder) jsonMarshal(f *codecFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(jsonMarshaler).MarshalJSON() + e.marshalAsis(bs, fnerr) } -func (f *encFnInfo) kInvalid(rv reflect.Value) { - f.ee.encodeNil() +func (e *Encoder) raw(f *codecFnInfo, rv reflect.Value) { + e.rawBytes(rv2i(rv).(Raw)) } -func (f *encFnInfo) kErr(rv reflect.Value) { - encErr("Unsupported kind: %s, for: %#v", rv.Kind(), rv) +func (e *Encoder) kInvalid(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeNil() } -func (f *encFnInfo) kSlice(rv reflect.Value) { - if rv.IsNil() { - f.ee.encodeNil() - return - } +func (e *Encoder) kErr(f *codecFnInfo, rv reflect.Value) { + e.errorf("unsupported kind %s, for %#v", rv.Kind(), rv) +} - if 
shortCircuitReflectToFastPath { - switch f.ti.rtid { - case intfSliceTypId: - f.e.encSliceIntf(rv.Interface().([]interface{})) - return - case strSliceTypId: - f.e.encSliceStr(rv.Interface().([]string)) - return - case uint64SliceTypId: - f.e.encSliceUint64(rv.Interface().([]uint64)) +func (e *Encoder) kSlice(f *codecFnInfo, rv reflect.Value) { + ti := f.ti + ee := e.e + // array may be non-addressable, so we have to manage with care + // (don't call rv.Bytes, rv.Slice, etc). + // E.g. type struct S{B [2]byte}; + // Encode(S{}) will bomb on "panic: slice of unaddressable array". + if f.seq != seqTypeArray { + if rv.IsNil() { + ee.EncodeNil() return - case int64SliceTypId: - f.e.encSliceInt64(rv.Interface().([]int64)) + } + // If in this method, then there was no extension function defined. + // So it's okay to treat as []byte. + if ti.rtid == uint8SliceTypId { + ee.EncodeStringBytesRaw(rv.Bytes()) return } } + if f.seq == seqTypeChan && ti.chandir&uint8(reflect.RecvDir) == 0 { + e.errorf("send-only channel cannot be encoded") + } + elemsep := e.esep + rtelem := ti.elem + rtelemIsByte := uint8TypId == rt2id(rtelem) // NOT rtelem.Kind() == reflect.Uint8 + var l int + // if a slice, array or chan of bytes, treat specially + if rtelemIsByte { + switch f.seq { + case seqTypeSlice: + ee.EncodeStringBytesRaw(rv.Bytes()) + case seqTypeArray: + l = rv.Len() + if rv.CanAddr() { + ee.EncodeStringBytesRaw(rv.Slice(0, l).Bytes()) + } else { + var bs []byte + if l <= cap(e.b) { + bs = e.b[:l] + } else { + bs = make([]byte, l) + } + reflect.Copy(reflect.ValueOf(bs), rv) + ee.EncodeStringBytesRaw(bs) + } + case seqTypeChan: + // do not use range, so that the number of elements encoded + // does not change, and encoding does not hang waiting on someone to close chan. + // for b := range rv2i(rv).(<-chan byte) { bs = append(bs, b) } + // ch := rv2i(rv).(<-chan byte) // fix error - that this is a chan byte, not a <-chan byte. + + if rv.IsNil() { + ee.EncodeNil() + break + } + bs := e.b[:0] + irv := rv2i(rv) + ch, ok := irv.(<-chan byte) + if !ok { + ch = irv.(chan byte) + } + + L1: + switch timeout := e.h.ChanRecvTimeout; { + case timeout == 0: // only consume available + for { + select { + case b := <-ch: + bs = append(bs, b) + default: + break L1 + } + } + case timeout > 0: // consume until timeout + tt := time.NewTimer(timeout) + for { + select { + case b := <-ch: + bs = append(bs, b) + case <-tt.C: + // close(tt.C) + break L1 + } + } + default: // consume until close + for b := range ch { + bs = append(bs, b) + } + } - // If in this method, then there was no extension function defined. - // So it's okay to treat as []byte. - if f.ti.rtid == uint8SliceTypId || f.ti.rt.Elem().Kind() == reflect.Uint8 { - f.ee.encodeStringBytes(c_RAW, rv.Bytes()) + ee.EncodeStringBytesRaw(bs) + } return } - l := rv.Len() - if f.ti.mbs { + // if chan, consume chan into a slice, and work off that slice. 
+ if f.seq == seqTypeChan { + rvcs := reflect.Zero(reflect.SliceOf(rtelem)) + timeout := e.h.ChanRecvTimeout + if timeout < 0 { // consume until close + for { + recv, recvOk := rv.Recv() + if !recvOk { + break + } + rvcs = reflect.Append(rvcs, recv) + } + } else { + cases := make([]reflect.SelectCase, 2) + cases[0] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: rv} + if timeout == 0 { + cases[1] = reflect.SelectCase{Dir: reflect.SelectDefault} + } else { + tt := time.NewTimer(timeout) + cases[1] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(tt.C)} + } + for { + chosen, recv, recvOk := reflect.Select(cases) + if chosen == 1 || !recvOk { + break + } + rvcs = reflect.Append(rvcs, recv) + } + } + rv = rvcs // TODO: ensure this doesn't mess up anywhere that rv of kind chan is expected + } + + l = rv.Len() + if ti.mbs { if l%2 == 1 { - encErr("mapBySlice: invalid length (must be divisible by 2): %v", l) + e.errorf("mapBySlice requires even slice length, but got %v", l) + return } - f.ee.encodeMapPreamble(l / 2) + ee.WriteMapStart(l / 2) } else { - f.ee.encodeArrayPreamble(l) + ee.WriteArrayStart(l) } - if l == 0 { - return + + if l > 0 { + var fn *codecFn + for rtelem.Kind() == reflect.Ptr { + rtelem = rtelem.Elem() + } + // if kind is reflect.Interface, do not pre-determine the + // encoding type, because preEncodeValue may break it down to + // a concrete type and kInterface will bomb. + if rtelem.Kind() != reflect.Interface { + fn = e.h.fn(rtelem, true, true) + } + for j := 0; j < l; j++ { + if elemsep { + if ti.mbs { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } else { + ee.WriteArrayElem() + } + } + e.encodeValue(rv.Index(j), fn, true) + } } - for j := 0; j < l; j++ { - // TODO: Consider perf implication of encoding odd index values as symbols if type is string - f.e.encodeValue(rv.Index(j)) + + if ti.mbs { + ee.WriteMapEnd() + } else { + ee.WriteArrayEnd() } } -func (f *encFnInfo) kArray(rv reflect.Value) { - // We cannot share kSlice method, because the array may be non-addressable. - // E.g. type struct S{B [2]byte}; Encode(S{}) will bomb on "panic: slice of unaddressable array". - // So we have to duplicate the functionality here. 
- // f.e.encodeValue(rv.Slice(0, rv.Len())) - // f.kSlice(rv.Slice(0, rv.Len())) +func (e *Encoder) kStructNoOmitempty(f *codecFnInfo, rv reflect.Value) { + fti := f.ti + tisfi := fti.sfiSrc + toMap := !(fti.toArray || e.h.StructToArray) + if toMap { + tisfi = fti.sfiSort + } - l := rv.Len() - // Handle an array of bytes specially (in line with what is done for slices) - if f.ti.rt.Elem().Kind() == reflect.Uint8 { - if l == 0 { - f.ee.encodeStringBytes(c_RAW, nil) - return + ee := e.e + + sfn := structFieldNode{v: rv, update: false} + if toMap { + ee.WriteMapStart(len(tisfi)) + if e.esep { + for _, si := range tisfi { + ee.WriteMapElemKey() + e.kStructFieldKey(fti.keyType, si.encNameAsciiAlphaNum, si.encName) + ee.WriteMapElemValue() + e.encodeValue(sfn.field(si), nil, true) + } + } else { + for _, si := range tisfi { + e.kStructFieldKey(fti.keyType, si.encNameAsciiAlphaNum, si.encName) + e.encodeValue(sfn.field(si), nil, true) + } } - var bs []byte - if rv.CanAddr() { - bs = rv.Slice(0, l).Bytes() + ee.WriteMapEnd() + } else { + ee.WriteArrayStart(len(tisfi)) + if e.esep { + for _, si := range tisfi { + ee.WriteArrayElem() + e.encodeValue(sfn.field(si), nil, true) + } } else { - bs = make([]byte, l) - for i := 0; i < l; i++ { - bs[i] = byte(rv.Index(i).Uint()) + for _, si := range tisfi { + e.encodeValue(sfn.field(si), nil, true) } } - f.ee.encodeStringBytes(c_RAW, bs) - return + ee.WriteArrayEnd() } +} - if f.ti.mbs { - if l%2 == 1 { - encErr("mapBySlice: invalid length (must be divisible by 2): %v", l) - } - f.ee.encodeMapPreamble(l / 2) - } else { - f.ee.encodeArrayPreamble(l) - } - if l == 0 { - return - } - for j := 0; j < l; j++ { - // TODO: Consider perf implication of encoding odd index values as symbols if type is string - f.e.encodeValue(rv.Index(j)) - } +func (e *Encoder) kStructFieldKey(keyType valueType, encNameAsciiAlphaNum bool, encName string) { + encStructFieldKey(encName, e.e, e.w, keyType, encNameAsciiAlphaNum, e.js) } -func (f *encFnInfo) kStruct(rv reflect.Value) { +func (e *Encoder) kStruct(f *codecFnInfo, rv reflect.Value) { fti := f.ti - newlen := len(fti.sfi) - rvals := make([]reflect.Value, newlen) - var encnames []string - e := f.e - tisfi := fti.sfip + elemsep := e.esep + tisfi := fti.sfiSrc + var newlen int toMap := !(fti.toArray || e.h.StructToArray) + var mf map[string]interface{} + if f.ti.mf { + mf = rv2i(rv).(MissingFielder).CodecMissingFields() + toMap = true + newlen += len(mf) + } else if f.ti.mfp { + if rv.CanAddr() { + mf = rv2i(rv.Addr()).(MissingFielder).CodecMissingFields() + } else { + // make a new addressable value of same one, and use it + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + mf = rv2i(rv2).(MissingFielder).CodecMissingFields() + } + toMap = true + newlen += len(mf) + } // if toMap, use the sorted array. If toArray, use unsorted array (to match sequence in struct) if toMap { - tisfi = fti.sfi - encnames = make([]string, newlen) + tisfi = fti.sfiSort } + newlen += len(tisfi) + ee := e.e + + // Use sync.Pool to reduce allocating slices unnecessarily. + // The cost of sync.Pool is less than the cost of new allocation. + // + // Each element of the array pools one of encStructPool(8|16|32|64). + // It allows the re-use of slices up to 64 in length. + // A performance cost of encoding structs was collecting + // which values were empty and should be omitted. + // We needed slices of reflect.Value and string to collect them. + // This shared pool reduces the amount of unnecessary creation we do. 
+ // The cost is that of locking sometimes, but sync.Pool is efficient + // enough to reduce thread contention. + + // fmt.Printf(">>>>>>>>>>>>>> encode.kStruct: newlen: %d\n", newlen) + var spool sfiRvPooler + var fkvs = spool.get(newlen) + + var kv sfiRv + recur := e.h.RecursiveEmptyCheck + sfn := structFieldNode{v: rv, update: false} newlen = 0 for _, si := range tisfi { - if si.i != -1 { - rvals[newlen] = rv.Field(int(si.i)) - } else { - rvals[newlen] = rv.FieldByIndex(si.is) - } + // kv.r = si.field(rv, false) + kv.r = sfn.field(si) if toMap { - if si.omitEmpty && isEmptyValue(rvals[newlen]) { + if si.omitEmpty() && isEmptyValue(kv.r, e.h.TypeInfos, recur, recur) { continue } - encnames[newlen] = si.encName + kv.v = si // si.encName } else { - if si.omitEmpty && isEmptyValue(rvals[newlen]) { - rvals[newlen] = reflect.Value{} //encode as nil + // use the zero value. + // if a reference or struct, set to nil (so you do not output too much) + if si.omitEmpty() && isEmptyValue(kv.r, e.h.TypeInfos, recur, recur) { + switch kv.r.Kind() { + case reflect.Struct, reflect.Interface, reflect.Ptr, + reflect.Array, reflect.Map, reflect.Slice: + kv.r = reflect.Value{} //encode as nil + } } } + fkvs[newlen] = kv newlen++ } + fkvs = fkvs[:newlen] + + var mflen int + for k, v := range mf { + if k == "" { + delete(mf, k) + continue + } + if fti.infoFieldOmitempty && isEmptyValue(reflect.ValueOf(v), e.h.TypeInfos, recur, recur) { + delete(mf, k) + continue + } + mflen++ + } - // debugf(">>>> kStruct: newlen: %v", newlen) + var j int if toMap { - ee := f.ee //don't dereference everytime - ee.encodeMapPreamble(newlen) - // asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 - asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 - for j := 0; j < newlen; j++ { - if asSymbols { - ee.encodeSymbol(encnames[j]) - } else { - ee.encodeString(c_UTF8, encnames[j]) + ee.WriteMapStart(newlen + mflen) + if elemsep { + for j = 0; j < len(fkvs); j++ { + kv = fkvs[j] + ee.WriteMapElemKey() + e.kStructFieldKey(fti.keyType, kv.v.encNameAsciiAlphaNum, kv.v.encName) + ee.WriteMapElemValue() + e.encodeValue(kv.r, nil, true) } - e.encodeValue(rvals[j]) + } else { + for j = 0; j < len(fkvs); j++ { + kv = fkvs[j] + e.kStructFieldKey(fti.keyType, kv.v.encNameAsciiAlphaNum, kv.v.encName) + e.encodeValue(kv.r, nil, true) + } + } + // now, add the others + for k, v := range mf { + ee.WriteMapElemKey() + e.kStructFieldKey(fti.keyType, false, k) + ee.WriteMapElemValue() + e.encode(v) } + ee.WriteMapEnd() } else { - f.ee.encodeArrayPreamble(newlen) - for j := 0; j < newlen; j++ { - e.encodeValue(rvals[j]) + ee.WriteArrayStart(newlen) + if elemsep { + for j = 0; j < len(fkvs); j++ { + ee.WriteArrayElem() + e.encodeValue(fkvs[j].r, nil, true) + } + } else { + for j = 0; j < len(fkvs); j++ { + e.encodeValue(fkvs[j].r, nil, true) + } } + ee.WriteArrayEnd() } -} -// func (f *encFnInfo) kPtr(rv reflect.Value) { -// debugf(">>>>>>> ??? encode kPtr called - shouldn't get called") -// if rv.IsNil() { -// f.ee.encodeNil() -// return -// } -// f.e.encodeValue(rv.Elem()) -// } - -func (f *encFnInfo) kInterface(rv reflect.Value) { - if rv.IsNil() { - f.ee.encodeNil() - return - } - f.e.encodeValue(rv.Elem()) + // do not use defer. Instead, use explicit pool return at end of function. + // defer has a cost we are trying to avoid. + // If there is a panic and these slices are not returned, it is ok. 
+ spool.end() } -func (f *encFnInfo) kMap(rv reflect.Value) { +func (e *Encoder) kMap(f *codecFnInfo, rv reflect.Value) { + ee := e.e if rv.IsNil() { - f.ee.encodeNil() + ee.EncodeNil() return } - if shortCircuitReflectToFastPath { - switch f.ti.rtid { - case mapIntfIntfTypId: - f.e.encMapIntfIntf(rv.Interface().(map[interface{}]interface{})) - return - case mapStrIntfTypId: - f.e.encMapStrIntf(rv.Interface().(map[string]interface{})) - return - case mapStrStrTypId: - f.e.encMapStrStr(rv.Interface().(map[string]string)) - return - case mapInt64IntfTypId: - f.e.encMapInt64Intf(rv.Interface().(map[int64]interface{})) - return - case mapUint64IntfTypId: - f.e.encMapUint64Intf(rv.Interface().(map[uint64]interface{})) - return - } - } - l := rv.Len() - f.ee.encodeMapPreamble(l) + ee.WriteMapStart(l) if l == 0 { + ee.WriteMapEnd() return } - // keyTypeIsString := f.ti.rt.Key().Kind() == reflect.String - keyTypeIsString := f.ti.rt.Key() == stringTyp - var asSymbols bool - if keyTypeIsString { - asSymbols = f.e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + // var asSymbols bool + // determine the underlying key and val encFn's for the map. + // This eliminates some work which is done for each loop iteration i.e. + // rv.Type(), ref.ValueOf(rt).Pointer(), then check map/list for fn. + // + // However, if kind is reflect.Interface, do not pre-determine the + // encoding type, because preEncodeValue may break it down to + // a concrete type and kInterface will bomb. + var keyFn, valFn *codecFn + ti := f.ti + rtkey0 := ti.key + rtkey := rtkey0 + rtval0 := ti.elem + rtval := rtval0 + // rtkeyid := rt2id(rtkey0) + for rtval.Kind() == reflect.Ptr { + rtval = rtval.Elem() + } + if rtval.Kind() != reflect.Interface { + valFn = e.h.fn(rtval, true, true) } mks := rv.MapKeys() + + if e.h.Canonical { + e.kMapCanonical(rtkey, rv, mks, valFn) + ee.WriteMapEnd() + return + } + + var keyTypeIsString = stringTypId == rt2id(rtkey0) // rtkeyid + if !keyTypeIsString { + for rtkey.Kind() == reflect.Ptr { + rtkey = rtkey.Elem() + } + if rtkey.Kind() != reflect.Interface { + // rtkeyid = rt2id(rtkey) + keyFn = e.h.fn(rtkey, true, true) + } + } + // for j, lmks := 0, len(mks); j < lmks; j++ { for j := range mks { + if e.esep { + ee.WriteMapElemKey() + } if keyTypeIsString { - if asSymbols { - f.ee.encodeSymbol(mks[j].String()) + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(mks[j].String())) } else { - f.ee.encodeString(c_UTF8, mks[j].String()) + ee.EncodeStringEnc(cUTF8, mks[j].String()) } } else { - f.e.encodeValue(mks[j]) + e.encodeValue(mks[j], keyFn, true) + } + if e.esep { + ee.WriteMapElemValue() } - f.e.encodeValue(rv.MapIndex(mks[j])) + e.encodeValue(rv.MapIndex(mks[j]), valFn, true) + } + ee.WriteMapEnd() +} +func (e *Encoder) kMapCanonical(rtkey reflect.Type, rv reflect.Value, mks []reflect.Value, valFn *codecFn) { + ee := e.e + elemsep := e.esep + // we previously did out-of-band if an extension was registered. + // This is not necessary, as the natural kind is sufficient for ordering. 
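+	// For example, with the Canonical option set, map[string]int{"b": 2, "a": 1}
+	// always writes key "a" before key "b", regardless of Go's randomized map
+	// iteration order, because the keys are sorted per kind below before encoding.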
+ + switch rtkey.Kind() { + case reflect.Bool: + mksv := make([]boolRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Bool() + } + sort.Sort(boolRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeBool(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.String: + mksv := make([]stringRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.String() + } + sort.Sort(stringRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(mksv[i].v)) + } else { + ee.EncodeStringEnc(cUTF8, mksv[i].v) + } + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr: + mksv := make([]uintRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Uint() + } + sort.Sort(uintRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeUint(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + mksv := make([]intRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Int() + } + sort.Sort(intRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeInt(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Float32: + mksv := make([]floatRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + sort.Sort(floatRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(mksv[i].v)) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Float64: + mksv := make([]floatRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + sort.Sort(floatRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Struct: + if rv.Type() == timeTyp { + mksv := make([]timeRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = rv2i(k).(time.Time) + } + sort.Sort(timeRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeTime(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + break + } + fallthrough + default: + // out-of-band + // first encode each key to a []byte first, then sort them, then record + var mksv []byte = make([]byte, 0, len(mks)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + mksbv := make([]bytesRv, len(mks)) + for i, k := range mks { + v := &mksbv[i] + l := len(mksv) + e2.MustEncode(k) + v.r = k + v.v = mksv[l:] + } + sort.Sort(bytesRvSlice(mksbv)) + for j := range mksbv { + if elemsep { + ee.WriteMapElemKey() + } + e.asis(mksbv[j].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksbv[j].r), valFn, true) + } + } } -// -------------------------------------------------- +// // 
--------------------------------------------------
+
+type encWriterSwitch struct {
+ // wi *ioEncWriter
+ wb bytesEncAppender
+ wf *bufioEncWriter
+ // typ entryType
+ bytes bool // encoding to []byte
+ esep bool // whether it has elem separators
+ isas bool // whether e.as != nil
+ js bool // is json encoder?
+ be bool // is binary encoder?
+ _ [2]byte // padding
+ // _ [2]uint64 // padding
+ // _ uint64 // padding
+}
 
-// encFn encapsulates the captured variables and the encode function.
-// This way, we only do some calculations one times, and pass to the
-// code block that should be called (encapsulated in a function)
-// instead of executing the checks every time.
-type encFn struct {
- i *encFnInfo
- f func(*encFnInfo, reflect.Value)
+func (z *encWriterSwitch) writeb(s []byte) {
+ if z.bytes {
+ z.wb.writeb(s)
+ } else {
+ z.wf.writeb(s)
+ }
+}
+func (z *encWriterSwitch) writestr(s string) {
+ if z.bytes {
+ z.wb.writestr(s)
+ } else {
+ z.wf.writestr(s)
+ }
+}
+func (z *encWriterSwitch) writen1(b1 byte) {
+ if z.bytes {
+ z.wb.writen1(b1)
+ } else {
+ z.wf.writen1(b1)
+ }
+}
+func (z *encWriterSwitch) writen2(b1, b2 byte) {
+ if z.bytes {
+ z.wb.writen2(b1, b2)
+ } else {
+ z.wf.writen2(b1, b2)
+ }
+}
+func (z *encWriterSwitch) endErr() error {
+ if z.bytes {
+ return z.wb.endErr()
+ }
+ return z.wf.endErr()
 }
 
-// --------------------------------------------------
+func (z *encWriterSwitch) end() {
+ if err := z.endErr(); err != nil {
+ panic(err)
+ }
+}
 
-// An Encoder writes an object to an output stream in the codec format.
+/*
+
+// ------------------------------------------
+func (z *encWriterSwitch) writeb(s []byte) {
+ switch z.typ {
+ case entryTypeBytes:
+ z.wb.writeb(s)
+ case entryTypeIo:
+ z.wi.writeb(s)
+ default:
+ z.wf.writeb(s)
+ }
+}
+func (z *encWriterSwitch) writestr(s string) {
+ switch z.typ {
+ case entryTypeBytes:
+ z.wb.writestr(s)
+ case entryTypeIo:
+ z.wi.writestr(s)
+ default:
+ z.wf.writestr(s)
+ }
+}
+func (z *encWriterSwitch) writen1(b1 byte) {
+ switch z.typ {
+ case entryTypeBytes:
+ z.wb.writen1(b1)
+ case entryTypeIo:
+ z.wi.writen1(b1)
+ default:
+ z.wf.writen1(b1)
+ }
+}
+func (z *encWriterSwitch) writen2(b1, b2 byte) {
+ switch z.typ {
+ case entryTypeBytes:
+ z.wb.writen2(b1, b2)
+ case entryTypeIo:
+ z.wi.writen2(b1, b2)
+ default:
+ z.wf.writen2(b1, b2)
+ }
+}
+func (z *encWriterSwitch) end() {
+ switch z.typ {
+ case entryTypeBytes:
+ z.wb.end()
+ case entryTypeIo:
+ z.wi.end()
+ default:
+ z.wf.end()
+ }
+}
+
+// ------------------------------------------
+func (z *encWriterSwitch) writeb(s []byte) {
+ if z.bytes {
+ z.wb.writeb(s)
+ } else {
+ z.wi.writeb(s)
+ }
+}
+func (z *encWriterSwitch) writestr(s string) {
+ if z.bytes {
+ z.wb.writestr(s)
+ } else {
+ z.wi.writestr(s)
+ }
+}
+func (z *encWriterSwitch) writen1(b1 byte) {
+ if z.bytes {
+ z.wb.writen1(b1)
+ } else {
+ z.wi.writen1(b1)
+ }
+}
+func (z *encWriterSwitch) writen2(b1, b2 byte) {
+ if z.bytes {
+ z.wb.writen2(b1, b2)
+ } else {
+ z.wi.writen2(b1, b2)
+ }
+}
+func (z *encWriterSwitch) end() {
+ if z.bytes {
+ z.wb.end()
+ } else {
+ z.wi.end()
+ }
+}
+
+*/
+
+// Encoder writes an object to an output stream in a supported format.
+//
+// Encoder is NOT safe for concurrent use i.e. an Encoder cannot be used
+// concurrently in multiple goroutines.
+//
+// However, as Encoder could be allocation heavy to initialize, a Reset method is provided
+// so its state can be reused to encode new output streams repeatedly.
+// This is the idiomatic way to use it.
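+//
+// A minimal usage sketch (illustrative only; MyValue is a placeholder type):
+//
+//	var out []byte
+//	enc := NewEncoderBytes(&out, new(MsgpackHandle))
+//	if err := enc.Encode(MyValue{}); err != nil {
+//		// handle the error
+//	}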
type Encoder struct {
- w encWriter
- e encDriver
+ panicHdl
+ // hopefully, reduce dereferencing cost by laying the encWriter inside the Encoder
+ e encDriver
+
+ // NOTE: Encoder shouldn't call its write methods,
+ // as the handler MAY need to do some coordination.
+ w *encWriterSwitch
+
+ // bw *bufio.Writer
+ as encDriverAsis
+
+ err error
+
 h *BasicHandle
 hh Handle
- f map[uintptr]encFn
- x []uintptr
- s []encFn
+ // ---- cpu cache line boundary? + 3
+ encWriterSwitch
+
+ ci set
+
+ b [(5 * 8)]byte // for encoding chan or (non-addressable) [N]byte
+
+ // ---- writable fields during execution --- *try* to keep in sep cache line
+
+ // ---- cpu cache line boundary?
+ // b [scratchByteArrayLen]byte
+ // _ [cacheLineSize - scratchByteArrayLen]byte // padding
+ // b [cacheLineSize - (8 * 0)]byte // used for encoding a chan or (non-addressable) array of bytes
 }
 
 // NewEncoder returns an Encoder for encoding into an io.Writer.
 //
-// For efficiency, Users are encouraged to pass in a memory buffered writer
-// (eg bufio.Writer, bytes.Buffer).
+// For efficiency, users are encouraged to configure WriterBufferSize on the handle
+// OR pass in a memory buffered writer (eg bufio.Writer, bytes.Buffer).
 func NewEncoder(w io.Writer, h Handle) *Encoder {
- ww, ok := w.(ioEncWriterWriter)
- if !ok {
- sww := simpleIoEncWriterWriter{w: w}
- sww.bw, _ = w.(io.ByteWriter)
- sww.sw, _ = w.(ioEncStringWriter)
- ww = &sww
- //ww = bufio.NewWriterSize(w, defEncByteBufSize)
- }
- z := ioEncWriter{
- w: ww,
- }
- return &Encoder{w: &z, hh: h, h: h.getBasicHandle(), e: h.newEncDriver(&z)}
+ e := newEncoder(h)
+ e.Reset(w)
+ return e
 }
 
 // NewEncoderBytes returns an encoder for encoding directly and efficiently
@@ -629,373 +1270,541 @@ func NewEncoder(w io.Writer, h Handle) *Encoder {
 // It will potentially replace the output byte slice pointed to.
 // After encoding, the out parameter contains the encoded contents.
 func NewEncoderBytes(out *[]byte, h Handle) *Encoder {
- in := *out
+ e := newEncoder(h)
+ e.ResetBytes(out)
+ return e
+}
+
+func newEncoder(h Handle) *Encoder {
+ e := &Encoder{h: basicHandle(h), err: errEncoderNotInitialized}
+ e.bytes = true
+ if useFinalizers {
+ runtime.SetFinalizer(e, (*Encoder).finalize)
+ // xdebugf(">>>> new(Encoder) with finalizer")
+ }
+ e.w = &e.encWriterSwitch
+ e.hh = h
+ e.esep = h.hasElemSeparators()
+
+ return e
+}
+
+func (e *Encoder) resetCommon() {
+ // e.w = &e.encWriterSwitch
+ if e.e == nil || e.hh.recreateEncDriver(e.e) {
+ e.e = e.hh.newEncDriver(e)
+ e.as, e.isas = e.e.(encDriverAsis)
+ // e.cr, _ = e.e.(containerStateRecv)
+ }
+ e.be = e.hh.isBinary()
+ _, e.js = e.hh.(*JsonHandle)
+ e.e.reset()
+ e.err = nil
+}
+
+// Reset resets the Encoder with a new output stream.
+//
+// This accommodates using the state of the Encoder,
+// where it has "cached" information about sub-engines.
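+//
+// A sketch of the reuse idiom (illustrative; w1, w2, h, v1 and v2 are placeholders):
+//
+//	enc := NewEncoder(w1, h)
+//	err = enc.Encode(v1)
+//	enc.Reset(w2) // reuse the allocated Encoder for a new output stream
+//	err = enc.Encode(v2)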
+func (e *Encoder) Reset(w io.Writer) {
+ if w == nil {
+ return
+ }
+ // var ok bool
+ e.bytes = false
+ if e.wf == nil {
+ e.wf = new(bufioEncWriter)
+ }
+ // e.typ = entryTypeUnset
+ // if e.h.WriterBufferSize > 0 {
+ // // bw := bufio.NewWriterSize(w, e.h.WriterBufferSize)
+ // // e.wi.bw = bw
+ // // e.wi.sw = bw
+ // // e.wi.fw = bw
+ // // e.wi.ww = bw
+ // if e.wf == nil {
+ // e.wf = new(bufioEncWriter)
+ // }
+ // e.wf.reset(w, e.h.WriterBufferSize)
+ // e.typ = entryTypeBufio
+ // } else {
+ // if e.wi == nil {
+ // e.wi = new(ioEncWriter)
+ // }
+ // e.wi.reset(w)
+ // e.typ = entryTypeIo
+ // }
+ e.wf.reset(w, e.h.WriterBufferSize)
+ // e.typ = entryTypeBufio
+
+ // e.w = e.wi
+ e.resetCommon()
+}
+
+// ResetBytes resets the Encoder with a new destination output []byte.
+func (e *Encoder) ResetBytes(out *[]byte) {
+ if out == nil {
+ return
+ }
+ var in []byte = *out
 if in == nil {
 in = make([]byte, defEncByteBufSize)
 }
- z := bytesEncWriter{
- b: in,
- out: out,
- }
- return &Encoder{w: &z, hh: h, h: h.getBasicHandle(), e: h.newEncDriver(&z)}
+ e.bytes = true
+ // e.typ = entryTypeBytes
+ e.wb.reset(in, out)
+ // e.w = &e.wb
+ e.resetCommon()
 }
 
-// Encode writes an object into a stream in the codec format.
+// Encode writes an object into a stream.
 //
-// Encoding can be configured via the "codec" struct tag for the fields.
+// Encoding can be configured via the struct tag for the fields.
+// The key (in the struct tags) that we look at is configurable.
 //
-// The "codec" key in struct field's tag value is the key name,
+// By default, we look up the "codec" key in the struct field's tags,
+// and fall back to the "json" key if "codec" is absent.
+// That key in struct field's tag value is the key name,
 // followed by an optional comma and options.
 //
 // To set an option on all fields (e.g. omitempty on all fields), you
-// can create a field called _struct, and set flags on it.
+// can create a field called _struct, and set flags on it. The options
+// which can be set on _struct are:
+//   - omitempty: so all fields are omitted if empty
+//   - toarray: so struct is encoded as an array
+//   - int: so struct key names are encoded as signed integers (instead of strings)
+//   - uint: so struct key names are encoded as unsigned integers (instead of strings)
+//   - float: so struct key names are encoded as floats (instead of strings)
+// More details on these below.
 //
 // Struct values "usually" encode as maps. Each exported struct field is encoded unless:
-//   - the field's codec tag is "-", OR
-//   - the field is empty and its codec tag specifies the "omitempty" option.
+//   - the field's tag is "-", OR
+//   - the field is empty (empty or the zero value) and its tag specifies the "omitempty" option.
 //
 // When encoding as a map, the first string in the tag (before the comma)
 // is the map key string to use when encoding.
+// ...
+// This key is typically encoded as a string.
+// However, there are instances where the encoded stream has mapping keys encoded as numbers.
+// For example, some cbor streams have keys as integer codes in the stream, but they should map
+// to fields in a structured object. Consequently, a struct is the natural representation in code.
+// For these, configure the struct to encode/decode the keys as numbers (instead of string).
+// This is done with the int, uint or float option on the _struct field (see above).
 //
 // However, struct values may encode as arrays. This happens when:
// - StructToArray Encode option is set, OR
-// - the codec tag on the _struct field sets the "toarray" option
+// - the tag on the _struct field sets the "toarray" option
+// Note that omitempty is ignored when encoding struct values as arrays,
+// as an entry must be encoded for each field, to maintain its position.
 //
 // Values with types that implement MapBySlice are encoded as stream maps.
 //
 // The empty values (for omitempty option) are false, 0, any nil pointer
 // or interface value, and any array, slice, map, or string of length zero.
 //
-// Anonymous fields are encoded inline if no struct tag is present.
-// Else they are encoded as regular fields.
+// Anonymous fields are encoded inline except:
+//   - the struct tag specifies a replacement name (first value)
+//   - the field is of an interface type
 //
 // Examples:
 //
+//      // NOTE: 'json:' can be used as the struct tag key, in place of 'codec:' below.
 //      type MyStruct struct {
 //          _struct bool    `codec:",omitempty"`   //set omitempty for every field
 //          Field1 string   `codec:"-"`            //skip this field
 //          Field2 int      `codec:"myName"`       //Use key "myName" in encode stream
 //          Field3 int32    `codec:",omitempty"`   //use key "Field3". Omit if empty.
 //          Field4 bool     `codec:"f4,omitempty"` //use key "f4". Omit if empty.
+//          io.Reader                              //use key "Reader".
+//          MyStruct        `codec:"my1"`          //use key "my1".
+//          MyStruct                               //inline it
 //          ...
 //      }
 //
 //      type MyStruct struct {
-//          _struct bool    `codec:",omitempty,toarray"`   //set omitempty for every field
-//                                                         //and encode struct as an array
+//          _struct bool    `codec:",toarray"`     //encode struct as an array
+//      }
+//
+//      type MyStruct struct {
+//          _struct bool    `codec:",uint"`        //encode struct with "unsigned integer" keys
+//          Field1 string   `codec:"1"`            //encode Field1 key using: EncodeInt(1)
+//          Field2 string   `codec:"2"`            //encode Field2 key using: EncodeInt(2)
 //      }
 //
 // The mode of encoding is based on the type of the value. When a value is seen:
+//   - If a Selfer, call its CodecEncodeSelf method
 //   - If an extension is registered for it, call that extension function
-//   - If it implements BinaryMarshaler, call its MarshalBinary() (data []byte, err error)
+//   - If it implements encoding.(Binary|Text|JSON)Marshaler, call its Marshal(Binary|Text|JSON) method
 //   - Else encode it based on its reflect.Kind
 //
 // Note that struct field names and keys in map[string]XXX will be treated as symbols.
 // Some formats support symbols (e.g. binc) and will properly encode the string
 // only once in the stream, and use a tag to refer to it thereafter.
 func (e *Encoder) Encode(v interface{}) (err error) {
- defer panicToErr(&err)
- e.encode(v)
- e.w.atEndOfEncode()
+ // tried to use closure, as runtime optimizes defer with no params.
+ // This seemed to be causing weird issues (like circular reference found, unexpected panic, etc).
+ // Also, see https://github.com/golang/go/issues/14939#issuecomment-417836139
+ // defer func() { e.deferred(&err) }() }
+ // { x, y := e, &err; defer func() { x.deferred(y) }() }
+ if e.err != nil {
+ return e.err
+ }
+ if recoverPanicToErr {
+ defer func() {
+ // if error occurred during encoding, return that error;
+ // else if error occurred on end'ing (i.e. during flush), return that error.
+ err = e.w.endErr()
+ x := recover()
+ if x == nil {
+ e.err = err
+ } else {
+ panicValToErr(e, x, &e.err)
+ err = e.err
+ }
+ }()
+ }
+
+ // defer e.deferred(&err)
+ e.mustEncode(v)
 return
 }
 
+// MustEncode is like Encode, but panics if unable to Encode.
+// This provides insight to the code location that triggered the error. +func (e *Encoder) MustEncode(v interface{}) { + if e.err != nil { + panic(e.err) + } + e.mustEncode(v) +} + +func (e *Encoder) mustEncode(v interface{}) { + if e.wf == nil { + e.encode(v) + e.e.atEndOfEncode() + e.w.end() + return + } + + if e.wf.buf == nil { + e.wf.buf = e.wf.bytesBufPooler.get(e.wf.sz) + } + e.wf.calls++ + + e.encode(v) + + e.wf.calls-- + + if e.wf.calls == 0 { + e.e.atEndOfEncode() + e.w.end() + if !e.h.ExplicitRelease { + e.wf.release() + } + } +} + +// func (e *Encoder) deferred(err1 *error) { +// e.w.end() +// if recoverPanicToErr { +// if x := recover(); x != nil { +// panicValToErr(e, x, err1) +// panicValToErr(e, x, &e.err) +// } +// } +// } + +//go:noinline -- as it is run by finalizer +func (e *Encoder) finalize() { + // xdebugf("finalizing Encoder") + e.Release() +} + +// Release releases shared (pooled) resources. +// +// It is important to call Release() when done with an Encoder, so those resources +// are released instantly for use by subsequently created Encoders. +func (e *Encoder) Release() { + if e.wf != nil { + e.wf.release() + } +} + func (e *Encoder) encode(iv interface{}) { - switch v := iv.(type) { - case nil: - e.e.encodeNil() + // a switch with only concrete types can be optimized. + // consequently, we deal with nil and interfaces outside the switch. + + if iv == nil || definitelyNil(iv) { + e.e.EncodeNil() + return + } + switch v := iv.(type) { + // case nil: + // case Selfer: + case Raw: + e.rawBytes(v) case reflect.Value: - e.encodeValue(v) + e.encodeValue(v, nil, true) case string: - e.e.encodeString(c_UTF8, v) + if e.h.StringToRaw { + e.e.EncodeStringBytesRaw(bytesView(v)) + } else { + e.e.EncodeStringEnc(cUTF8, v) + } case bool: - e.e.encodeBool(v) + e.e.EncodeBool(v) case int: - e.e.encodeInt(int64(v)) + e.e.EncodeInt(int64(v)) case int8: - e.e.encodeInt(int64(v)) + e.e.EncodeInt(int64(v)) case int16: - e.e.encodeInt(int64(v)) + e.e.EncodeInt(int64(v)) case int32: - e.e.encodeInt(int64(v)) + e.e.EncodeInt(int64(v)) case int64: - e.e.encodeInt(v) + e.e.EncodeInt(v) case uint: - e.e.encodeUint(uint64(v)) + e.e.EncodeUint(uint64(v)) case uint8: - e.e.encodeUint(uint64(v)) + e.e.EncodeUint(uint64(v)) case uint16: - e.e.encodeUint(uint64(v)) + e.e.EncodeUint(uint64(v)) case uint32: - e.e.encodeUint(uint64(v)) + e.e.EncodeUint(uint64(v)) case uint64: - e.e.encodeUint(v) + e.e.EncodeUint(v) + case uintptr: + e.e.EncodeUint(uint64(v)) case float32: - e.e.encodeFloat32(v) + e.e.EncodeFloat32(v) case float64: - e.e.encodeFloat64(v) - - case []interface{}: - e.encSliceIntf(v) - case []string: - e.encSliceStr(v) - case []int64: - e.encSliceInt64(v) - case []uint64: - e.encSliceUint64(v) + e.e.EncodeFloat64(v) + case time.Time: + e.e.EncodeTime(v) case []uint8: - e.e.encodeStringBytes(c_RAW, v) - - case map[interface{}]interface{}: - e.encMapIntfIntf(v) - case map[string]interface{}: - e.encMapStrIntf(v) - case map[string]string: - e.encMapStrStr(v) - case map[int64]interface{}: - e.encMapInt64Intf(v) - case map[uint64]interface{}: - e.encMapUint64Intf(v) + e.e.EncodeStringBytesRaw(v) + + case *Raw: + e.rawBytes(*v) case *string: - e.e.encodeString(c_UTF8, *v) + if e.h.StringToRaw { + e.e.EncodeStringBytesRaw(bytesView(*v)) + } else { + e.e.EncodeStringEnc(cUTF8, *v) + } case *bool: - e.e.encodeBool(*v) + e.e.EncodeBool(*v) case *int: - e.e.encodeInt(int64(*v)) + e.e.EncodeInt(int64(*v)) case *int8: - e.e.encodeInt(int64(*v)) + e.e.EncodeInt(int64(*v)) case *int16: - 
e.e.encodeInt(int64(*v)) + e.e.EncodeInt(int64(*v)) case *int32: - e.e.encodeInt(int64(*v)) + e.e.EncodeInt(int64(*v)) case *int64: - e.e.encodeInt(*v) + e.e.EncodeInt(*v) case *uint: - e.e.encodeUint(uint64(*v)) + e.e.EncodeUint(uint64(*v)) case *uint8: - e.e.encodeUint(uint64(*v)) + e.e.EncodeUint(uint64(*v)) case *uint16: - e.e.encodeUint(uint64(*v)) + e.e.EncodeUint(uint64(*v)) case *uint32: - e.e.encodeUint(uint64(*v)) + e.e.EncodeUint(uint64(*v)) case *uint64: - e.e.encodeUint(*v) + e.e.EncodeUint(*v) + case *uintptr: + e.e.EncodeUint(uint64(*v)) case *float32: - e.e.encodeFloat32(*v) + e.e.EncodeFloat32(*v) case *float64: - e.e.encodeFloat64(*v) - - case *[]interface{}: - e.encSliceIntf(*v) - case *[]string: - e.encSliceStr(*v) - case *[]int64: - e.encSliceInt64(*v) - case *[]uint64: - e.encSliceUint64(*v) + e.e.EncodeFloat64(*v) + case *time.Time: + e.e.EncodeTime(*v) + case *[]uint8: - e.e.encodeStringBytes(c_RAW, *v) - - case *map[interface{}]interface{}: - e.encMapIntfIntf(*v) - case *map[string]interface{}: - e.encMapStrIntf(*v) - case *map[string]string: - e.encMapStrStr(*v) - case *map[int64]interface{}: - e.encMapInt64Intf(*v) - case *map[uint64]interface{}: - e.encMapUint64Intf(*v) + e.e.EncodeStringBytesRaw(*v) default: - e.encodeValue(reflect.ValueOf(iv)) + if v, ok := iv.(Selfer); ok { + v.CodecEncodeSelf(e) + } else if !fastpathEncodeTypeSwitch(iv, e) { + // checkfastpath=true (not false), as underlying slice/map type may be fast-path + e.encodeValue(reflect.ValueOf(iv), nil, true) + } } } -func (e *Encoder) encodeValue(rv reflect.Value) { - for rv.Kind() == reflect.Ptr { +func (e *Encoder) encodeValue(rv reflect.Value, fn *codecFn, checkFastpath bool) { + // if a valid fn is passed, it MUST BE for the dereferenced type of rv + var sptr uintptr + var rvp reflect.Value + var rvpValid bool +TOP: + switch rv.Kind() { + case reflect.Ptr: if rv.IsNil() { - e.e.encodeNil() + e.e.EncodeNil() return } + rvpValid = true + rvp = rv rv = rv.Elem() - } - - rt := rv.Type() - rtid := reflect.ValueOf(rt).Pointer() - - // if e.f == nil && e.s == nil { debugf("---->Creating new enc f map for type: %v\n", rt) } - var fn encFn - var ok bool - if useMapForCodecCache { - fn, ok = e.f[rtid] - } else { - for i, v := range e.x { - if v == rtid { - fn, ok = e.s[i], true - break - } + if e.h.CheckCircularRef && rv.Kind() == reflect.Struct { + // TODO: Movable pointers will be an issue here. Future problem. 
+ sptr = rv.UnsafeAddr() + break TOP } - } - if !ok { - // debugf("\tCreating new enc fn for type: %v\n", rt) - fi := encFnInfo{ti: getTypeInfo(rtid, rt), e: e, ee: e.e} - fn.i = &fi - if rtid == rawExtTypId { - fn.f = (*encFnInfo).rawExt - } else if e.e.isBuiltinType(rtid) { - fn.f = (*encFnInfo).builtin - } else if xfTag, xfFn := e.h.getEncodeExt(rtid); xfFn != nil { - fi.xfTag, fi.xfFn = xfTag, xfFn - fn.f = (*encFnInfo).ext - } else if supportBinaryMarshal && fi.ti.m { - fn.f = (*encFnInfo).binaryMarshal - } else { - switch rk := rt.Kind(); rk { - case reflect.Bool: - fn.f = (*encFnInfo).kBool - case reflect.String: - fn.f = (*encFnInfo).kString - case reflect.Float64: - fn.f = (*encFnInfo).kFloat64 - case reflect.Float32: - fn.f = (*encFnInfo).kFloat32 - case reflect.Int, reflect.Int8, reflect.Int64, reflect.Int32, reflect.Int16: - fn.f = (*encFnInfo).kInt - case reflect.Uint8, reflect.Uint64, reflect.Uint, reflect.Uint32, reflect.Uint16: - fn.f = (*encFnInfo).kUint - case reflect.Invalid: - fn.f = (*encFnInfo).kInvalid - case reflect.Slice: - fn.f = (*encFnInfo).kSlice - case reflect.Array: - fn.f = (*encFnInfo).kArray - case reflect.Struct: - fn.f = (*encFnInfo).kStruct - // case reflect.Ptr: - // fn.f = (*encFnInfo).kPtr - case reflect.Interface: - fn.f = (*encFnInfo).kInterface - case reflect.Map: - fn.f = (*encFnInfo).kMap - default: - fn.f = (*encFnInfo).kErr - } - } - if useMapForCodecCache { - if e.f == nil { - e.f = make(map[uintptr]encFn, 16) - } - e.f[rtid] = fn - } else { - e.s = append(e.s, fn) - e.x = append(e.x, rtid) + goto TOP + case reflect.Interface: + if rv.IsNil() { + e.e.EncodeNil() + return + } + rv = rv.Elem() + goto TOP + case reflect.Slice, reflect.Map: + if rv.IsNil() { + e.e.EncodeNil() + return } + case reflect.Invalid, reflect.Func: + e.e.EncodeNil() + return } - fn.f(fn.i, rv) - -} + if sptr != 0 && (&e.ci).add(sptr) { + e.errorf("circular reference found: # %d", sptr) + } -func (e *Encoder) encRawExt(re RawExt) { - if re.Data == nil { - e.e.encodeNil() - return + if fn == nil { + rt := rv.Type() + // always pass checkCodecSelfer=true, in case T or ****T is passed, where *T is a Selfer + fn = e.h.fn(rt, checkFastpath, true) } - if e.hh.writeExt() { - e.e.encodeExtPreamble(re.Tag, len(re.Data)) - e.w.writeb(re.Data) + if fn.i.addrE { + if rvpValid { + fn.fe(e, &fn.i, rvp) + } else if rv.CanAddr() { + fn.fe(e, &fn.i, rv.Addr()) + } else { + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + fn.fe(e, &fn.i, rv2) + } } else { - e.e.encodeStringBytes(c_RAW, re.Data) + fn.fe(e, &fn.i, rv) + } + if sptr != 0 { + (&e.ci).remove(sptr) } } -// --------------------------------------------- -// short circuit functions for common maps and slices +// func (e *Encoder) marshal(bs []byte, fnerr error, asis bool, c charEncoding) { +// if fnerr != nil { +// panic(fnerr) +// } +// if bs == nil { +// e.e.EncodeNil() +// } else if asis { +// e.asis(bs) +// } else { +// e.e.EncodeStringBytesRaw(bs) +// } +// } -func (e *Encoder) encSliceIntf(v []interface{}) { - e.e.encodeArrayPreamble(len(v)) - for _, v2 := range v { - e.encode(v2) +func (e *Encoder) marshalUtf8(bs []byte, fnerr error) { + if fnerr != nil { + panic(fnerr) } -} - -func (e *Encoder) encSliceStr(v []string) { - e.e.encodeArrayPreamble(len(v)) - for _, v2 := range v { - e.e.encodeString(c_UTF8, v2) + if bs == nil { + e.e.EncodeNil() + } else { + e.e.EncodeStringEnc(cUTF8, stringView(bs)) } } -func (e *Encoder) encSliceInt64(v []int64) { - e.e.encodeArrayPreamble(len(v)) - for _, v2 := range v { - 
e.e.encodeInt(v2) +func (e *Encoder) marshalAsis(bs []byte, fnerr error) { + if fnerr != nil { + panic(fnerr) } -} - -func (e *Encoder) encSliceUint64(v []uint64) { - e.e.encodeArrayPreamble(len(v)) - for _, v2 := range v { - e.e.encodeUint(v2) + if bs == nil { + e.e.EncodeNil() + } else { + e.asis(bs) } } -func (e *Encoder) encMapStrStr(v map[string]string) { - e.e.encodeMapPreamble(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - for k2, v2 := range v { - if asSymbols { - e.e.encodeSymbol(k2) - } else { - e.e.encodeString(c_UTF8, k2) - } - e.e.encodeString(c_UTF8, v2) +func (e *Encoder) marshalRaw(bs []byte, fnerr error) { + if fnerr != nil { + panic(fnerr) + } + if bs == nil { + e.e.EncodeNil() + } else { + e.e.EncodeStringBytesRaw(bs) } } -func (e *Encoder) encMapStrIntf(v map[string]interface{}) { - e.e.encodeMapPreamble(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - for k2, v2 := range v { - if asSymbols { - e.e.encodeSymbol(k2) - } else { - e.e.encodeString(c_UTF8, k2) - } - e.encode(v2) +func (e *Encoder) asis(v []byte) { + if e.isas { + e.as.EncodeAsis(v) + } else { + e.w.writeb(v) } } -func (e *Encoder) encMapInt64Intf(v map[int64]interface{}) { - e.e.encodeMapPreamble(len(v)) - for k2, v2 := range v { - e.e.encodeInt(k2) - e.encode(v2) +func (e *Encoder) rawBytes(vv Raw) { + v := []byte(vv) + if !e.h.Raw { + e.errorf("Raw values cannot be encoded: %v", v) } + e.asis(v) } -func (e *Encoder) encMapUint64Intf(v map[uint64]interface{}) { - e.e.encodeMapPreamble(len(v)) - for k2, v2 := range v { - e.e.encodeUint(uint64(k2)) - e.encode(v2) - } +func (e *Encoder) wrapErr(v interface{}, err *error) { + *err = encodeError{codecError{name: e.hh.Name(), err: v}} } -func (e *Encoder) encMapIntfIntf(v map[interface{}]interface{}) { - e.e.encodeMapPreamble(len(v)) - for k2, v2 := range v { - e.encode(k2) - e.encode(v2) +func encStructFieldKey(encName string, ee encDriver, w *encWriterSwitch, + keyType valueType, encNameAsciiAlphaNum bool, js bool) { + var m must + // use if-else-if, not switch (which compiles to binary-search) + // since keyType is typically valueTypeString, branch prediction is pretty good. + if keyType == valueTypeString { + if js && encNameAsciiAlphaNum { // keyType == valueTypeString + // w.writen1('"') + // w.writestr(encName) + // w.writen1('"') + // ---- + // w.writestr(`"` + encName + `"`) + // ---- + // do concat myself, so it is faster than the generic string concat + b := make([]byte, len(encName)+2) + copy(b[1:], encName) + b[0] = '"' + b[len(b)-1] = '"' + w.writeb(b) + } else { // keyType == valueTypeString + ee.EncodeStringEnc(cUTF8, encName) + } + } else if keyType == valueTypeInt { + ee.EncodeInt(m.Int(strconv.ParseInt(encName, 10, 64))) + } else if keyType == valueTypeUint { + ee.EncodeUint(m.Uint(strconv.ParseUint(encName, 10, 64))) + } else if keyType == valueTypeFloat { + ee.EncodeFloat64(m.Float(strconv.ParseFloat(encName, 64))) } } -// ---------------------------------------- - -func encErr(format string, params ...interface{}) { - doPanic(msgTagEnc, format, params...) 
-}
 
+// func encStringAsRawBytesMaybe(ee encDriver, s string, stringToRaw bool) {
+// if stringToRaw {
+// ee.EncodeStringBytesRaw(bytesView(s))
+// } else {
+// ee.EncodeStringEnc(cUTF8, s)
+// }
+// }
diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/fast-path.generated.go b/vendor/github.com/hashicorp/go-msgpack/codec/fast-path.generated.go
new file mode 100644
index 0000000000000..9bc14bd638a39
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-msgpack/codec/fast-path.generated.go
@@ -0,0 +1,33668 @@
+// +build !notfastpath
+
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// Code generated from fast-path.go.tmpl - DO NOT EDIT.
+
+package codec
+
+// Fast path functions try to create a fast path encode or decode implementation
+// for common maps and slices.
+//
+// We define the functions and register them in this single file
+// so as not to pollute the encode.go and decode.go, and create a dependency in there.
+// This file can be omitted without causing a build failure.
+//
+// The advantage of fast paths is:
+// - Many calls bypass reflection altogether
+//
+// Currently supported:
+// - slice of all builtin types,
+// - map of all builtin types to string or interface value
+// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8)
+// This should provide adequate "typical" implementations.
+//
+// Note that fast track decode functions must handle values for which an address cannot be obtained.
+// For example:
+// m2 := map[string]int{}
+// p2 := []interface{}{m2}
+// // decoding into p2 will bomb if fast track functions do not treat it as unaddressable.
+//
+
+import (
+ "reflect"
+ "sort"
+)
+
+const fastpathEnabled = true
+
+const fastpathMapBySliceErrMsg = "mapBySlice requires even slice length, but got %v"
+
+type fastpathT struct{}
+
+var fastpathTV fastpathT
+
+type fastpathE struct {
+ rtid uintptr
+ rt reflect.Type
+ encfn func(*Encoder, *codecFnInfo, reflect.Value)
+ decfn func(*Decoder, *codecFnInfo, reflect.Value)
+}
+
+type fastpathA [271]fastpathE
+
+func (x *fastpathA) index(rtid uintptr) int {
+ // use binary search to grab the index (adapted from sort/search.go)
+ // Note: we use goto (instead of for loop) so this can be inlined.
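+ // For reference, the equivalent for-loop form of the search below is:
+ //
+ //	for i < j {
+ //		h = i + (j-i)/2
+ //		if x[h].rtid < rtid {
+ //			i = h + 1
+ //		} else {
+ //			j = h
+ //		}
+ //	}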
+ // h, i, j := 0, 0, len(x) + var h, i uint + var j = uint(len(x)) +LOOP: + if i < j { + h = i + (j-i)/2 + if x[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + goto LOOP + } + if i < uint(len(x)) && x[i].rtid == rtid { + return int(i) + } + return -1 +} + +type fastpathAslice []fastpathE + +func (x fastpathAslice) Len() int { return len(x) } +func (x fastpathAslice) Less(i, j int) bool { return x[uint(i)].rtid < x[uint(j)].rtid } +func (x fastpathAslice) Swap(i, j int) { x[uint(i)], x[uint(j)] = x[uint(j)], x[uint(i)] } + +var fastpathAV fastpathA + +// due to possible initialization loop error, make fastpath in an init() +func init() { + var i uint = 0 + fn := func(v interface{}, + fe func(*Encoder, *codecFnInfo, reflect.Value), + fd func(*Decoder, *codecFnInfo, reflect.Value)) { + xrt := reflect.TypeOf(v) + xptr := rt2id(xrt) + fastpathAV[i] = fastpathE{xptr, xrt, fe, fd} + i++ + } + + fn([]interface{}(nil), (*Encoder).fastpathEncSliceIntfR, (*Decoder).fastpathDecSliceIntfR) + fn([]string(nil), (*Encoder).fastpathEncSliceStringR, (*Decoder).fastpathDecSliceStringR) + fn([]float32(nil), (*Encoder).fastpathEncSliceFloat32R, (*Decoder).fastpathDecSliceFloat32R) + fn([]float64(nil), (*Encoder).fastpathEncSliceFloat64R, (*Decoder).fastpathDecSliceFloat64R) + fn([]uint(nil), (*Encoder).fastpathEncSliceUintR, (*Decoder).fastpathDecSliceUintR) + fn([]uint16(nil), (*Encoder).fastpathEncSliceUint16R, (*Decoder).fastpathDecSliceUint16R) + fn([]uint32(nil), (*Encoder).fastpathEncSliceUint32R, (*Decoder).fastpathDecSliceUint32R) + fn([]uint64(nil), (*Encoder).fastpathEncSliceUint64R, (*Decoder).fastpathDecSliceUint64R) + fn([]uintptr(nil), (*Encoder).fastpathEncSliceUintptrR, (*Decoder).fastpathDecSliceUintptrR) + fn([]int(nil), (*Encoder).fastpathEncSliceIntR, (*Decoder).fastpathDecSliceIntR) + fn([]int8(nil), (*Encoder).fastpathEncSliceInt8R, (*Decoder).fastpathDecSliceInt8R) + fn([]int16(nil), (*Encoder).fastpathEncSliceInt16R, (*Decoder).fastpathDecSliceInt16R) + fn([]int32(nil), (*Encoder).fastpathEncSliceInt32R, (*Decoder).fastpathDecSliceInt32R) + fn([]int64(nil), (*Encoder).fastpathEncSliceInt64R, (*Decoder).fastpathDecSliceInt64R) + fn([]bool(nil), (*Encoder).fastpathEncSliceBoolR, (*Decoder).fastpathDecSliceBoolR) + + fn(map[interface{}]interface{}(nil), (*Encoder).fastpathEncMapIntfIntfR, (*Decoder).fastpathDecMapIntfIntfR) + fn(map[interface{}]string(nil), (*Encoder).fastpathEncMapIntfStringR, (*Decoder).fastpathDecMapIntfStringR) + fn(map[interface{}]uint(nil), (*Encoder).fastpathEncMapIntfUintR, (*Decoder).fastpathDecMapIntfUintR) + fn(map[interface{}]uint8(nil), (*Encoder).fastpathEncMapIntfUint8R, (*Decoder).fastpathDecMapIntfUint8R) + fn(map[interface{}]uint16(nil), (*Encoder).fastpathEncMapIntfUint16R, (*Decoder).fastpathDecMapIntfUint16R) + fn(map[interface{}]uint32(nil), (*Encoder).fastpathEncMapIntfUint32R, (*Decoder).fastpathDecMapIntfUint32R) + fn(map[interface{}]uint64(nil), (*Encoder).fastpathEncMapIntfUint64R, (*Decoder).fastpathDecMapIntfUint64R) + fn(map[interface{}]uintptr(nil), (*Encoder).fastpathEncMapIntfUintptrR, (*Decoder).fastpathDecMapIntfUintptrR) + fn(map[interface{}]int(nil), (*Encoder).fastpathEncMapIntfIntR, (*Decoder).fastpathDecMapIntfIntR) + fn(map[interface{}]int8(nil), (*Encoder).fastpathEncMapIntfInt8R, (*Decoder).fastpathDecMapIntfInt8R) + fn(map[interface{}]int16(nil), (*Encoder).fastpathEncMapIntfInt16R, (*Decoder).fastpathDecMapIntfInt16R) + fn(map[interface{}]int32(nil), (*Encoder).fastpathEncMapIntfInt32R, 
(*Decoder).fastpathDecMapIntfInt32R) + fn(map[interface{}]int64(nil), (*Encoder).fastpathEncMapIntfInt64R, (*Decoder).fastpathDecMapIntfInt64R) + fn(map[interface{}]float32(nil), (*Encoder).fastpathEncMapIntfFloat32R, (*Decoder).fastpathDecMapIntfFloat32R) + fn(map[interface{}]float64(nil), (*Encoder).fastpathEncMapIntfFloat64R, (*Decoder).fastpathDecMapIntfFloat64R) + fn(map[interface{}]bool(nil), (*Encoder).fastpathEncMapIntfBoolR, (*Decoder).fastpathDecMapIntfBoolR) + fn(map[string]interface{}(nil), (*Encoder).fastpathEncMapStringIntfR, (*Decoder).fastpathDecMapStringIntfR) + fn(map[string]string(nil), (*Encoder).fastpathEncMapStringStringR, (*Decoder).fastpathDecMapStringStringR) + fn(map[string]uint(nil), (*Encoder).fastpathEncMapStringUintR, (*Decoder).fastpathDecMapStringUintR) + fn(map[string]uint8(nil), (*Encoder).fastpathEncMapStringUint8R, (*Decoder).fastpathDecMapStringUint8R) + fn(map[string]uint16(nil), (*Encoder).fastpathEncMapStringUint16R, (*Decoder).fastpathDecMapStringUint16R) + fn(map[string]uint32(nil), (*Encoder).fastpathEncMapStringUint32R, (*Decoder).fastpathDecMapStringUint32R) + fn(map[string]uint64(nil), (*Encoder).fastpathEncMapStringUint64R, (*Decoder).fastpathDecMapStringUint64R) + fn(map[string]uintptr(nil), (*Encoder).fastpathEncMapStringUintptrR, (*Decoder).fastpathDecMapStringUintptrR) + fn(map[string]int(nil), (*Encoder).fastpathEncMapStringIntR, (*Decoder).fastpathDecMapStringIntR) + fn(map[string]int8(nil), (*Encoder).fastpathEncMapStringInt8R, (*Decoder).fastpathDecMapStringInt8R) + fn(map[string]int16(nil), (*Encoder).fastpathEncMapStringInt16R, (*Decoder).fastpathDecMapStringInt16R) + fn(map[string]int32(nil), (*Encoder).fastpathEncMapStringInt32R, (*Decoder).fastpathDecMapStringInt32R) + fn(map[string]int64(nil), (*Encoder).fastpathEncMapStringInt64R, (*Decoder).fastpathDecMapStringInt64R) + fn(map[string]float32(nil), (*Encoder).fastpathEncMapStringFloat32R, (*Decoder).fastpathDecMapStringFloat32R) + fn(map[string]float64(nil), (*Encoder).fastpathEncMapStringFloat64R, (*Decoder).fastpathDecMapStringFloat64R) + fn(map[string]bool(nil), (*Encoder).fastpathEncMapStringBoolR, (*Decoder).fastpathDecMapStringBoolR) + fn(map[float32]interface{}(nil), (*Encoder).fastpathEncMapFloat32IntfR, (*Decoder).fastpathDecMapFloat32IntfR) + fn(map[float32]string(nil), (*Encoder).fastpathEncMapFloat32StringR, (*Decoder).fastpathDecMapFloat32StringR) + fn(map[float32]uint(nil), (*Encoder).fastpathEncMapFloat32UintR, (*Decoder).fastpathDecMapFloat32UintR) + fn(map[float32]uint8(nil), (*Encoder).fastpathEncMapFloat32Uint8R, (*Decoder).fastpathDecMapFloat32Uint8R) + fn(map[float32]uint16(nil), (*Encoder).fastpathEncMapFloat32Uint16R, (*Decoder).fastpathDecMapFloat32Uint16R) + fn(map[float32]uint32(nil), (*Encoder).fastpathEncMapFloat32Uint32R, (*Decoder).fastpathDecMapFloat32Uint32R) + fn(map[float32]uint64(nil), (*Encoder).fastpathEncMapFloat32Uint64R, (*Decoder).fastpathDecMapFloat32Uint64R) + fn(map[float32]uintptr(nil), (*Encoder).fastpathEncMapFloat32UintptrR, (*Decoder).fastpathDecMapFloat32UintptrR) + fn(map[float32]int(nil), (*Encoder).fastpathEncMapFloat32IntR, (*Decoder).fastpathDecMapFloat32IntR) + fn(map[float32]int8(nil), (*Encoder).fastpathEncMapFloat32Int8R, (*Decoder).fastpathDecMapFloat32Int8R) + fn(map[float32]int16(nil), (*Encoder).fastpathEncMapFloat32Int16R, (*Decoder).fastpathDecMapFloat32Int16R) + fn(map[float32]int32(nil), (*Encoder).fastpathEncMapFloat32Int32R, (*Decoder).fastpathDecMapFloat32Int32R) + fn(map[float32]int64(nil), 
(*Encoder).fastpathEncMapFloat32Int64R, (*Decoder).fastpathDecMapFloat32Int64R) + fn(map[float32]float32(nil), (*Encoder).fastpathEncMapFloat32Float32R, (*Decoder).fastpathDecMapFloat32Float32R) + fn(map[float32]float64(nil), (*Encoder).fastpathEncMapFloat32Float64R, (*Decoder).fastpathDecMapFloat32Float64R) + fn(map[float32]bool(nil), (*Encoder).fastpathEncMapFloat32BoolR, (*Decoder).fastpathDecMapFloat32BoolR) + fn(map[float64]interface{}(nil), (*Encoder).fastpathEncMapFloat64IntfR, (*Decoder).fastpathDecMapFloat64IntfR) + fn(map[float64]string(nil), (*Encoder).fastpathEncMapFloat64StringR, (*Decoder).fastpathDecMapFloat64StringR) + fn(map[float64]uint(nil), (*Encoder).fastpathEncMapFloat64UintR, (*Decoder).fastpathDecMapFloat64UintR) + fn(map[float64]uint8(nil), (*Encoder).fastpathEncMapFloat64Uint8R, (*Decoder).fastpathDecMapFloat64Uint8R) + fn(map[float64]uint16(nil), (*Encoder).fastpathEncMapFloat64Uint16R, (*Decoder).fastpathDecMapFloat64Uint16R) + fn(map[float64]uint32(nil), (*Encoder).fastpathEncMapFloat64Uint32R, (*Decoder).fastpathDecMapFloat64Uint32R) + fn(map[float64]uint64(nil), (*Encoder).fastpathEncMapFloat64Uint64R, (*Decoder).fastpathDecMapFloat64Uint64R) + fn(map[float64]uintptr(nil), (*Encoder).fastpathEncMapFloat64UintptrR, (*Decoder).fastpathDecMapFloat64UintptrR) + fn(map[float64]int(nil), (*Encoder).fastpathEncMapFloat64IntR, (*Decoder).fastpathDecMapFloat64IntR) + fn(map[float64]int8(nil), (*Encoder).fastpathEncMapFloat64Int8R, (*Decoder).fastpathDecMapFloat64Int8R) + fn(map[float64]int16(nil), (*Encoder).fastpathEncMapFloat64Int16R, (*Decoder).fastpathDecMapFloat64Int16R) + fn(map[float64]int32(nil), (*Encoder).fastpathEncMapFloat64Int32R, (*Decoder).fastpathDecMapFloat64Int32R) + fn(map[float64]int64(nil), (*Encoder).fastpathEncMapFloat64Int64R, (*Decoder).fastpathDecMapFloat64Int64R) + fn(map[float64]float32(nil), (*Encoder).fastpathEncMapFloat64Float32R, (*Decoder).fastpathDecMapFloat64Float32R) + fn(map[float64]float64(nil), (*Encoder).fastpathEncMapFloat64Float64R, (*Decoder).fastpathDecMapFloat64Float64R) + fn(map[float64]bool(nil), (*Encoder).fastpathEncMapFloat64BoolR, (*Decoder).fastpathDecMapFloat64BoolR) + fn(map[uint]interface{}(nil), (*Encoder).fastpathEncMapUintIntfR, (*Decoder).fastpathDecMapUintIntfR) + fn(map[uint]string(nil), (*Encoder).fastpathEncMapUintStringR, (*Decoder).fastpathDecMapUintStringR) + fn(map[uint]uint(nil), (*Encoder).fastpathEncMapUintUintR, (*Decoder).fastpathDecMapUintUintR) + fn(map[uint]uint8(nil), (*Encoder).fastpathEncMapUintUint8R, (*Decoder).fastpathDecMapUintUint8R) + fn(map[uint]uint16(nil), (*Encoder).fastpathEncMapUintUint16R, (*Decoder).fastpathDecMapUintUint16R) + fn(map[uint]uint32(nil), (*Encoder).fastpathEncMapUintUint32R, (*Decoder).fastpathDecMapUintUint32R) + fn(map[uint]uint64(nil), (*Encoder).fastpathEncMapUintUint64R, (*Decoder).fastpathDecMapUintUint64R) + fn(map[uint]uintptr(nil), (*Encoder).fastpathEncMapUintUintptrR, (*Decoder).fastpathDecMapUintUintptrR) + fn(map[uint]int(nil), (*Encoder).fastpathEncMapUintIntR, (*Decoder).fastpathDecMapUintIntR) + fn(map[uint]int8(nil), (*Encoder).fastpathEncMapUintInt8R, (*Decoder).fastpathDecMapUintInt8R) + fn(map[uint]int16(nil), (*Encoder).fastpathEncMapUintInt16R, (*Decoder).fastpathDecMapUintInt16R) + fn(map[uint]int32(nil), (*Encoder).fastpathEncMapUintInt32R, (*Decoder).fastpathDecMapUintInt32R) + fn(map[uint]int64(nil), (*Encoder).fastpathEncMapUintInt64R, (*Decoder).fastpathDecMapUintInt64R) + fn(map[uint]float32(nil), 
(*Encoder).fastpathEncMapUintFloat32R, (*Decoder).fastpathDecMapUintFloat32R) + fn(map[uint]float64(nil), (*Encoder).fastpathEncMapUintFloat64R, (*Decoder).fastpathDecMapUintFloat64R) + fn(map[uint]bool(nil), (*Encoder).fastpathEncMapUintBoolR, (*Decoder).fastpathDecMapUintBoolR) + fn(map[uint8]interface{}(nil), (*Encoder).fastpathEncMapUint8IntfR, (*Decoder).fastpathDecMapUint8IntfR) + fn(map[uint8]string(nil), (*Encoder).fastpathEncMapUint8StringR, (*Decoder).fastpathDecMapUint8StringR) + fn(map[uint8]uint(nil), (*Encoder).fastpathEncMapUint8UintR, (*Decoder).fastpathDecMapUint8UintR) + fn(map[uint8]uint8(nil), (*Encoder).fastpathEncMapUint8Uint8R, (*Decoder).fastpathDecMapUint8Uint8R) + fn(map[uint8]uint16(nil), (*Encoder).fastpathEncMapUint8Uint16R, (*Decoder).fastpathDecMapUint8Uint16R) + fn(map[uint8]uint32(nil), (*Encoder).fastpathEncMapUint8Uint32R, (*Decoder).fastpathDecMapUint8Uint32R) + fn(map[uint8]uint64(nil), (*Encoder).fastpathEncMapUint8Uint64R, (*Decoder).fastpathDecMapUint8Uint64R) + fn(map[uint8]uintptr(nil), (*Encoder).fastpathEncMapUint8UintptrR, (*Decoder).fastpathDecMapUint8UintptrR) + fn(map[uint8]int(nil), (*Encoder).fastpathEncMapUint8IntR, (*Decoder).fastpathDecMapUint8IntR) + fn(map[uint8]int8(nil), (*Encoder).fastpathEncMapUint8Int8R, (*Decoder).fastpathDecMapUint8Int8R) + fn(map[uint8]int16(nil), (*Encoder).fastpathEncMapUint8Int16R, (*Decoder).fastpathDecMapUint8Int16R) + fn(map[uint8]int32(nil), (*Encoder).fastpathEncMapUint8Int32R, (*Decoder).fastpathDecMapUint8Int32R) + fn(map[uint8]int64(nil), (*Encoder).fastpathEncMapUint8Int64R, (*Decoder).fastpathDecMapUint8Int64R) + fn(map[uint8]float32(nil), (*Encoder).fastpathEncMapUint8Float32R, (*Decoder).fastpathDecMapUint8Float32R) + fn(map[uint8]float64(nil), (*Encoder).fastpathEncMapUint8Float64R, (*Decoder).fastpathDecMapUint8Float64R) + fn(map[uint8]bool(nil), (*Encoder).fastpathEncMapUint8BoolR, (*Decoder).fastpathDecMapUint8BoolR) + fn(map[uint16]interface{}(nil), (*Encoder).fastpathEncMapUint16IntfR, (*Decoder).fastpathDecMapUint16IntfR) + fn(map[uint16]string(nil), (*Encoder).fastpathEncMapUint16StringR, (*Decoder).fastpathDecMapUint16StringR) + fn(map[uint16]uint(nil), (*Encoder).fastpathEncMapUint16UintR, (*Decoder).fastpathDecMapUint16UintR) + fn(map[uint16]uint8(nil), (*Encoder).fastpathEncMapUint16Uint8R, (*Decoder).fastpathDecMapUint16Uint8R) + fn(map[uint16]uint16(nil), (*Encoder).fastpathEncMapUint16Uint16R, (*Decoder).fastpathDecMapUint16Uint16R) + fn(map[uint16]uint32(nil), (*Encoder).fastpathEncMapUint16Uint32R, (*Decoder).fastpathDecMapUint16Uint32R) + fn(map[uint16]uint64(nil), (*Encoder).fastpathEncMapUint16Uint64R, (*Decoder).fastpathDecMapUint16Uint64R) + fn(map[uint16]uintptr(nil), (*Encoder).fastpathEncMapUint16UintptrR, (*Decoder).fastpathDecMapUint16UintptrR) + fn(map[uint16]int(nil), (*Encoder).fastpathEncMapUint16IntR, (*Decoder).fastpathDecMapUint16IntR) + fn(map[uint16]int8(nil), (*Encoder).fastpathEncMapUint16Int8R, (*Decoder).fastpathDecMapUint16Int8R) + fn(map[uint16]int16(nil), (*Encoder).fastpathEncMapUint16Int16R, (*Decoder).fastpathDecMapUint16Int16R) + fn(map[uint16]int32(nil), (*Encoder).fastpathEncMapUint16Int32R, (*Decoder).fastpathDecMapUint16Int32R) + fn(map[uint16]int64(nil), (*Encoder).fastpathEncMapUint16Int64R, (*Decoder).fastpathDecMapUint16Int64R) + fn(map[uint16]float32(nil), (*Encoder).fastpathEncMapUint16Float32R, (*Decoder).fastpathDecMapUint16Float32R) + fn(map[uint16]float64(nil), (*Encoder).fastpathEncMapUint16Float64R, 
(*Decoder).fastpathDecMapUint16Float64R) + fn(map[uint16]bool(nil), (*Encoder).fastpathEncMapUint16BoolR, (*Decoder).fastpathDecMapUint16BoolR) + fn(map[uint32]interface{}(nil), (*Encoder).fastpathEncMapUint32IntfR, (*Decoder).fastpathDecMapUint32IntfR) + fn(map[uint32]string(nil), (*Encoder).fastpathEncMapUint32StringR, (*Decoder).fastpathDecMapUint32StringR) + fn(map[uint32]uint(nil), (*Encoder).fastpathEncMapUint32UintR, (*Decoder).fastpathDecMapUint32UintR) + fn(map[uint32]uint8(nil), (*Encoder).fastpathEncMapUint32Uint8R, (*Decoder).fastpathDecMapUint32Uint8R) + fn(map[uint32]uint16(nil), (*Encoder).fastpathEncMapUint32Uint16R, (*Decoder).fastpathDecMapUint32Uint16R) + fn(map[uint32]uint32(nil), (*Encoder).fastpathEncMapUint32Uint32R, (*Decoder).fastpathDecMapUint32Uint32R) + fn(map[uint32]uint64(nil), (*Encoder).fastpathEncMapUint32Uint64R, (*Decoder).fastpathDecMapUint32Uint64R) + fn(map[uint32]uintptr(nil), (*Encoder).fastpathEncMapUint32UintptrR, (*Decoder).fastpathDecMapUint32UintptrR) + fn(map[uint32]int(nil), (*Encoder).fastpathEncMapUint32IntR, (*Decoder).fastpathDecMapUint32IntR) + fn(map[uint32]int8(nil), (*Encoder).fastpathEncMapUint32Int8R, (*Decoder).fastpathDecMapUint32Int8R) + fn(map[uint32]int16(nil), (*Encoder).fastpathEncMapUint32Int16R, (*Decoder).fastpathDecMapUint32Int16R) + fn(map[uint32]int32(nil), (*Encoder).fastpathEncMapUint32Int32R, (*Decoder).fastpathDecMapUint32Int32R) + fn(map[uint32]int64(nil), (*Encoder).fastpathEncMapUint32Int64R, (*Decoder).fastpathDecMapUint32Int64R) + fn(map[uint32]float32(nil), (*Encoder).fastpathEncMapUint32Float32R, (*Decoder).fastpathDecMapUint32Float32R) + fn(map[uint32]float64(nil), (*Encoder).fastpathEncMapUint32Float64R, (*Decoder).fastpathDecMapUint32Float64R) + fn(map[uint32]bool(nil), (*Encoder).fastpathEncMapUint32BoolR, (*Decoder).fastpathDecMapUint32BoolR) + fn(map[uint64]interface{}(nil), (*Encoder).fastpathEncMapUint64IntfR, (*Decoder).fastpathDecMapUint64IntfR) + fn(map[uint64]string(nil), (*Encoder).fastpathEncMapUint64StringR, (*Decoder).fastpathDecMapUint64StringR) + fn(map[uint64]uint(nil), (*Encoder).fastpathEncMapUint64UintR, (*Decoder).fastpathDecMapUint64UintR) + fn(map[uint64]uint8(nil), (*Encoder).fastpathEncMapUint64Uint8R, (*Decoder).fastpathDecMapUint64Uint8R) + fn(map[uint64]uint16(nil), (*Encoder).fastpathEncMapUint64Uint16R, (*Decoder).fastpathDecMapUint64Uint16R) + fn(map[uint64]uint32(nil), (*Encoder).fastpathEncMapUint64Uint32R, (*Decoder).fastpathDecMapUint64Uint32R) + fn(map[uint64]uint64(nil), (*Encoder).fastpathEncMapUint64Uint64R, (*Decoder).fastpathDecMapUint64Uint64R) + fn(map[uint64]uintptr(nil), (*Encoder).fastpathEncMapUint64UintptrR, (*Decoder).fastpathDecMapUint64UintptrR) + fn(map[uint64]int(nil), (*Encoder).fastpathEncMapUint64IntR, (*Decoder).fastpathDecMapUint64IntR) + fn(map[uint64]int8(nil), (*Encoder).fastpathEncMapUint64Int8R, (*Decoder).fastpathDecMapUint64Int8R) + fn(map[uint64]int16(nil), (*Encoder).fastpathEncMapUint64Int16R, (*Decoder).fastpathDecMapUint64Int16R) + fn(map[uint64]int32(nil), (*Encoder).fastpathEncMapUint64Int32R, (*Decoder).fastpathDecMapUint64Int32R) + fn(map[uint64]int64(nil), (*Encoder).fastpathEncMapUint64Int64R, (*Decoder).fastpathDecMapUint64Int64R) + fn(map[uint64]float32(nil), (*Encoder).fastpathEncMapUint64Float32R, (*Decoder).fastpathDecMapUint64Float32R) + fn(map[uint64]float64(nil), (*Encoder).fastpathEncMapUint64Float64R, (*Decoder).fastpathDecMapUint64Float64R) + fn(map[uint64]bool(nil), (*Encoder).fastpathEncMapUint64BoolR, 
(*Decoder).fastpathDecMapUint64BoolR)

[generated fast-path registrations elided: the hunk continues with one
fn(map[K]V(nil), (*Encoder).fastpathEncMapKVR, (*Decoder).fastpathDecMapKVR)
row for each remaining key type K (uintptr, int, int8, int16, int32, int64
and bool), crossed with every supported value type V (interface{}, string,
the uint and int families, uintptr, float32, float64 and bool).]
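Illustrative aside (not part of the vendored file): each fn(...) row registers an encoder/decoder pair for one concrete type, so round-tripping such a type skips the reflection-driven fallback entirely. A minimal sketch, assuming the vendored package is github.com/ugorji/go/codec:

    package main

    import (
    	"fmt"

    	"github.com/ugorji/go/codec"
    )

    func main() {
    	h := new(codec.CborHandle)

    	// map[int64]bool is one of the fast-path types registered above.
    	in := map[int64]bool{1: true, 2: false}
    	var buf []byte
    	if err := codec.NewEncoderBytes(&buf, h).Encode(in); err != nil {
    		panic(err)
    	}

    	out := map[int64]bool{}
    	if err := codec.NewDecoderBytes(buf, h).Decode(&out); err != nil {
    		panic(err)
    	}
    	fmt.Println(out) // map[1:true 2:false]
    }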
[generated dispatch code elided: the last bool-keyed rows complete the
registration table, which the init then sorts via
sort.Sort(fastpathAslice(fastpathAV[:])) so lookups can binary-search it.
The encode half opens with fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool,
a switch carrying a case for every supported slice type ([]interface{},
[]string, the numeric element types and []bool) and for every map[K]V
combination of the primitive key and value types, each paired with its
pointer form and forwarding to the matching fastpathTV.Enc...V helper. The
default case keeps _ = v as a workaround for
https://github.com/golang/go/issues/12927 (seen in go1.4) and returns false;
every matched case returns true.]
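Illustrative aside (not part of the vendored file): the switch matches concrete types and their pointer forms, so passing either a container or a pointer to one stays on the fast path. A minimal sketch, assuming github.com/ugorji/go/codec:

    package main

    import (
    	"fmt"

    	"github.com/ugorji/go/codec"
    )

    func main() {
    	var out []byte
    	enc := codec.NewEncoderBytes(&out, new(codec.JsonHandle))

    	// Both []int and *[]int have cases in fastpathEncodeTypeSwitch,
    	// so neither value falls back to reflection.
    	vals := []int{1, 2, 3}
    	if err := enc.Encode(vals); err != nil {
    		panic(err)
    	}
    	if err := enc.Encode(&vals); err != nil {
    		panic(err)
    	}
    	fmt.Println(string(out)) // [1,2,3][1,2,3]
    }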
[generated slice encoders elided: next come the per-element-type helpers.
For each element type T, fastpathEncSliceTR(f *codecFnInfo, rv reflect.Value)
checks f.ti.mbs and dispatches to either EncSliceTV, which writes EncodeNil
for nil slices and otherwise brackets the encoded elements with
WriteArrayStart/WriteArrayElem/WriteArrayEnd (string elements honor
e.h.StringToRaw), or EncAsMapSliceTV, which rejects odd-length slices with
fastpathMapBySliceErrMsg and otherwise emits the elements as alternating map
keys and values inside WriteMapStart(len(v)/2)/WriteMapEnd. The hunk repeats
this trio for interface{}, string, float32, float64, the uint and int
families, uintptr and bool.]
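Illustrative aside (not part of the vendored file): the EncAsMapSliceTV variants serve slice types that implement codec.MapBySlice, encoding [k1, v1, k2, v2, ...] as a map and erroring on odd lengths. A minimal sketch; the pairs type is hypothetical:

    package main

    import (
    	"fmt"

    	"github.com/ugorji/go/codec"
    )

    // pairs is a hypothetical slice type flagged as a map by
    // implementing the codec.MapBySlice tag interface.
    type pairs []interface{}

    func (pairs) MapBySlice() {}

    func main() {
    	var out []byte
    	h := new(codec.JsonHandle)
    	if err := codec.NewEncoderBytes(&out, h).Encode(pairs{"a", 1, "b", 2}); err != nil {
    		panic(err)
    	}
    	fmt.Println(string(out)) // {"a":1,"b":2}
    }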
[generated map encoders elided: EncSliceBoolV and EncAsMapSliceBoolV close
out the slice helpers, and the interface{}-keyed map encoders begin. Each
EncMapIntfTV writes EncodeNil for nil maps and otherwise opens with
WriteMapStart(len(v)). When e.h.Canonical is set, every key is pre-encoded
into a scratch buffer (NewEncoderBytes(&mksv, e.hh) plus MustEncode), the
resulting (encoded-bytes, key) pairs are sorted as a bytesISlice, and the map
is replayed in that order with e.asis so equal maps always serialize
identically; otherwise the map is ranged directly. WriteMapElemKey and
WriteMapElemValue separators are emitted only when the handle has element
separators, and string values honor e.h.StringToRaw. The hunk breaks off
partway through EncMapIntfInt8V.]
uint + var vp *bytesI + for k2 := range v { + l = uint(len(mksv)) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfInt16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfInt16V(rv2i(rv).(map[interface{}]int16), e) +} +func (_ fastpathT) EncMapIntfInt16V(v map[interface{}]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l uint + var vp *bytesI + for k2 := range v { + l = uint(len(mksv)) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfInt32V(rv2i(rv).(map[interface{}]int32), e) +} +func (_ fastpathT) EncMapIntfInt32V(v map[interface{}]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l uint + var vp *bytesI + for k2 := range v { + l = uint(len(mksv)) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfInt64V(rv2i(rv).(map[interface{}]int64), e) +} +func (_ fastpathT) EncMapIntfInt64V(v map[interface{}]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l uint + var vp *bytesI + for k2 := range v { + l = uint(len(mksv)) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + 
ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfFloat32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfFloat32V(rv2i(rv).(map[interface{}]float32), e) +} +func (_ fastpathT) EncMapIntfFloat32V(v map[interface{}]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l uint + var vp *bytesI + for k2 := range v { + l = uint(len(mksv)) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfFloat64V(rv2i(rv).(map[interface{}]float64), e) +} +func (_ fastpathT) EncMapIntfFloat64V(v map[interface{}]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l uint + var vp *bytesI + for k2 := range v { + l = uint(len(mksv)) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfBoolV(rv2i(rv).(map[interface{}]bool), e) +} +func (_ fastpathT) EncMapIntfBoolV(v map[interface{}]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l uint + var vp *bytesI + for k2 := range v { + l = uint(len(mksv)) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringIntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringIntfV(rv2i(rv).(map[string]interface{}), e) +} +func (_ fastpathT) EncMapStringIntfV(v map[string]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + 
if e.h.Canonical { + v2 := make([]string, len(v)) + var i uint + for k := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + e.encode(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringStringV(rv2i(rv).(map[string]string), e) +} +func (_ fastpathT) EncMapStringStringV(v map[string]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i uint + for k := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v[string(k2)])) + } else { + ee.EncodeStringEnc(cUTF8, v[string(k2)]) + } + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v2)) + } else { + ee.EncodeStringEnc(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringUintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUintV(rv2i(rv).(map[string]uint), e) +} +func (_ fastpathT) EncMapStringUintV(v map[string]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i uint + for k := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUint8V(rv2i(rv).(map[string]uint8), e) +} +func (_ fastpathT) EncMapStringUint8V(v map[string]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i uint + for k := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + 
ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUint16V(rv2i(rv).(map[string]uint16), e) +} +func (_ fastpathT) EncMapStringUint16V(v map[string]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i uint + for k := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUint32V(rv2i(rv).(map[string]uint32), e) +} +func (_ fastpathT) EncMapStringUint32V(v map[string]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i uint + for k := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringUint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUint64V(rv2i(rv).(map[string]uint64), e) +} +func (_ fastpathT) EncMapStringUint64V(v map[string]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i uint + for k := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) 
fastpathEncMapStringUintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUintptrV(rv2i(rv).(map[string]uintptr), e) +} +func (_ fastpathT) EncMapStringUintptrV(v map[string]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i uint + for k := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + e.encode(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringIntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringIntV(rv2i(rv).(map[string]int), e) +} +func (_ fastpathT) EncMapStringIntV(v map[string]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i uint + for k := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringInt8V(rv2i(rv).(map[string]int8), e) +} +func (_ fastpathT) EncMapStringInt8V(v map[string]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i uint + for k := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringInt16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringInt16V(rv2i(rv).(map[string]int16), e) +} +func (_ fastpathT) EncMapStringInt16V(v map[string]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i uint + for k := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + 
if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringInt32V(rv2i(rv).(map[string]int32), e) +} +func (_ fastpathT) EncMapStringInt32V(v map[string]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i uint + for k := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringInt64V(rv2i(rv).(map[string]int64), e) +} +func (_ fastpathT) EncMapStringInt64V(v map[string]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i uint + for k := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringFloat32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringFloat32V(rv2i(rv).(map[string]float32), e) +} +func (_ fastpathT) EncMapStringFloat32V(v map[string]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i uint + for k := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + 
ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringFloat64V(rv2i(rv).(map[string]float64), e) +} +func (_ fastpathT) EncMapStringFloat64V(v map[string]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i uint + for k := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringBoolV(rv2i(rv).(map[string]bool), e) +} +func (_ fastpathT) EncMapStringBoolV(v map[string]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i uint + for k := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(k2)) + } else { + ee.EncodeStringEnc(cUTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32IntfV(rv2i(rv).(map[float32]interface{}), e) +} +func (_ fastpathT) EncMapFloat32IntfV(v map[float32]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32StringV(rv2i(rv).(map[float32]string), e) +} +func (_ fastpathT) EncMapFloat32StringV(v map[float32]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if 
esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v[float32(k2)])) + } else { + ee.EncodeStringEnc(cUTF8, v[float32(k2)]) + } + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v2)) + } else { + ee.EncodeStringEnc(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32UintV(rv2i(rv).(map[float32]uint), e) +} +func (_ fastpathT) EncMapFloat32UintV(v map[float32]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Uint8V(rv2i(rv).(map[float32]uint8), e) +} +func (_ fastpathT) EncMapFloat32Uint8V(v map[float32]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Uint16V(rv2i(rv).(map[float32]uint16), e) +} +func (_ fastpathT) EncMapFloat32Uint16V(v map[float32]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Uint32V(rv2i(rv).(map[float32]uint32), e) +} +func (_ fastpathT) EncMapFloat32Uint32V(v map[float32]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + 
sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Uint64V(rv2i(rv).(map[float32]uint64), e) +} +func (_ fastpathT) EncMapFloat32Uint64V(v map[float32]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32UintptrV(rv2i(rv).(map[float32]uintptr), e) +} +func (_ fastpathT) EncMapFloat32UintptrV(v map[float32]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32IntV(rv2i(rv).(map[float32]int), e) +} +func (_ fastpathT) EncMapFloat32IntV(v map[float32]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Int8V(rv2i(rv).(map[float32]int8), e) +} +func (_ fastpathT) EncMapFloat32Int8V(v map[float32]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + 
ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Int16V(rv2i(rv).(map[float32]int16), e) +} +func (_ fastpathT) EncMapFloat32Int16V(v map[float32]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Int32V(rv2i(rv).(map[float32]int32), e) +} +func (_ fastpathT) EncMapFloat32Int32V(v map[float32]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Int64V(rv2i(rv).(map[float32]int64), e) +} +func (_ fastpathT) EncMapFloat32Int64V(v map[float32]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Float32V(rv2i(rv).(map[float32]float32), e) +} +func (_ fastpathT) EncMapFloat32Float32V(v map[float32]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + 
} + ee.EncodeFloat32(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Float64V(rv2i(rv).(map[float32]float64), e) +} +func (_ fastpathT) EncMapFloat32Float64V(v map[float32]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32BoolV(rv2i(rv).(map[float32]bool), e) +} +func (_ fastpathT) EncMapFloat32BoolV(v map[float32]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64IntfV(rv2i(rv).(map[float64]interface{}), e) +} +func (_ fastpathT) EncMapFloat64IntfV(v map[float64]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64StringV(rv2i(rv).(map[float64]string), e) +} +func (_ fastpathT) EncMapFloat64StringV(v map[float64]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v[float64(k2)])) + } else { + 
ee.EncodeStringEnc(cUTF8, v[float64(k2)]) + } + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v2)) + } else { + ee.EncodeStringEnc(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64UintV(rv2i(rv).(map[float64]uint), e) +} +func (_ fastpathT) EncMapFloat64UintV(v map[float64]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Uint8V(rv2i(rv).(map[float64]uint8), e) +} +func (_ fastpathT) EncMapFloat64Uint8V(v map[float64]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Uint16V(rv2i(rv).(map[float64]uint16), e) +} +func (_ fastpathT) EncMapFloat64Uint16V(v map[float64]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Uint32V(rv2i(rv).(map[float64]uint32), e) +} +func (_ fastpathT) EncMapFloat64Uint32V(v map[float64]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep 
{ + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Uint64V(rv2i(rv).(map[float64]uint64), e) +} +func (_ fastpathT) EncMapFloat64Uint64V(v map[float64]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64UintptrV(rv2i(rv).(map[float64]uintptr), e) +} +func (_ fastpathT) EncMapFloat64UintptrV(v map[float64]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64IntV(rv2i(rv).(map[float64]int), e) +} +func (_ fastpathT) EncMapFloat64IntV(v map[float64]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Int8V(rv2i(rv).(map[float64]int8), e) +} +func (_ fastpathT) EncMapFloat64Int8V(v map[float64]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for 
k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Int16V(rv2i(rv).(map[float64]int16), e) +} +func (_ fastpathT) EncMapFloat64Int16V(v map[float64]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Int32V(rv2i(rv).(map[float64]int32), e) +} +func (_ fastpathT) EncMapFloat64Int32V(v map[float64]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Int64V(rv2i(rv).(map[float64]int64), e) +} +func (_ fastpathT) EncMapFloat64Int64V(v map[float64]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Float32V(rv2i(rv).(map[float64]float32), e) +} +func (_ fastpathT) EncMapFloat64Float32V(v map[float64]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + 
ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Float64V(rv2i(rv).(map[float64]float64), e) +} +func (_ fastpathT) EncMapFloat64Float64V(v map[float64]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64BoolV(rv2i(rv).(map[float64]bool), e) +} +func (_ fastpathT) EncMapFloat64BoolV(v map[float64]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i uint + for k := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintIntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintIntfV(rv2i(rv).(map[uint]interface{}), e) +} +func (_ fastpathT) EncMapUintIntfV(v map[uint]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintStringV(rv2i(rv).(map[uint]string), e) +} +func (_ fastpathT) EncMapUintStringV(v map[uint]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v[uint(k2)])) + } else { + ee.EncodeStringEnc(cUTF8, v[uint(k2)]) + } + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + 
ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v2)) + } else { + ee.EncodeStringEnc(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintUintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUintV(rv2i(rv).(map[uint]uint), e) +} +func (_ fastpathT) EncMapUintUintV(v map[uint]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUint8V(rv2i(rv).(map[uint]uint8), e) +} +func (_ fastpathT) EncMapUintUint8V(v map[uint]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUint16V(rv2i(rv).(map[uint]uint16), e) +} +func (_ fastpathT) EncMapUintUint16V(v map[uint]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUint32V(rv2i(rv).(map[uint]uint32), e) +} +func (_ fastpathT) EncMapUintUint32V(v map[uint]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + 
ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintUint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUint64V(rv2i(rv).(map[uint]uint64), e) +} +func (_ fastpathT) EncMapUintUint64V(v map[uint]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintUintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUintptrV(rv2i(rv).(map[uint]uintptr), e) +} +func (_ fastpathT) EncMapUintUintptrV(v map[uint]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintIntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintIntV(rv2i(rv).(map[uint]int), e) +} +func (_ fastpathT) EncMapUintIntV(v map[uint]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintInt8V(rv2i(rv).(map[uint]int8), e) +} +func (_ fastpathT) EncMapUintInt8V(v map[uint]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintInt16R(f *codecFnInfo, rv reflect.Value) { + 
fastpathTV.EncMapUintInt16V(rv2i(rv).(map[uint]int16), e) +} +func (_ fastpathT) EncMapUintInt16V(v map[uint]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintInt32V(rv2i(rv).(map[uint]int32), e) +} +func (_ fastpathT) EncMapUintInt32V(v map[uint]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintInt64V(rv2i(rv).(map[uint]int64), e) +} +func (_ fastpathT) EncMapUintInt64V(v map[uint]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintFloat32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintFloat32V(rv2i(rv).(map[uint]float32), e) +} +func (_ fastpathT) EncMapUintFloat32V(v map[uint]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintFloat64V(rv2i(rv).(map[uint]float64), e) +} +func (_ fastpathT) EncMapUintFloat64V(v map[uint]float64, e 
*Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintBoolV(rv2i(rv).(map[uint]bool), e) +} +func (_ fastpathT) EncMapUintBoolV(v map[uint]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), e) +} +func (_ fastpathT) EncMapUint8IntfV(v map[uint8]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8StringV(rv2i(rv).(map[uint8]string), e) +} +func (_ fastpathT) EncMapUint8StringV(v map[uint8]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v[uint8(k2)])) + } else { + ee.EncodeStringEnc(cUTF8, v[uint8(k2)]) + } + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v2)) + } else { + ee.EncodeStringEnc(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8UintV(rv2i(rv).(map[uint8]uint), e) +} +func (_ fastpathT) 
EncMapUint8UintV(v map[uint8]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), e) +} +func (_ fastpathT) EncMapUint8Uint8V(v map[uint8]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Uint16V(rv2i(rv).(map[uint8]uint16), e) +} +func (_ fastpathT) EncMapUint8Uint16V(v map[uint8]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Uint32V(rv2i(rv).(map[uint8]uint32), e) +} +func (_ fastpathT) EncMapUint8Uint32V(v map[uint8]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), e) +} +func (_ fastpathT) EncMapUint8Uint64V(v map[uint8]uint64, e *Encoder) { + if v == nil { + 
e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8UintptrV(rv2i(rv).(map[uint8]uintptr), e) +} +func (_ fastpathT) EncMapUint8UintptrV(v map[uint8]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8IntV(rv2i(rv).(map[uint8]int), e) +} +func (_ fastpathT) EncMapUint8IntV(v map[uint8]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Int8V(rv2i(rv).(map[uint8]int8), e) +} +func (_ fastpathT) EncMapUint8Int8V(v map[uint8]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Int16V(rv2i(rv).(map[uint8]int16), e) +} +func (_ fastpathT) EncMapUint8Int16V(v map[uint8]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := 
make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Int32V(rv2i(rv).(map[uint8]int32), e) +} +func (_ fastpathT) EncMapUint8Int32V(v map[uint8]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Int64V(rv2i(rv).(map[uint8]int64), e) +} +func (_ fastpathT) EncMapUint8Int64V(v map[uint8]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Float32V(rv2i(rv).(map[uint8]float32), e) +} +func (_ fastpathT) EncMapUint8Float32V(v map[uint8]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Float64V(rv2i(rv).(map[uint8]float64), e) +} +func (_ fastpathT) EncMapUint8Float64V(v map[uint8]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + 
sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8BoolV(rv2i(rv).(map[uint8]bool), e) +} +func (_ fastpathT) EncMapUint8BoolV(v map[uint8]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16IntfV(rv2i(rv).(map[uint16]interface{}), e) +} +func (_ fastpathT) EncMapUint16IntfV(v map[uint16]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16StringV(rv2i(rv).(map[uint16]string), e) +} +func (_ fastpathT) EncMapUint16StringV(v map[uint16]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v[uint16(k2)])) + } else { + ee.EncodeStringEnc(cUTF8, v[uint16(k2)]) + } + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v2)) + } else { + ee.EncodeStringEnc(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16UintV(rv2i(rv).(map[uint16]uint), e) +} +func (_ fastpathT) EncMapUint16UintV(v map[uint16]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i 
uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Uint8V(rv2i(rv).(map[uint16]uint8), e) +} +func (_ fastpathT) EncMapUint16Uint8V(v map[uint16]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Uint16V(rv2i(rv).(map[uint16]uint16), e) +} +func (_ fastpathT) EncMapUint16Uint16V(v map[uint16]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Uint32V(rv2i(rv).(map[uint16]uint32), e) +} +func (_ fastpathT) EncMapUint16Uint32V(v map[uint16]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Uint64V(rv2i(rv).(map[uint16]uint64), e) +} +func (_ fastpathT) EncMapUint16Uint64V(v map[uint16]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + 
i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16UintptrV(rv2i(rv).(map[uint16]uintptr), e) +} +func (_ fastpathT) EncMapUint16UintptrV(v map[uint16]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16IntV(rv2i(rv).(map[uint16]int), e) +} +func (_ fastpathT) EncMapUint16IntV(v map[uint16]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Int8V(rv2i(rv).(map[uint16]int8), e) +} +func (_ fastpathT) EncMapUint16Int8V(v map[uint16]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Int16V(rv2i(rv).(map[uint16]int16), e) +} +func (_ fastpathT) EncMapUint16Int16V(v map[uint16]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + 
ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Int32V(rv2i(rv).(map[uint16]int32), e) +} +func (_ fastpathT) EncMapUint16Int32V(v map[uint16]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Int64V(rv2i(rv).(map[uint16]int64), e) +} +func (_ fastpathT) EncMapUint16Int64V(v map[uint16]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Float32V(rv2i(rv).(map[uint16]float32), e) +} +func (_ fastpathT) EncMapUint16Float32V(v map[uint16]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Float64V(rv2i(rv).(map[uint16]float64), e) +} +func (_ fastpathT) EncMapUint16Float64V(v map[uint16]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + 
ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16BoolV(rv2i(rv).(map[uint16]bool), e) +} +func (_ fastpathT) EncMapUint16BoolV(v map[uint16]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32IntfV(rv2i(rv).(map[uint32]interface{}), e) +} +func (_ fastpathT) EncMapUint32IntfV(v map[uint32]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32StringV(rv2i(rv).(map[uint32]string), e) +} +func (_ fastpathT) EncMapUint32StringV(v map[uint32]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v[uint32(k2)])) + } else { + ee.EncodeStringEnc(cUTF8, v[uint32(k2)]) + } + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v2)) + } else { + ee.EncodeStringEnc(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32UintV(rv2i(rv).(map[uint32]uint), e) +} +func (_ fastpathT) EncMapUint32UintV(v map[uint32]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + 
ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Uint8V(rv2i(rv).(map[uint32]uint8), e) +} +func (_ fastpathT) EncMapUint32Uint8V(v map[uint32]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Uint16V(rv2i(rv).(map[uint32]uint16), e) +} +func (_ fastpathT) EncMapUint32Uint16V(v map[uint32]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Uint32V(rv2i(rv).(map[uint32]uint32), e) +} +func (_ fastpathT) EncMapUint32Uint32V(v map[uint32]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Uint64V(rv2i(rv).(map[uint32]uint64), e) +} +func (_ fastpathT) EncMapUint32Uint64V(v map[uint32]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + 
ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32UintptrV(rv2i(rv).(map[uint32]uintptr), e) +} +func (_ fastpathT) EncMapUint32UintptrV(v map[uint32]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32IntV(rv2i(rv).(map[uint32]int), e) +} +func (_ fastpathT) EncMapUint32IntV(v map[uint32]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Int8V(rv2i(rv).(map[uint32]int8), e) +} +func (_ fastpathT) EncMapUint32Int8V(v map[uint32]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Int16V(rv2i(rv).(map[uint32]int16), e) +} +func (_ fastpathT) EncMapUint32Int16V(v map[uint32]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + 
ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Int32V(rv2i(rv).(map[uint32]int32), e) +} +func (_ fastpathT) EncMapUint32Int32V(v map[uint32]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Int64V(rv2i(rv).(map[uint32]int64), e) +} +func (_ fastpathT) EncMapUint32Int64V(v map[uint32]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Float32V(rv2i(rv).(map[uint32]float32), e) +} +func (_ fastpathT) EncMapUint32Float32V(v map[uint32]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Float64V(rv2i(rv).(map[uint32]float64), e) +} +func (_ fastpathT) EncMapUint32Float64V(v map[uint32]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[uint32(k2)]) + } + } else { + for k2, v2 := 
range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32BoolV(rv2i(rv).(map[uint32]bool), e) +} +func (_ fastpathT) EncMapUint32BoolV(v map[uint32]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), e) +} +func (_ fastpathT) EncMapUint64IntfV(v map[uint64]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64StringV(rv2i(rv).(map[uint64]string), e) +} +func (_ fastpathT) EncMapUint64StringV(v map[uint64]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v[uint64(k2)])) + } else { + ee.EncodeStringEnc(cUTF8, v[uint64(k2)]) + } + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v2)) + } else { + ee.EncodeStringEnc(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64UintV(rv2i(rv).(map[uint64]uint), e) +} +func (_ fastpathT) EncMapUint64UintV(v map[uint64]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + 
} + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), e) +} +func (_ fastpathT) EncMapUint64Uint8V(v map[uint64]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Uint16V(rv2i(rv).(map[uint64]uint16), e) +} +func (_ fastpathT) EncMapUint64Uint16V(v map[uint64]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Uint32V(rv2i(rv).(map[uint64]uint32), e) +} +func (_ fastpathT) EncMapUint64Uint32V(v map[uint64]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), e) +} +func (_ fastpathT) EncMapUint64Uint64V(v map[uint64]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + 
} else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64UintptrV(rv2i(rv).(map[uint64]uintptr), e) +} +func (_ fastpathT) EncMapUint64UintptrV(v map[uint64]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64IntV(rv2i(rv).(map[uint64]int), e) +} +func (_ fastpathT) EncMapUint64IntV(v map[uint64]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Int8V(rv2i(rv).(map[uint64]int8), e) +} +func (_ fastpathT) EncMapUint64Int8V(v map[uint64]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Int16V(rv2i(rv).(map[uint64]int16), e) +} +func (_ fastpathT) EncMapUint64Int16V(v map[uint64]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + 
if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Int32V(rv2i(rv).(map[uint64]int32), e) +} +func (_ fastpathT) EncMapUint64Int32V(v map[uint64]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Int64V(rv2i(rv).(map[uint64]int64), e) +} +func (_ fastpathT) EncMapUint64Int64V(v map[uint64]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Float32V(rv2i(rv).(map[uint64]float32), e) +} +func (_ fastpathT) EncMapUint64Float32V(v map[uint64]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Float64V(rv2i(rv).(map[uint64]float64), e) +} +func (_ fastpathT) EncMapUint64Float64V(v map[uint64]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + 
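// Every generated fastpath map encoder in this file follows the same shape:
// a nil check that writes the format's nil value, WriteMapStart(len(v)), then
// either the Canonical branch (keys copied into a slice, sorted, and looked
// up in order, so identical maps always produce identical bytes) or a plain
// range loop, and finally WriteMapEnd. The esep flag gates the
// WriteMapElemKey/WriteMapElemValue calls, which only formats with element
// separators (json, for example) actually emit. A minimal sketch of the
// canonical-ordering idea in plain Go, assuming only the standard library
// and a hypothetical emit helper:
//
//	keys := make([]uint64, 0, len(m))
//	for k := range m {
//		keys = append(keys, k)
//	}
//	sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
//	for _, k := range keys {
//		emit(k, m[k]) // deterministic order, independent of map iteration
//	}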
} + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64BoolV(rv2i(rv).(map[uint64]bool), e) +} +func (_ fastpathT) EncMapUint64BoolV(v map[uint64]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrIntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrIntfV(rv2i(rv).(map[uintptr]interface{}), e) +} +func (_ fastpathT) EncMapUintptrIntfV(v map[uintptr]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrStringV(rv2i(rv).(map[uintptr]string), e) +} +func (_ fastpathT) EncMapUintptrStringV(v map[uintptr]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v[uintptr(k2)])) + } else { + ee.EncodeStringEnc(cUTF8, v[uintptr(k2)]) + } + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v2)) + } else { + ee.EncodeStringEnc(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrUintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrUintV(rv2i(rv).(map[uintptr]uint), e) +} +func (_ fastpathT) EncMapUintptrUintV(v map[uintptr]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + 
ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrUint8V(rv2i(rv).(map[uintptr]uint8), e) +} +func (_ fastpathT) EncMapUintptrUint8V(v map[uintptr]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrUint16V(rv2i(rv).(map[uintptr]uint16), e) +} +func (_ fastpathT) EncMapUintptrUint16V(v map[uintptr]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrUint32V(rv2i(rv).(map[uintptr]uint32), e) +} +func (_ fastpathT) EncMapUintptrUint32V(v map[uintptr]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrUint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrUint64V(rv2i(rv).(map[uintptr]uint64), e) +} +func (_ fastpathT) EncMapUintptrUint64V(v map[uintptr]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrUintptrR(f *codecFnInfo, 
rv reflect.Value) { + fastpathTV.EncMapUintptrUintptrV(rv2i(rv).(map[uintptr]uintptr), e) +} +func (_ fastpathT) EncMapUintptrUintptrV(v map[uintptr]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrIntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrIntV(rv2i(rv).(map[uintptr]int), e) +} +func (_ fastpathT) EncMapUintptrIntV(v map[uintptr]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrInt8V(rv2i(rv).(map[uintptr]int8), e) +} +func (_ fastpathT) EncMapUintptrInt8V(v map[uintptr]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrInt16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrInt16V(rv2i(rv).(map[uintptr]int16), e) +} +func (_ fastpathT) EncMapUintptrInt16V(v map[uintptr]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrInt32V(rv2i(rv).(map[uintptr]int32), e) +} +func (_ fastpathT) EncMapUintptrInt32V(v map[uintptr]int32, e *Encoder) { + if v == nil { + 
e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrInt64V(rv2i(rv).(map[uintptr]int64), e) +} +func (_ fastpathT) EncMapUintptrInt64V(v map[uintptr]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrFloat32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrFloat32V(rv2i(rv).(map[uintptr]float32), e) +} +func (_ fastpathT) EncMapUintptrFloat32V(v map[uintptr]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrFloat64V(rv2i(rv).(map[uintptr]float64), e) +} +func (_ fastpathT) EncMapUintptrFloat64V(v map[uintptr]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrBoolV(rv2i(rv).(map[uintptr]bool), e) +} +func (_ fastpathT) EncMapUintptrBoolV(v map[uintptr]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) 
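// From the caller's side, all of these sorted-key branches are driven by one
// flag on the handle. A hedged usage sketch (MsgpackHandle, Canonical,
// NewEncoderBytes, and Encode are the public codec API; the map literal is
// only an example):
//
//	var h codec.MsgpackHandle
//	h.Canonical = true // route map encoding through the sorted-key branch
//	m := map[uint64]uint32{3: 30, 1: 10, 2: 20}
//	var out []byte
//	if err := codec.NewEncoderBytes(&out, &h).Encode(m); err != nil {
//		panic(err)
//	}
//	// out is now byte-for-byte reproducible across runs
//
// Note also that uintptr keys, as in the function above, are widened to
// uint64 for sorting (uintSlice sorts a []uint64) and narrowed back to
// uintptr for the map lookup.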
+ var i uint + for k := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntIntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntIntfV(rv2i(rv).(map[int]interface{}), e) +} +func (_ fastpathT) EncMapIntIntfV(v map[int]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[int(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntStringV(rv2i(rv).(map[int]string), e) +} +func (_ fastpathT) EncMapIntStringV(v map[int]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v[int(k2)])) + } else { + ee.EncodeStringEnc(cUTF8, v[int(k2)]) + } + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v2)) + } else { + ee.EncodeStringEnc(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntUintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntUintV(rv2i(rv).(map[int]uint), e) +} +func (_ fastpathT) EncMapIntUintV(v map[int]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntUint8V(rv2i(rv).(map[int]uint8), e) +} +func (_ fastpathT) EncMapIntUint8V(v map[int]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = 
int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntUint16V(rv2i(rv).(map[int]uint16), e) +} +func (_ fastpathT) EncMapIntUint16V(v map[int]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntUint32V(rv2i(rv).(map[int]uint32), e) +} +func (_ fastpathT) EncMapIntUint32V(v map[int]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntUint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntUint64V(rv2i(rv).(map[int]uint64), e) +} +func (_ fastpathT) EncMapIntUint64V(v map[int]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntUintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntUintptrV(rv2i(rv).(map[int]uintptr), e) +} +func (_ fastpathT) EncMapIntUintptrV(v map[int]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if 
esep { + ee.WriteMapElemValue() + } + e.encode(v[int(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntIntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntIntV(rv2i(rv).(map[int]int), e) +} +func (_ fastpathT) EncMapIntIntV(v map[int]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntInt8V(rv2i(rv).(map[int]int8), e) +} +func (_ fastpathT) EncMapIntInt8V(v map[int]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntInt16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntInt16V(rv2i(rv).(map[int]int16), e) +} +func (_ fastpathT) EncMapIntInt16V(v map[int]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntInt32V(rv2i(rv).(map[int]int32), e) +} +func (_ fastpathT) EncMapIntInt32V(v map[int]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + 
ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntInt64V(rv2i(rv).(map[int]int64), e) +} +func (_ fastpathT) EncMapIntInt64V(v map[int]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntFloat32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntFloat32V(rv2i(rv).(map[int]float32), e) +} +func (_ fastpathT) EncMapIntFloat32V(v map[int]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[int(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntFloat64V(rv2i(rv).(map[int]float64), e) +} +func (_ fastpathT) EncMapIntFloat64V(v map[int]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[int(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntBoolV(rv2i(rv).(map[int]bool), e) +} +func (_ fastpathT) EncMapIntBoolV(v map[int]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[int(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8IntfV(rv2i(rv).(map[int8]interface{}), e) 
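// Each supported type comes as a pair: the fastpathEnc...R method is the
// entry the encoder's dispatch table invokes with a reflect.Value, and, as
// on the line above, it immediately recovers the concrete map (rv2i plus a
// type assertion) and forwards to the typed Enc...V body, which then runs
// with no further reflection. The same trick in miniature, with a
// hypothetical emit callback:
//
//	func encodeViaReflect(rv reflect.Value, emit func(map[int8]interface{})) {
//		m := rv.Interface().(map[int8]interface{}) // one assertion, then native speed
//		emit(m)
//	}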
+} +func (_ fastpathT) EncMapInt8IntfV(v map[int8]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8StringV(rv2i(rv).(map[int8]string), e) +} +func (_ fastpathT) EncMapInt8StringV(v map[int8]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v[int8(k2)])) + } else { + ee.EncodeStringEnc(cUTF8, v[int8(k2)]) + } + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v2)) + } else { + ee.EncodeStringEnc(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8UintV(rv2i(rv).(map[int8]uint), e) +} +func (_ fastpathT) EncMapInt8UintV(v map[int8]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Uint8V(rv2i(rv).(map[int8]uint8), e) +} +func (_ fastpathT) EncMapInt8Uint8V(v map[int8]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Uint16R(f *codecFnInfo, rv reflect.Value) { + 
fastpathTV.EncMapInt8Uint16V(rv2i(rv).(map[int8]uint16), e) +} +func (_ fastpathT) EncMapInt8Uint16V(v map[int8]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Uint32V(rv2i(rv).(map[int8]uint32), e) +} +func (_ fastpathT) EncMapInt8Uint32V(v map[int8]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Uint64V(rv2i(rv).(map[int8]uint64), e) +} +func (_ fastpathT) EncMapInt8Uint64V(v map[int8]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8UintptrV(rv2i(rv).(map[int8]uintptr), e) +} +func (_ fastpathT) EncMapInt8UintptrV(v map[int8]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8IntV(rv2i(rv).(map[int8]int), e) +} +func (_ fastpathT) EncMapInt8IntV(v map[int8]int, e *Encoder) { + if v == nil { + 
e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Int8V(rv2i(rv).(map[int8]int8), e) +} +func (_ fastpathT) EncMapInt8Int8V(v map[int8]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Int16V(rv2i(rv).(map[int8]int16), e) +} +func (_ fastpathT) EncMapInt8Int16V(v map[int8]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Int32V(rv2i(rv).(map[int8]int32), e) +} +func (_ fastpathT) EncMapInt8Int32V(v map[int8]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Int64V(rv2i(rv).(map[int8]int64), e) +} +func (_ fastpathT) EncMapInt8Int64V(v map[int8]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + 
for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Float32V(rv2i(rv).(map[int8]float32), e) +} +func (_ fastpathT) EncMapInt8Float32V(v map[int8]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Float64V(rv2i(rv).(map[int8]float64), e) +} +func (_ fastpathT) EncMapInt8Float64V(v map[int8]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8BoolV(rv2i(rv).(map[int8]bool), e) +} +func (_ fastpathT) EncMapInt8BoolV(v map[int8]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16IntfV(rv2i(rv).(map[int16]interface{}), e) +} +func (_ fastpathT) EncMapInt16IntfV(v map[int16]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + 
ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16StringV(rv2i(rv).(map[int16]string), e) +} +func (_ fastpathT) EncMapInt16StringV(v map[int16]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v[int16(k2)])) + } else { + ee.EncodeStringEnc(cUTF8, v[int16(k2)]) + } + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v2)) + } else { + ee.EncodeStringEnc(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16UintV(rv2i(rv).(map[int16]uint), e) +} +func (_ fastpathT) EncMapInt16UintV(v map[int16]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Uint8V(rv2i(rv).(map[int16]uint8), e) +} +func (_ fastpathT) EncMapInt16Uint8V(v map[int16]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Uint16V(rv2i(rv).(map[int16]uint16), e) +} +func (_ fastpathT) EncMapInt16Uint16V(v map[int16]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if 
esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Uint32V(rv2i(rv).(map[int16]uint32), e) +} +func (_ fastpathT) EncMapInt16Uint32V(v map[int16]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Uint64V(rv2i(rv).(map[int16]uint64), e) +} +func (_ fastpathT) EncMapInt16Uint64V(v map[int16]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16UintptrV(rv2i(rv).(map[int16]uintptr), e) +} +func (_ fastpathT) EncMapInt16UintptrV(v map[int16]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16IntV(rv2i(rv).(map[int16]int), e) +} +func (_ fastpathT) EncMapInt16IntV(v map[int16]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + 
ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Int8V(rv2i(rv).(map[int16]int8), e) +} +func (_ fastpathT) EncMapInt16Int8V(v map[int16]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Int16V(rv2i(rv).(map[int16]int16), e) +} +func (_ fastpathT) EncMapInt16Int16V(v map[int16]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Int32V(rv2i(rv).(map[int16]int32), e) +} +func (_ fastpathT) EncMapInt16Int32V(v map[int16]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Int64V(rv2i(rv).(map[int16]int64), e) +} +func (_ fastpathT) EncMapInt16Int64V(v map[int16]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) 
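// The int64(int16(k2)) round-trip in the Canonical branch above is
// deliberate: the sort slice holds keys widened to int64, the inner
// conversion narrows back to the exact original int16 key for the map
// lookup, and the outer widening feeds EncodeInt, whose parameter is int64.
// Both conversions are lossless for values that began as int16, so sorting
// in the widened domain preserves the numeric order of the original keys:
//
//	var k int16 = -5
//	wide := int64(k)    // -5
//	back := int16(wide) // still -5; narrowing undoes the widening exactly
//	_ = back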
+ if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Float32V(rv2i(rv).(map[int16]float32), e) +} +func (_ fastpathT) EncMapInt16Float32V(v map[int16]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Float64V(rv2i(rv).(map[int16]float64), e) +} +func (_ fastpathT) EncMapInt16Float64V(v map[int16]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16BoolV(rv2i(rv).(map[int16]bool), e) +} +func (_ fastpathT) EncMapInt16BoolV(v map[int16]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32IntfV(rv2i(rv).(map[int32]interface{}), e) +} +func (_ fastpathT) EncMapInt32IntfV(v map[int32]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32StringR(f 
*codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32StringV(rv2i(rv).(map[int32]string), e) +} +func (_ fastpathT) EncMapInt32StringV(v map[int32]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v[int32(k2)])) + } else { + ee.EncodeStringEnc(cUTF8, v[int32(k2)]) + } + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v2)) + } else { + ee.EncodeStringEnc(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32UintV(rv2i(rv).(map[int32]uint), e) +} +func (_ fastpathT) EncMapInt32UintV(v map[int32]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Uint8V(rv2i(rv).(map[int32]uint8), e) +} +func (_ fastpathT) EncMapInt32Uint8V(v map[int32]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Uint16V(rv2i(rv).(map[int32]uint16), e) +} +func (_ fastpathT) EncMapInt32Uint16V(v map[int32]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + 
ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Uint32V(rv2i(rv).(map[int32]uint32), e) +} +func (_ fastpathT) EncMapInt32Uint32V(v map[int32]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Uint64V(rv2i(rv).(map[int32]uint64), e) +} +func (_ fastpathT) EncMapInt32Uint64V(v map[int32]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32UintptrV(rv2i(rv).(map[int32]uintptr), e) +} +func (_ fastpathT) EncMapInt32UintptrV(v map[int32]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32IntV(rv2i(rv).(map[int32]int), e) +} +func (_ fastpathT) EncMapInt32IntV(v map[int32]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Int8R(f *codecFnInfo, rv reflect.Value) { + 
fastpathTV.EncMapInt32Int8V(rv2i(rv).(map[int32]int8), e) +} +func (_ fastpathT) EncMapInt32Int8V(v map[int32]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Int16V(rv2i(rv).(map[int32]int16), e) +} +func (_ fastpathT) EncMapInt32Int16V(v map[int32]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Int32V(rv2i(rv).(map[int32]int32), e) +} +func (_ fastpathT) EncMapInt32Int32V(v map[int32]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Int64V(rv2i(rv).(map[int32]int64), e) +} +func (_ fastpathT) EncMapInt32Int64V(v map[int32]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Float32V(rv2i(rv).(map[int32]float32), e) +} +func (_ fastpathT) EncMapInt32Float32V(v map[int32]float32, e 
*Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Float64V(rv2i(rv).(map[int32]float64), e) +} +func (_ fastpathT) EncMapInt32Float64V(v map[int32]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32BoolV(rv2i(rv).(map[int32]bool), e) +} +func (_ fastpathT) EncMapInt32BoolV(v map[int32]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64IntfV(rv2i(rv).(map[int64]interface{}), e) +} +func (_ fastpathT) EncMapInt64IntfV(v map[int64]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64StringV(rv2i(rv).(map[int64]string), e) +} +func (_ fastpathT) EncMapInt64StringV(v map[int64]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := 
make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v[int64(k2)])) + } else { + ee.EncodeStringEnc(cUTF8, v[int64(k2)]) + } + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v2)) + } else { + ee.EncodeStringEnc(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64UintV(rv2i(rv).(map[int64]uint), e) +} +func (_ fastpathT) EncMapInt64UintV(v map[int64]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Uint8V(rv2i(rv).(map[int64]uint8), e) +} +func (_ fastpathT) EncMapInt64Uint8V(v map[int64]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Uint16V(rv2i(rv).(map[int64]uint16), e) +} +func (_ fastpathT) EncMapInt64Uint16V(v map[int64]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Uint32V(rv2i(rv).(map[int64]uint32), e) +} +func (_ fastpathT) EncMapInt64Uint32V(v map[int64]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, 
e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Uint64V(rv2i(rv).(map[int64]uint64), e) +} +func (_ fastpathT) EncMapInt64Uint64V(v map[int64]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64UintptrV(rv2i(rv).(map[int64]uintptr), e) +} +func (_ fastpathT) EncMapInt64UintptrV(v map[int64]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64IntV(rv2i(rv).(map[int64]int), e) +} +func (_ fastpathT) EncMapInt64IntV(v map[int64]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Int8V(rv2i(rv).(map[int64]int8), e) +} +func (_ fastpathT) EncMapInt64Int8V(v map[int64]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = 
int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Int16V(rv2i(rv).(map[int64]int16), e) +} +func (_ fastpathT) EncMapInt64Int16V(v map[int64]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Int32V(rv2i(rv).(map[int64]int32), e) +} +func (_ fastpathT) EncMapInt64Int32V(v map[int64]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Int64V(rv2i(rv).(map[int64]int64), e) +} +func (_ fastpathT) EncMapInt64Int64V(v map[int64]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Float32V(rv2i(rv).(map[int64]float32), e) +} +func (_ fastpathT) EncMapInt64Float32V(v map[int64]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + 
ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Float64V(rv2i(rv).(map[int64]float64), e) +} +func (_ fastpathT) EncMapInt64Float64V(v map[int64]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64BoolV(rv2i(rv).(map[int64]bool), e) +} +func (_ fastpathT) EncMapInt64BoolV(v map[int64]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i uint + for k := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolIntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolIntfV(rv2i(rv).(map[bool]interface{}), e) +} +func (_ fastpathT) EncMapBoolIntfV(v map[bool]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i uint + for k := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolStringV(rv2i(rv).(map[bool]string), e) +} +func (_ fastpathT) EncMapBoolStringV(v map[bool]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i uint + for k := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v[bool(k2)])) + } else { + ee.EncodeStringEnc(cUTF8, 
v[bool(k2)]) + } + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(v2)) + } else { + ee.EncodeStringEnc(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolUintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUintV(rv2i(rv).(map[bool]uint), e) +} +func (_ fastpathT) EncMapBoolUintV(v map[bool]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i uint + for k := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUint8V(rv2i(rv).(map[bool]uint8), e) +} +func (_ fastpathT) EncMapBoolUint8V(v map[bool]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i uint + for k := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUint16V(rv2i(rv).(map[bool]uint16), e) +} +func (_ fastpathT) EncMapBoolUint16V(v map[bool]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i uint + for k := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUint32V(rv2i(rv).(map[bool]uint32), e) +} +func (_ fastpathT) EncMapBoolUint32V(v map[bool]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i uint + for k := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + 
ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolUint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUint64V(rv2i(rv).(map[bool]uint64), e) +} +func (_ fastpathT) EncMapBoolUint64V(v map[bool]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i uint + for k := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolUintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUintptrV(rv2i(rv).(map[bool]uintptr), e) +} +func (_ fastpathT) EncMapBoolUintptrV(v map[bool]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i uint + for k := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolIntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolIntV(rv2i(rv).(map[bool]int), e) +} +func (_ fastpathT) EncMapBoolIntV(v map[bool]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i uint + for k := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolInt8V(rv2i(rv).(map[bool]int8), e) +} +func (_ fastpathT) EncMapBoolInt8V(v map[bool]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i uint + for k := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolInt16R(f *codecFnInfo, rv reflect.Value) { + 
fastpathTV.EncMapBoolInt16V(rv2i(rv).(map[bool]int16), e) +} +func (_ fastpathT) EncMapBoolInt16V(v map[bool]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i uint + for k := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolInt32V(rv2i(rv).(map[bool]int32), e) +} +func (_ fastpathT) EncMapBoolInt32V(v map[bool]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i uint + for k := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolInt64V(rv2i(rv).(map[bool]int64), e) +} +func (_ fastpathT) EncMapBoolInt64V(v map[bool]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i uint + for k := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolFloat32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolFloat32V(rv2i(rv).(map[bool]float32), e) +} +func (_ fastpathT) EncMapBoolFloat32V(v map[bool]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i uint + for k := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolFloat64V(rv2i(rv).(map[bool]float64), e) +} +func (_ fastpathT) EncMapBoolFloat64V(v map[bool]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, 
+func (e *Encoder) fastpathEncMapBoolBoolR(f *codecFnInfo, rv reflect.Value) {
+	fastpathTV.EncMapBoolBoolV(rv2i(rv).(map[bool]bool), e)
+}
+func (_ fastpathT) EncMapBoolBoolV(v map[bool]bool, e *Encoder) {
+	if v == nil {
+		e.e.EncodeNil()
+		return
+	}
+	ee, esep := e.e, e.hh.hasElemSeparators()
+	ee.WriteMapStart(len(v))
+	if e.h.Canonical {
+		v2 := make([]bool, len(v))
+		var i uint
+		for k := range v {
+			v2[i] = bool(k)
+			i++
+		}
+		sort.Sort(boolSlice(v2))
+		for _, k2 := range v2 {
+			if esep {
+				ee.WriteMapElemKey()
+			}
+			ee.EncodeBool(bool(k2))
+			if esep {
+				ee.WriteMapElemValue()
+			}
+			ee.EncodeBool(v[bool(k2)])
+		}
+	} else {
+		for k2, v2 := range v {
+			if esep {
+				ee.WriteMapElemKey()
+			}
+			ee.EncodeBool(k2)
+			if esep {
+				ee.WriteMapElemValue()
+			}
+			ee.EncodeBool(v2)
+		}
+	}
+	ee.WriteMapEnd()
+}
+
+// -- decode
+
+// -- -- fast path type switch
+func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
+	var changed bool
+	switch v := iv.(type) {
+
+	case []interface{}:
+		var v2 []interface{}
+		v2, changed = fastpathTV.DecSliceIntfV(v, false, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	case *[]interface{}:
+		var v2 []interface{}
+		v2, changed = fastpathTV.DecSliceIntfV(*v, true, d)
+		if changed {
+			*v = v2
+		}

[... remaining type-switch cases elided: the same value/pointer case pair repeats for each supported slice type ([]string, []float32, []float64, []uint, []uint16, []uint32, []uint64, []uintptr, []int, []int8, []int16, []int32, []int64, []bool) and for every map key/value pairing, decoding in place via fastpathTV.Dec…V(v, false, d) in the value case and reassigning *v in the pointer case when the decode reports a change; the switch continues below from the map[uint8] cases ...]

+	case map[uint8]uint64:
+		fastpathTV.DecMapUint8Uint64V(v, false, d)
+	case *map[uint8]uint64:
+		var v2
map[uint8]uint64 + v2, changed = fastpathTV.DecMapUint8Uint64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]uintptr: + fastpathTV.DecMapUint8UintptrV(v, false, d) + case *map[uint8]uintptr: + var v2 map[uint8]uintptr + v2, changed = fastpathTV.DecMapUint8UintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]int: + fastpathTV.DecMapUint8IntV(v, false, d) + case *map[uint8]int: + var v2 map[uint8]int + v2, changed = fastpathTV.DecMapUint8IntV(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]int8: + fastpathTV.DecMapUint8Int8V(v, false, d) + case *map[uint8]int8: + var v2 map[uint8]int8 + v2, changed = fastpathTV.DecMapUint8Int8V(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]int16: + fastpathTV.DecMapUint8Int16V(v, false, d) + case *map[uint8]int16: + var v2 map[uint8]int16 + v2, changed = fastpathTV.DecMapUint8Int16V(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]int32: + fastpathTV.DecMapUint8Int32V(v, false, d) + case *map[uint8]int32: + var v2 map[uint8]int32 + v2, changed = fastpathTV.DecMapUint8Int32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]int64: + fastpathTV.DecMapUint8Int64V(v, false, d) + case *map[uint8]int64: + var v2 map[uint8]int64 + v2, changed = fastpathTV.DecMapUint8Int64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]float32: + fastpathTV.DecMapUint8Float32V(v, false, d) + case *map[uint8]float32: + var v2 map[uint8]float32 + v2, changed = fastpathTV.DecMapUint8Float32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]float64: + fastpathTV.DecMapUint8Float64V(v, false, d) + case *map[uint8]float64: + var v2 map[uint8]float64 + v2, changed = fastpathTV.DecMapUint8Float64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]bool: + fastpathTV.DecMapUint8BoolV(v, false, d) + case *map[uint8]bool: + var v2 map[uint8]bool + v2, changed = fastpathTV.DecMapUint8BoolV(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]interface{}: + fastpathTV.DecMapUint16IntfV(v, false, d) + case *map[uint16]interface{}: + var v2 map[uint16]interface{} + v2, changed = fastpathTV.DecMapUint16IntfV(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]string: + fastpathTV.DecMapUint16StringV(v, false, d) + case *map[uint16]string: + var v2 map[uint16]string + v2, changed = fastpathTV.DecMapUint16StringV(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]uint: + fastpathTV.DecMapUint16UintV(v, false, d) + case *map[uint16]uint: + var v2 map[uint16]uint + v2, changed = fastpathTV.DecMapUint16UintV(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]uint8: + fastpathTV.DecMapUint16Uint8V(v, false, d) + case *map[uint16]uint8: + var v2 map[uint16]uint8 + v2, changed = fastpathTV.DecMapUint16Uint8V(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]uint16: + fastpathTV.DecMapUint16Uint16V(v, false, d) + case *map[uint16]uint16: + var v2 map[uint16]uint16 + v2, changed = fastpathTV.DecMapUint16Uint16V(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]uint32: + fastpathTV.DecMapUint16Uint32V(v, false, d) + case *map[uint16]uint32: + var v2 map[uint16]uint32 + v2, changed = fastpathTV.DecMapUint16Uint32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]uint64: + fastpathTV.DecMapUint16Uint64V(v, false, d) + case *map[uint16]uint64: + var v2 map[uint16]uint64 + v2, changed = fastpathTV.DecMapUint16Uint64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]uintptr: + fastpathTV.DecMapUint16UintptrV(v, false, d) + case 
*map[uint16]uintptr: + var v2 map[uint16]uintptr + v2, changed = fastpathTV.DecMapUint16UintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]int: + fastpathTV.DecMapUint16IntV(v, false, d) + case *map[uint16]int: + var v2 map[uint16]int + v2, changed = fastpathTV.DecMapUint16IntV(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]int8: + fastpathTV.DecMapUint16Int8V(v, false, d) + case *map[uint16]int8: + var v2 map[uint16]int8 + v2, changed = fastpathTV.DecMapUint16Int8V(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]int16: + fastpathTV.DecMapUint16Int16V(v, false, d) + case *map[uint16]int16: + var v2 map[uint16]int16 + v2, changed = fastpathTV.DecMapUint16Int16V(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]int32: + fastpathTV.DecMapUint16Int32V(v, false, d) + case *map[uint16]int32: + var v2 map[uint16]int32 + v2, changed = fastpathTV.DecMapUint16Int32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]int64: + fastpathTV.DecMapUint16Int64V(v, false, d) + case *map[uint16]int64: + var v2 map[uint16]int64 + v2, changed = fastpathTV.DecMapUint16Int64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]float32: + fastpathTV.DecMapUint16Float32V(v, false, d) + case *map[uint16]float32: + var v2 map[uint16]float32 + v2, changed = fastpathTV.DecMapUint16Float32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]float64: + fastpathTV.DecMapUint16Float64V(v, false, d) + case *map[uint16]float64: + var v2 map[uint16]float64 + v2, changed = fastpathTV.DecMapUint16Float64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]bool: + fastpathTV.DecMapUint16BoolV(v, false, d) + case *map[uint16]bool: + var v2 map[uint16]bool + v2, changed = fastpathTV.DecMapUint16BoolV(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]interface{}: + fastpathTV.DecMapUint32IntfV(v, false, d) + case *map[uint32]interface{}: + var v2 map[uint32]interface{} + v2, changed = fastpathTV.DecMapUint32IntfV(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]string: + fastpathTV.DecMapUint32StringV(v, false, d) + case *map[uint32]string: + var v2 map[uint32]string + v2, changed = fastpathTV.DecMapUint32StringV(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]uint: + fastpathTV.DecMapUint32UintV(v, false, d) + case *map[uint32]uint: + var v2 map[uint32]uint + v2, changed = fastpathTV.DecMapUint32UintV(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]uint8: + fastpathTV.DecMapUint32Uint8V(v, false, d) + case *map[uint32]uint8: + var v2 map[uint32]uint8 + v2, changed = fastpathTV.DecMapUint32Uint8V(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]uint16: + fastpathTV.DecMapUint32Uint16V(v, false, d) + case *map[uint32]uint16: + var v2 map[uint32]uint16 + v2, changed = fastpathTV.DecMapUint32Uint16V(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]uint32: + fastpathTV.DecMapUint32Uint32V(v, false, d) + case *map[uint32]uint32: + var v2 map[uint32]uint32 + v2, changed = fastpathTV.DecMapUint32Uint32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]uint64: + fastpathTV.DecMapUint32Uint64V(v, false, d) + case *map[uint32]uint64: + var v2 map[uint32]uint64 + v2, changed = fastpathTV.DecMapUint32Uint64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]uintptr: + fastpathTV.DecMapUint32UintptrV(v, false, d) + case *map[uint32]uintptr: + var v2 map[uint32]uintptr + v2, changed = fastpathTV.DecMapUint32UintptrV(*v, true, d) + if changed { + *v = v2 + } + case 
map[uint32]int: + fastpathTV.DecMapUint32IntV(v, false, d) + case *map[uint32]int: + var v2 map[uint32]int + v2, changed = fastpathTV.DecMapUint32IntV(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]int8: + fastpathTV.DecMapUint32Int8V(v, false, d) + case *map[uint32]int8: + var v2 map[uint32]int8 + v2, changed = fastpathTV.DecMapUint32Int8V(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]int16: + fastpathTV.DecMapUint32Int16V(v, false, d) + case *map[uint32]int16: + var v2 map[uint32]int16 + v2, changed = fastpathTV.DecMapUint32Int16V(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]int32: + fastpathTV.DecMapUint32Int32V(v, false, d) + case *map[uint32]int32: + var v2 map[uint32]int32 + v2, changed = fastpathTV.DecMapUint32Int32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]int64: + fastpathTV.DecMapUint32Int64V(v, false, d) + case *map[uint32]int64: + var v2 map[uint32]int64 + v2, changed = fastpathTV.DecMapUint32Int64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]float32: + fastpathTV.DecMapUint32Float32V(v, false, d) + case *map[uint32]float32: + var v2 map[uint32]float32 + v2, changed = fastpathTV.DecMapUint32Float32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]float64: + fastpathTV.DecMapUint32Float64V(v, false, d) + case *map[uint32]float64: + var v2 map[uint32]float64 + v2, changed = fastpathTV.DecMapUint32Float64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]bool: + fastpathTV.DecMapUint32BoolV(v, false, d) + case *map[uint32]bool: + var v2 map[uint32]bool + v2, changed = fastpathTV.DecMapUint32BoolV(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]interface{}: + fastpathTV.DecMapUint64IntfV(v, false, d) + case *map[uint64]interface{}: + var v2 map[uint64]interface{} + v2, changed = fastpathTV.DecMapUint64IntfV(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]string: + fastpathTV.DecMapUint64StringV(v, false, d) + case *map[uint64]string: + var v2 map[uint64]string + v2, changed = fastpathTV.DecMapUint64StringV(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]uint: + fastpathTV.DecMapUint64UintV(v, false, d) + case *map[uint64]uint: + var v2 map[uint64]uint + v2, changed = fastpathTV.DecMapUint64UintV(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]uint8: + fastpathTV.DecMapUint64Uint8V(v, false, d) + case *map[uint64]uint8: + var v2 map[uint64]uint8 + v2, changed = fastpathTV.DecMapUint64Uint8V(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]uint16: + fastpathTV.DecMapUint64Uint16V(v, false, d) + case *map[uint64]uint16: + var v2 map[uint64]uint16 + v2, changed = fastpathTV.DecMapUint64Uint16V(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]uint32: + fastpathTV.DecMapUint64Uint32V(v, false, d) + case *map[uint64]uint32: + var v2 map[uint64]uint32 + v2, changed = fastpathTV.DecMapUint64Uint32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]uint64: + fastpathTV.DecMapUint64Uint64V(v, false, d) + case *map[uint64]uint64: + var v2 map[uint64]uint64 + v2, changed = fastpathTV.DecMapUint64Uint64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]uintptr: + fastpathTV.DecMapUint64UintptrV(v, false, d) + case *map[uint64]uintptr: + var v2 map[uint64]uintptr + v2, changed = fastpathTV.DecMapUint64UintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]int: + fastpathTV.DecMapUint64IntV(v, false, d) + case *map[uint64]int: + var v2 map[uint64]int + v2, changed = 
fastpathTV.DecMapUint64IntV(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]int8: + fastpathTV.DecMapUint64Int8V(v, false, d) + case *map[uint64]int8: + var v2 map[uint64]int8 + v2, changed = fastpathTV.DecMapUint64Int8V(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]int16: + fastpathTV.DecMapUint64Int16V(v, false, d) + case *map[uint64]int16: + var v2 map[uint64]int16 + v2, changed = fastpathTV.DecMapUint64Int16V(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]int32: + fastpathTV.DecMapUint64Int32V(v, false, d) + case *map[uint64]int32: + var v2 map[uint64]int32 + v2, changed = fastpathTV.DecMapUint64Int32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]int64: + fastpathTV.DecMapUint64Int64V(v, false, d) + case *map[uint64]int64: + var v2 map[uint64]int64 + v2, changed = fastpathTV.DecMapUint64Int64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]float32: + fastpathTV.DecMapUint64Float32V(v, false, d) + case *map[uint64]float32: + var v2 map[uint64]float32 + v2, changed = fastpathTV.DecMapUint64Float32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]float64: + fastpathTV.DecMapUint64Float64V(v, false, d) + case *map[uint64]float64: + var v2 map[uint64]float64 + v2, changed = fastpathTV.DecMapUint64Float64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]bool: + fastpathTV.DecMapUint64BoolV(v, false, d) + case *map[uint64]bool: + var v2 map[uint64]bool + v2, changed = fastpathTV.DecMapUint64BoolV(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]interface{}: + fastpathTV.DecMapUintptrIntfV(v, false, d) + case *map[uintptr]interface{}: + var v2 map[uintptr]interface{} + v2, changed = fastpathTV.DecMapUintptrIntfV(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]string: + fastpathTV.DecMapUintptrStringV(v, false, d) + case *map[uintptr]string: + var v2 map[uintptr]string + v2, changed = fastpathTV.DecMapUintptrStringV(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]uint: + fastpathTV.DecMapUintptrUintV(v, false, d) + case *map[uintptr]uint: + var v2 map[uintptr]uint + v2, changed = fastpathTV.DecMapUintptrUintV(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]uint8: + fastpathTV.DecMapUintptrUint8V(v, false, d) + case *map[uintptr]uint8: + var v2 map[uintptr]uint8 + v2, changed = fastpathTV.DecMapUintptrUint8V(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]uint16: + fastpathTV.DecMapUintptrUint16V(v, false, d) + case *map[uintptr]uint16: + var v2 map[uintptr]uint16 + v2, changed = fastpathTV.DecMapUintptrUint16V(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]uint32: + fastpathTV.DecMapUintptrUint32V(v, false, d) + case *map[uintptr]uint32: + var v2 map[uintptr]uint32 + v2, changed = fastpathTV.DecMapUintptrUint32V(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]uint64: + fastpathTV.DecMapUintptrUint64V(v, false, d) + case *map[uintptr]uint64: + var v2 map[uintptr]uint64 + v2, changed = fastpathTV.DecMapUintptrUint64V(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]uintptr: + fastpathTV.DecMapUintptrUintptrV(v, false, d) + case *map[uintptr]uintptr: + var v2 map[uintptr]uintptr + v2, changed = fastpathTV.DecMapUintptrUintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]int: + fastpathTV.DecMapUintptrIntV(v, false, d) + case *map[uintptr]int: + var v2 map[uintptr]int + v2, changed = fastpathTV.DecMapUintptrIntV(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]int8: + 
fastpathTV.DecMapUintptrInt8V(v, false, d) + case *map[uintptr]int8: + var v2 map[uintptr]int8 + v2, changed = fastpathTV.DecMapUintptrInt8V(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]int16: + fastpathTV.DecMapUintptrInt16V(v, false, d) + case *map[uintptr]int16: + var v2 map[uintptr]int16 + v2, changed = fastpathTV.DecMapUintptrInt16V(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]int32: + fastpathTV.DecMapUintptrInt32V(v, false, d) + case *map[uintptr]int32: + var v2 map[uintptr]int32 + v2, changed = fastpathTV.DecMapUintptrInt32V(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]int64: + fastpathTV.DecMapUintptrInt64V(v, false, d) + case *map[uintptr]int64: + var v2 map[uintptr]int64 + v2, changed = fastpathTV.DecMapUintptrInt64V(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]float32: + fastpathTV.DecMapUintptrFloat32V(v, false, d) + case *map[uintptr]float32: + var v2 map[uintptr]float32 + v2, changed = fastpathTV.DecMapUintptrFloat32V(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]float64: + fastpathTV.DecMapUintptrFloat64V(v, false, d) + case *map[uintptr]float64: + var v2 map[uintptr]float64 + v2, changed = fastpathTV.DecMapUintptrFloat64V(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]bool: + fastpathTV.DecMapUintptrBoolV(v, false, d) + case *map[uintptr]bool: + var v2 map[uintptr]bool + v2, changed = fastpathTV.DecMapUintptrBoolV(*v, true, d) + if changed { + *v = v2 + } + case map[int]interface{}: + fastpathTV.DecMapIntIntfV(v, false, d) + case *map[int]interface{}: + var v2 map[int]interface{} + v2, changed = fastpathTV.DecMapIntIntfV(*v, true, d) + if changed { + *v = v2 + } + case map[int]string: + fastpathTV.DecMapIntStringV(v, false, d) + case *map[int]string: + var v2 map[int]string + v2, changed = fastpathTV.DecMapIntStringV(*v, true, d) + if changed { + *v = v2 + } + case map[int]uint: + fastpathTV.DecMapIntUintV(v, false, d) + case *map[int]uint: + var v2 map[int]uint + v2, changed = fastpathTV.DecMapIntUintV(*v, true, d) + if changed { + *v = v2 + } + case map[int]uint8: + fastpathTV.DecMapIntUint8V(v, false, d) + case *map[int]uint8: + var v2 map[int]uint8 + v2, changed = fastpathTV.DecMapIntUint8V(*v, true, d) + if changed { + *v = v2 + } + case map[int]uint16: + fastpathTV.DecMapIntUint16V(v, false, d) + case *map[int]uint16: + var v2 map[int]uint16 + v2, changed = fastpathTV.DecMapIntUint16V(*v, true, d) + if changed { + *v = v2 + } + case map[int]uint32: + fastpathTV.DecMapIntUint32V(v, false, d) + case *map[int]uint32: + var v2 map[int]uint32 + v2, changed = fastpathTV.DecMapIntUint32V(*v, true, d) + if changed { + *v = v2 + } + case map[int]uint64: + fastpathTV.DecMapIntUint64V(v, false, d) + case *map[int]uint64: + var v2 map[int]uint64 + v2, changed = fastpathTV.DecMapIntUint64V(*v, true, d) + if changed { + *v = v2 + } + case map[int]uintptr: + fastpathTV.DecMapIntUintptrV(v, false, d) + case *map[int]uintptr: + var v2 map[int]uintptr + v2, changed = fastpathTV.DecMapIntUintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[int]int: + fastpathTV.DecMapIntIntV(v, false, d) + case *map[int]int: + var v2 map[int]int + v2, changed = fastpathTV.DecMapIntIntV(*v, true, d) + if changed { + *v = v2 + } + case map[int]int8: + fastpathTV.DecMapIntInt8V(v, false, d) + case *map[int]int8: + var v2 map[int]int8 + v2, changed = fastpathTV.DecMapIntInt8V(*v, true, d) + if changed { + *v = v2 + } + case map[int]int16: + fastpathTV.DecMapIntInt16V(v, false, d) + case *map[int]int16: + 
var v2 map[int]int16 + v2, changed = fastpathTV.DecMapIntInt16V(*v, true, d) + if changed { + *v = v2 + } + case map[int]int32: + fastpathTV.DecMapIntInt32V(v, false, d) + case *map[int]int32: + var v2 map[int]int32 + v2, changed = fastpathTV.DecMapIntInt32V(*v, true, d) + if changed { + *v = v2 + } + case map[int]int64: + fastpathTV.DecMapIntInt64V(v, false, d) + case *map[int]int64: + var v2 map[int]int64 + v2, changed = fastpathTV.DecMapIntInt64V(*v, true, d) + if changed { + *v = v2 + } + case map[int]float32: + fastpathTV.DecMapIntFloat32V(v, false, d) + case *map[int]float32: + var v2 map[int]float32 + v2, changed = fastpathTV.DecMapIntFloat32V(*v, true, d) + if changed { + *v = v2 + } + case map[int]float64: + fastpathTV.DecMapIntFloat64V(v, false, d) + case *map[int]float64: + var v2 map[int]float64 + v2, changed = fastpathTV.DecMapIntFloat64V(*v, true, d) + if changed { + *v = v2 + } + case map[int]bool: + fastpathTV.DecMapIntBoolV(v, false, d) + case *map[int]bool: + var v2 map[int]bool + v2, changed = fastpathTV.DecMapIntBoolV(*v, true, d) + if changed { + *v = v2 + } + case map[int8]interface{}: + fastpathTV.DecMapInt8IntfV(v, false, d) + case *map[int8]interface{}: + var v2 map[int8]interface{} + v2, changed = fastpathTV.DecMapInt8IntfV(*v, true, d) + if changed { + *v = v2 + } + case map[int8]string: + fastpathTV.DecMapInt8StringV(v, false, d) + case *map[int8]string: + var v2 map[int8]string + v2, changed = fastpathTV.DecMapInt8StringV(*v, true, d) + if changed { + *v = v2 + } + case map[int8]uint: + fastpathTV.DecMapInt8UintV(v, false, d) + case *map[int8]uint: + var v2 map[int8]uint + v2, changed = fastpathTV.DecMapInt8UintV(*v, true, d) + if changed { + *v = v2 + } + case map[int8]uint8: + fastpathTV.DecMapInt8Uint8V(v, false, d) + case *map[int8]uint8: + var v2 map[int8]uint8 + v2, changed = fastpathTV.DecMapInt8Uint8V(*v, true, d) + if changed { + *v = v2 + } + case map[int8]uint16: + fastpathTV.DecMapInt8Uint16V(v, false, d) + case *map[int8]uint16: + var v2 map[int8]uint16 + v2, changed = fastpathTV.DecMapInt8Uint16V(*v, true, d) + if changed { + *v = v2 + } + case map[int8]uint32: + fastpathTV.DecMapInt8Uint32V(v, false, d) + case *map[int8]uint32: + var v2 map[int8]uint32 + v2, changed = fastpathTV.DecMapInt8Uint32V(*v, true, d) + if changed { + *v = v2 + } + case map[int8]uint64: + fastpathTV.DecMapInt8Uint64V(v, false, d) + case *map[int8]uint64: + var v2 map[int8]uint64 + v2, changed = fastpathTV.DecMapInt8Uint64V(*v, true, d) + if changed { + *v = v2 + } + case map[int8]uintptr: + fastpathTV.DecMapInt8UintptrV(v, false, d) + case *map[int8]uintptr: + var v2 map[int8]uintptr + v2, changed = fastpathTV.DecMapInt8UintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[int8]int: + fastpathTV.DecMapInt8IntV(v, false, d) + case *map[int8]int: + var v2 map[int8]int + v2, changed = fastpathTV.DecMapInt8IntV(*v, true, d) + if changed { + *v = v2 + } + case map[int8]int8: + fastpathTV.DecMapInt8Int8V(v, false, d) + case *map[int8]int8: + var v2 map[int8]int8 + v2, changed = fastpathTV.DecMapInt8Int8V(*v, true, d) + if changed { + *v = v2 + } + case map[int8]int16: + fastpathTV.DecMapInt8Int16V(v, false, d) + case *map[int8]int16: + var v2 map[int8]int16 + v2, changed = fastpathTV.DecMapInt8Int16V(*v, true, d) + if changed { + *v = v2 + } + case map[int8]int32: + fastpathTV.DecMapInt8Int32V(v, false, d) + case *map[int8]int32: + var v2 map[int8]int32 + v2, changed = fastpathTV.DecMapInt8Int32V(*v, true, d) + if changed { + *v = v2 + } + case map[int8]int64: + 
fastpathTV.DecMapInt8Int64V(v, false, d) + case *map[int8]int64: + var v2 map[int8]int64 + v2, changed = fastpathTV.DecMapInt8Int64V(*v, true, d) + if changed { + *v = v2 + } + case map[int8]float32: + fastpathTV.DecMapInt8Float32V(v, false, d) + case *map[int8]float32: + var v2 map[int8]float32 + v2, changed = fastpathTV.DecMapInt8Float32V(*v, true, d) + if changed { + *v = v2 + } + case map[int8]float64: + fastpathTV.DecMapInt8Float64V(v, false, d) + case *map[int8]float64: + var v2 map[int8]float64 + v2, changed = fastpathTV.DecMapInt8Float64V(*v, true, d) + if changed { + *v = v2 + } + case map[int8]bool: + fastpathTV.DecMapInt8BoolV(v, false, d) + case *map[int8]bool: + var v2 map[int8]bool + v2, changed = fastpathTV.DecMapInt8BoolV(*v, true, d) + if changed { + *v = v2 + } + case map[int16]interface{}: + fastpathTV.DecMapInt16IntfV(v, false, d) + case *map[int16]interface{}: + var v2 map[int16]interface{} + v2, changed = fastpathTV.DecMapInt16IntfV(*v, true, d) + if changed { + *v = v2 + } + case map[int16]string: + fastpathTV.DecMapInt16StringV(v, false, d) + case *map[int16]string: + var v2 map[int16]string + v2, changed = fastpathTV.DecMapInt16StringV(*v, true, d) + if changed { + *v = v2 + } + case map[int16]uint: + fastpathTV.DecMapInt16UintV(v, false, d) + case *map[int16]uint: + var v2 map[int16]uint + v2, changed = fastpathTV.DecMapInt16UintV(*v, true, d) + if changed { + *v = v2 + } + case map[int16]uint8: + fastpathTV.DecMapInt16Uint8V(v, false, d) + case *map[int16]uint8: + var v2 map[int16]uint8 + v2, changed = fastpathTV.DecMapInt16Uint8V(*v, true, d) + if changed { + *v = v2 + } + case map[int16]uint16: + fastpathTV.DecMapInt16Uint16V(v, false, d) + case *map[int16]uint16: + var v2 map[int16]uint16 + v2, changed = fastpathTV.DecMapInt16Uint16V(*v, true, d) + if changed { + *v = v2 + } + case map[int16]uint32: + fastpathTV.DecMapInt16Uint32V(v, false, d) + case *map[int16]uint32: + var v2 map[int16]uint32 + v2, changed = fastpathTV.DecMapInt16Uint32V(*v, true, d) + if changed { + *v = v2 + } + case map[int16]uint64: + fastpathTV.DecMapInt16Uint64V(v, false, d) + case *map[int16]uint64: + var v2 map[int16]uint64 + v2, changed = fastpathTV.DecMapInt16Uint64V(*v, true, d) + if changed { + *v = v2 + } + case map[int16]uintptr: + fastpathTV.DecMapInt16UintptrV(v, false, d) + case *map[int16]uintptr: + var v2 map[int16]uintptr + v2, changed = fastpathTV.DecMapInt16UintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[int16]int: + fastpathTV.DecMapInt16IntV(v, false, d) + case *map[int16]int: + var v2 map[int16]int + v2, changed = fastpathTV.DecMapInt16IntV(*v, true, d) + if changed { + *v = v2 + } + case map[int16]int8: + fastpathTV.DecMapInt16Int8V(v, false, d) + case *map[int16]int8: + var v2 map[int16]int8 + v2, changed = fastpathTV.DecMapInt16Int8V(*v, true, d) + if changed { + *v = v2 + } + case map[int16]int16: + fastpathTV.DecMapInt16Int16V(v, false, d) + case *map[int16]int16: + var v2 map[int16]int16 + v2, changed = fastpathTV.DecMapInt16Int16V(*v, true, d) + if changed { + *v = v2 + } + case map[int16]int32: + fastpathTV.DecMapInt16Int32V(v, false, d) + case *map[int16]int32: + var v2 map[int16]int32 + v2, changed = fastpathTV.DecMapInt16Int32V(*v, true, d) + if changed { + *v = v2 + } + case map[int16]int64: + fastpathTV.DecMapInt16Int64V(v, false, d) + case *map[int16]int64: + var v2 map[int16]int64 + v2, changed = fastpathTV.DecMapInt16Int64V(*v, true, d) + if changed { + *v = v2 + } + case map[int16]float32: + fastpathTV.DecMapInt16Float32V(v, false, d) + 
case *map[int16]float32: + var v2 map[int16]float32 + v2, changed = fastpathTV.DecMapInt16Float32V(*v, true, d) + if changed { + *v = v2 + } + case map[int16]float64: + fastpathTV.DecMapInt16Float64V(v, false, d) + case *map[int16]float64: + var v2 map[int16]float64 + v2, changed = fastpathTV.DecMapInt16Float64V(*v, true, d) + if changed { + *v = v2 + } + case map[int16]bool: + fastpathTV.DecMapInt16BoolV(v, false, d) + case *map[int16]bool: + var v2 map[int16]bool + v2, changed = fastpathTV.DecMapInt16BoolV(*v, true, d) + if changed { + *v = v2 + } + case map[int32]interface{}: + fastpathTV.DecMapInt32IntfV(v, false, d) + case *map[int32]interface{}: + var v2 map[int32]interface{} + v2, changed = fastpathTV.DecMapInt32IntfV(*v, true, d) + if changed { + *v = v2 + } + case map[int32]string: + fastpathTV.DecMapInt32StringV(v, false, d) + case *map[int32]string: + var v2 map[int32]string + v2, changed = fastpathTV.DecMapInt32StringV(*v, true, d) + if changed { + *v = v2 + } + case map[int32]uint: + fastpathTV.DecMapInt32UintV(v, false, d) + case *map[int32]uint: + var v2 map[int32]uint + v2, changed = fastpathTV.DecMapInt32UintV(*v, true, d) + if changed { + *v = v2 + } + case map[int32]uint8: + fastpathTV.DecMapInt32Uint8V(v, false, d) + case *map[int32]uint8: + var v2 map[int32]uint8 + v2, changed = fastpathTV.DecMapInt32Uint8V(*v, true, d) + if changed { + *v = v2 + } + case map[int32]uint16: + fastpathTV.DecMapInt32Uint16V(v, false, d) + case *map[int32]uint16: + var v2 map[int32]uint16 + v2, changed = fastpathTV.DecMapInt32Uint16V(*v, true, d) + if changed { + *v = v2 + } + case map[int32]uint32: + fastpathTV.DecMapInt32Uint32V(v, false, d) + case *map[int32]uint32: + var v2 map[int32]uint32 + v2, changed = fastpathTV.DecMapInt32Uint32V(*v, true, d) + if changed { + *v = v2 + } + case map[int32]uint64: + fastpathTV.DecMapInt32Uint64V(v, false, d) + case *map[int32]uint64: + var v2 map[int32]uint64 + v2, changed = fastpathTV.DecMapInt32Uint64V(*v, true, d) + if changed { + *v = v2 + } + case map[int32]uintptr: + fastpathTV.DecMapInt32UintptrV(v, false, d) + case *map[int32]uintptr: + var v2 map[int32]uintptr + v2, changed = fastpathTV.DecMapInt32UintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[int32]int: + fastpathTV.DecMapInt32IntV(v, false, d) + case *map[int32]int: + var v2 map[int32]int + v2, changed = fastpathTV.DecMapInt32IntV(*v, true, d) + if changed { + *v = v2 + } + case map[int32]int8: + fastpathTV.DecMapInt32Int8V(v, false, d) + case *map[int32]int8: + var v2 map[int32]int8 + v2, changed = fastpathTV.DecMapInt32Int8V(*v, true, d) + if changed { + *v = v2 + } + case map[int32]int16: + fastpathTV.DecMapInt32Int16V(v, false, d) + case *map[int32]int16: + var v2 map[int32]int16 + v2, changed = fastpathTV.DecMapInt32Int16V(*v, true, d) + if changed { + *v = v2 + } + case map[int32]int32: + fastpathTV.DecMapInt32Int32V(v, false, d) + case *map[int32]int32: + var v2 map[int32]int32 + v2, changed = fastpathTV.DecMapInt32Int32V(*v, true, d) + if changed { + *v = v2 + } + case map[int32]int64: + fastpathTV.DecMapInt32Int64V(v, false, d) + case *map[int32]int64: + var v2 map[int32]int64 + v2, changed = fastpathTV.DecMapInt32Int64V(*v, true, d) + if changed { + *v = v2 + } + case map[int32]float32: + fastpathTV.DecMapInt32Float32V(v, false, d) + case *map[int32]float32: + var v2 map[int32]float32 + v2, changed = fastpathTV.DecMapInt32Float32V(*v, true, d) + if changed { + *v = v2 + } + case map[int32]float64: + fastpathTV.DecMapInt32Float64V(v, false, d) + case 
*map[int32]float64: + var v2 map[int32]float64 + v2, changed = fastpathTV.DecMapInt32Float64V(*v, true, d) + if changed { + *v = v2 + } + case map[int32]bool: + fastpathTV.DecMapInt32BoolV(v, false, d) + case *map[int32]bool: + var v2 map[int32]bool + v2, changed = fastpathTV.DecMapInt32BoolV(*v, true, d) + if changed { + *v = v2 + } + case map[int64]interface{}: + fastpathTV.DecMapInt64IntfV(v, false, d) + case *map[int64]interface{}: + var v2 map[int64]interface{} + v2, changed = fastpathTV.DecMapInt64IntfV(*v, true, d) + if changed { + *v = v2 + } + case map[int64]string: + fastpathTV.DecMapInt64StringV(v, false, d) + case *map[int64]string: + var v2 map[int64]string + v2, changed = fastpathTV.DecMapInt64StringV(*v, true, d) + if changed { + *v = v2 + } + case map[int64]uint: + fastpathTV.DecMapInt64UintV(v, false, d) + case *map[int64]uint: + var v2 map[int64]uint + v2, changed = fastpathTV.DecMapInt64UintV(*v, true, d) + if changed { + *v = v2 + } + case map[int64]uint8: + fastpathTV.DecMapInt64Uint8V(v, false, d) + case *map[int64]uint8: + var v2 map[int64]uint8 + v2, changed = fastpathTV.DecMapInt64Uint8V(*v, true, d) + if changed { + *v = v2 + } + case map[int64]uint16: + fastpathTV.DecMapInt64Uint16V(v, false, d) + case *map[int64]uint16: + var v2 map[int64]uint16 + v2, changed = fastpathTV.DecMapInt64Uint16V(*v, true, d) + if changed { + *v = v2 + } + case map[int64]uint32: + fastpathTV.DecMapInt64Uint32V(v, false, d) + case *map[int64]uint32: + var v2 map[int64]uint32 + v2, changed = fastpathTV.DecMapInt64Uint32V(*v, true, d) + if changed { + *v = v2 + } + case map[int64]uint64: + fastpathTV.DecMapInt64Uint64V(v, false, d) + case *map[int64]uint64: + var v2 map[int64]uint64 + v2, changed = fastpathTV.DecMapInt64Uint64V(*v, true, d) + if changed { + *v = v2 + } + case map[int64]uintptr: + fastpathTV.DecMapInt64UintptrV(v, false, d) + case *map[int64]uintptr: + var v2 map[int64]uintptr + v2, changed = fastpathTV.DecMapInt64UintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[int64]int: + fastpathTV.DecMapInt64IntV(v, false, d) + case *map[int64]int: + var v2 map[int64]int + v2, changed = fastpathTV.DecMapInt64IntV(*v, true, d) + if changed { + *v = v2 + } + case map[int64]int8: + fastpathTV.DecMapInt64Int8V(v, false, d) + case *map[int64]int8: + var v2 map[int64]int8 + v2, changed = fastpathTV.DecMapInt64Int8V(*v, true, d) + if changed { + *v = v2 + } + case map[int64]int16: + fastpathTV.DecMapInt64Int16V(v, false, d) + case *map[int64]int16: + var v2 map[int64]int16 + v2, changed = fastpathTV.DecMapInt64Int16V(*v, true, d) + if changed { + *v = v2 + } + case map[int64]int32: + fastpathTV.DecMapInt64Int32V(v, false, d) + case *map[int64]int32: + var v2 map[int64]int32 + v2, changed = fastpathTV.DecMapInt64Int32V(*v, true, d) + if changed { + *v = v2 + } + case map[int64]int64: + fastpathTV.DecMapInt64Int64V(v, false, d) + case *map[int64]int64: + var v2 map[int64]int64 + v2, changed = fastpathTV.DecMapInt64Int64V(*v, true, d) + if changed { + *v = v2 + } + case map[int64]float32: + fastpathTV.DecMapInt64Float32V(v, false, d) + case *map[int64]float32: + var v2 map[int64]float32 + v2, changed = fastpathTV.DecMapInt64Float32V(*v, true, d) + if changed { + *v = v2 + } + case map[int64]float64: + fastpathTV.DecMapInt64Float64V(v, false, d) + case *map[int64]float64: + var v2 map[int64]float64 + v2, changed = fastpathTV.DecMapInt64Float64V(*v, true, d) + if changed { + *v = v2 + } + case map[int64]bool: + fastpathTV.DecMapInt64BoolV(v, false, d) + case *map[int64]bool: + var 
v2 map[int64]bool + v2, changed = fastpathTV.DecMapInt64BoolV(*v, true, d) + if changed { + *v = v2 + } + case map[bool]interface{}: + fastpathTV.DecMapBoolIntfV(v, false, d) + case *map[bool]interface{}: + var v2 map[bool]interface{} + v2, changed = fastpathTV.DecMapBoolIntfV(*v, true, d) + if changed { + *v = v2 + } + case map[bool]string: + fastpathTV.DecMapBoolStringV(v, false, d) + case *map[bool]string: + var v2 map[bool]string + v2, changed = fastpathTV.DecMapBoolStringV(*v, true, d) + if changed { + *v = v2 + } + case map[bool]uint: + fastpathTV.DecMapBoolUintV(v, false, d) + case *map[bool]uint: + var v2 map[bool]uint + v2, changed = fastpathTV.DecMapBoolUintV(*v, true, d) + if changed { + *v = v2 + } + case map[bool]uint8: + fastpathTV.DecMapBoolUint8V(v, false, d) + case *map[bool]uint8: + var v2 map[bool]uint8 + v2, changed = fastpathTV.DecMapBoolUint8V(*v, true, d) + if changed { + *v = v2 + } + case map[bool]uint16: + fastpathTV.DecMapBoolUint16V(v, false, d) + case *map[bool]uint16: + var v2 map[bool]uint16 + v2, changed = fastpathTV.DecMapBoolUint16V(*v, true, d) + if changed { + *v = v2 + } + case map[bool]uint32: + fastpathTV.DecMapBoolUint32V(v, false, d) + case *map[bool]uint32: + var v2 map[bool]uint32 + v2, changed = fastpathTV.DecMapBoolUint32V(*v, true, d) + if changed { + *v = v2 + } + case map[bool]uint64: + fastpathTV.DecMapBoolUint64V(v, false, d) + case *map[bool]uint64: + var v2 map[bool]uint64 + v2, changed = fastpathTV.DecMapBoolUint64V(*v, true, d) + if changed { + *v = v2 + } + case map[bool]uintptr: + fastpathTV.DecMapBoolUintptrV(v, false, d) + case *map[bool]uintptr: + var v2 map[bool]uintptr + v2, changed = fastpathTV.DecMapBoolUintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[bool]int: + fastpathTV.DecMapBoolIntV(v, false, d) + case *map[bool]int: + var v2 map[bool]int + v2, changed = fastpathTV.DecMapBoolIntV(*v, true, d) + if changed { + *v = v2 + } + case map[bool]int8: + fastpathTV.DecMapBoolInt8V(v, false, d) + case *map[bool]int8: + var v2 map[bool]int8 + v2, changed = fastpathTV.DecMapBoolInt8V(*v, true, d) + if changed { + *v = v2 + } + case map[bool]int16: + fastpathTV.DecMapBoolInt16V(v, false, d) + case *map[bool]int16: + var v2 map[bool]int16 + v2, changed = fastpathTV.DecMapBoolInt16V(*v, true, d) + if changed { + *v = v2 + } + case map[bool]int32: + fastpathTV.DecMapBoolInt32V(v, false, d) + case *map[bool]int32: + var v2 map[bool]int32 + v2, changed = fastpathTV.DecMapBoolInt32V(*v, true, d) + if changed { + *v = v2 + } + case map[bool]int64: + fastpathTV.DecMapBoolInt64V(v, false, d) + case *map[bool]int64: + var v2 map[bool]int64 + v2, changed = fastpathTV.DecMapBoolInt64V(*v, true, d) + if changed { + *v = v2 + } + case map[bool]float32: + fastpathTV.DecMapBoolFloat32V(v, false, d) + case *map[bool]float32: + var v2 map[bool]float32 + v2, changed = fastpathTV.DecMapBoolFloat32V(*v, true, d) + if changed { + *v = v2 + } + case map[bool]float64: + fastpathTV.DecMapBoolFloat64V(v, false, d) + case *map[bool]float64: + var v2 map[bool]float64 + v2, changed = fastpathTV.DecMapBoolFloat64V(*v, true, d) + if changed { + *v = v2 + } + case map[bool]bool: + fastpathTV.DecMapBoolBoolV(v, false, d) + case *map[bool]bool: + var v2 map[bool]bool + v2, changed = fastpathTV.DecMapBoolBoolV(*v, true, d) + if changed { + *v = v2 + } + default: + _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 + return false + } + return true +} + +func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool { + switch v := 
+
[… generated zeroing cases elided: one `case *[]T:` arm for each fast-path slice element type and one `case *map[K]V:` arm for each fast-path map key/value combination, every arm simply setting `*v = nil` …]
+	default:
+		_ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
+		return false
+	}
+	return true
+}
+
+// -- -- fast path functions
+
+func (d *Decoder) fastpathDecSliceIntfR(f *codecFnInfo, rv reflect.Value) {
+	if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*[]interface{})
+		v, changed := fastpathTV.DecSliceIntfV(*vp, !array, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		v := rv2i(rv).([]interface{})
+		v2, changed := fastpathTV.DecSliceIntfV(v, !array, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	}
+}
&v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) DecSliceIntfX(vp *[]interface{}, d *Decoder) { + v, changed := f.DecSliceIntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceIntfV(v []interface{}, canChange bool, d *Decoder) (_ []interface{}, changed bool) { + dd := d.d + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []interface{}{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + d.depthIncr() + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16) + if xlen <= cap(v) { + v = v[:uint(xlen)] + } else { + v = make([]interface{}, uint(xlen)) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + var j int + for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16) + } else { + xlen = 8 + } + v = make([]interface{}, uint(xlen)) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, nil) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else if dd.TryDecodeAsNil() { + v[uint(j)] = nil + } else { + d.decode(&v[uint(j)]) + } + } + if canChange { + if j < len(v) { + v = v[:uint(j)] + changed = true + } else if j == 0 && v == nil { + v = make([]interface{}, 0) + changed = true + } + } + slh.End() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecSliceStringR(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*[]string) + v, changed := fastpathTV.DecSliceStringV(*vp, !array, d) + if changed { + *vp = v + } + } else { + v := rv2i(rv).([]string) + v2, changed := fastpathTV.DecSliceStringV(v, !array, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) DecSliceStringX(vp *[]string, d *Decoder) { + v, changed := f.DecSliceStringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceStringV(v []string, canChange bool, d *Decoder) (_ []string, changed bool) { + dd := d.d + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []string{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + d.depthIncr() + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16) + if xlen <= cap(v) { + v = v[:uint(xlen)] + } else { + v = make([]string, uint(xlen)) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + var j int + for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16) + } else { + xlen = 8 + } + v = make([]string, uint(xlen)) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = 
append(v, "") + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else if dd.TryDecodeAsNil() { + v[uint(j)] = "" + } else { + v[uint(j)] = dd.DecodeString() + } + } + if canChange { + if j < len(v) { + v = v[:uint(j)] + changed = true + } else if j == 0 && v == nil { + v = make([]string, 0) + changed = true + } + } + slh.End() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecSliceFloat32R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*[]float32) + v, changed := fastpathTV.DecSliceFloat32V(*vp, !array, d) + if changed { + *vp = v + } + } else { + v := rv2i(rv).([]float32) + v2, changed := fastpathTV.DecSliceFloat32V(v, !array, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) DecSliceFloat32X(vp *[]float32, d *Decoder) { + v, changed := f.DecSliceFloat32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceFloat32V(v []float32, canChange bool, d *Decoder) (_ []float32, changed bool) { + dd := d.d + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []float32{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + d.depthIncr() + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) + if xlen <= cap(v) { + v = v[:uint(xlen)] + } else { + v = make([]float32, uint(xlen)) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + var j int + for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) + } else { + xlen = 8 + } + v = make([]float32, uint(xlen)) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else if dd.TryDecodeAsNil() { + v[uint(j)] = 0 + } else { + v[uint(j)] = float32(chkOvf.Float32V(dd.DecodeFloat64())) + } + } + if canChange { + if j < len(v) { + v = v[:uint(j)] + changed = true + } else if j == 0 && v == nil { + v = make([]float32, 0) + changed = true + } + } + slh.End() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecSliceFloat64R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*[]float64) + v, changed := fastpathTV.DecSliceFloat64V(*vp, !array, d) + if changed { + *vp = v + } + } else { + v := rv2i(rv).([]float64) + v2, changed := fastpathTV.DecSliceFloat64V(v, !array, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) DecSliceFloat64X(vp *[]float64, d *Decoder) { + v, changed := f.DecSliceFloat64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceFloat64V(v []float64, canChange bool, d *Decoder) (_ []float64, changed bool) { + dd := d.d + slh, containerLenS := d.decSliceHelperStart() + if 
containerLenS == 0 { + if canChange { + if v == nil { + v = []float64{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + d.depthIncr() + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xlen <= cap(v) { + v = v[:uint(xlen)] + } else { + v = make([]float64, uint(xlen)) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + var j int + for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + } else { + xlen = 8 + } + v = make([]float64, uint(xlen)) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else if dd.TryDecodeAsNil() { + v[uint(j)] = 0 + } else { + v[uint(j)] = dd.DecodeFloat64() + } + } + if canChange { + if j < len(v) { + v = v[:uint(j)] + changed = true + } else if j == 0 && v == nil { + v = make([]float64, 0) + changed = true + } + } + slh.End() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecSliceUintR(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*[]uint) + v, changed := fastpathTV.DecSliceUintV(*vp, !array, d) + if changed { + *vp = v + } + } else { + v := rv2i(rv).([]uint) + v2, changed := fastpathTV.DecSliceUintV(v, !array, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) DecSliceUintX(vp *[]uint, d *Decoder) { + v, changed := f.DecSliceUintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUintV(v []uint, canChange bool, d *Decoder) (_ []uint, changed bool) { + dd := d.d + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uint{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + d.depthIncr() + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xlen <= cap(v) { + v = v[:uint(xlen)] + } else { + v = make([]uint, uint(xlen)) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + var j int + for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + } else { + xlen = 8 + } + v = make([]uint, uint(xlen)) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else if dd.TryDecodeAsNil() { + v[uint(j)] = 0 + } else { + v[uint(j)] = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + } + } + if canChange { + if j < len(v) { + v = v[:uint(j)] + changed = true + } else if j == 0 
+			v = make([]uint, 0)
+			changed = true
+		}
+	}
+	slh.End()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceUint8R(f *codecFnInfo, rv reflect.Value) {
+	if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*[]uint8)
+		v, changed := fastpathTV.DecSliceUint8V(*vp, !array, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		v := rv2i(rv).([]uint8)
+		v2, changed := fastpathTV.DecSliceUint8V(v, !array, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	}
+}
+func (f fastpathT) DecSliceUint8X(vp *[]uint8, d *Decoder) {
+	v, changed := f.DecSliceUint8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecSliceUint8V(v []uint8, canChange bool, d *Decoder) (_ []uint8, changed bool) {
+	dd := d.d
+	slh, containerLenS := d.decSliceHelperStart()
+	if containerLenS == 0 {
+		if canChange {
+			if v == nil {
+				v = []uint8{}
+			} else if len(v) != 0 {
+				v = v[:0]
+			}
+			changed = true
+		}
+		slh.End()
+		return v, changed
+	}
+	d.depthIncr()
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen && canChange {
+		if containerLenS > cap(v) {
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1)
+			if xlen <= cap(v) {
+				v = v[:uint(xlen)]
+			} else {
+				v = make([]uint8, uint(xlen))
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	var j int
+	for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+		if j == 0 && len(v) == 0 && canChange {
+			if hasLen {
+				xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1)
+			} else {
+				xlen = 8
+			}
+			v = make([]uint8, uint(xlen))
+			changed = true
+		}
+		// if indefinite, etc, then expand the slice if necessary
+		var decodeIntoBlank bool
+		if j >= len(v) {
+			if canChange {
+				v = append(v, 0)
+				changed = true
+			} else {
+				d.arrayCannotExpand(len(v), j+1)
+				decodeIntoBlank = true
+			}
+		}
+		slh.ElemContainerState(j)
+		if decodeIntoBlank {
+			d.swallow()
+		} else if dd.TryDecodeAsNil() {
+			v[uint(j)] = 0
+		} else {
+			v[uint(j)] = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		}
+	}
+	if canChange {
+		if j < len(v) {
+			v = v[:uint(j)]
+			changed = true
+		} else if j == 0 && v == nil {
+			v = make([]uint8, 0)
+			changed = true
+		}
+	}
+	slh.End()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceUint16R(f *codecFnInfo, rv reflect.Value) {
+	if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*[]uint16)
+		v, changed := fastpathTV.DecSliceUint16V(*vp, !array, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		v := rv2i(rv).([]uint16)
+		v2, changed := fastpathTV.DecSliceUint16V(v, !array, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	}
+}
+func (f fastpathT) DecSliceUint16X(vp *[]uint16, d *Decoder) {
+	v, changed := f.DecSliceUint16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecSliceUint16V(v []uint16, canChange bool, d *Decoder) (_ []uint16, changed bool) {
+	dd := d.d
+	slh, containerLenS := d.decSliceHelperStart()
+	if containerLenS == 0 {
+		if canChange {
+			if v == nil {
+				v = []uint16{}
+			} else if len(v) != 0 {
+				v = v[:0]
+			}
+			changed = true
+		}
+		slh.End()
+		return v, changed
+	}
+	d.depthIncr()
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen && canChange {
+		if containerLenS > cap(v) {
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2)
+			if xlen <= cap(v) {
+				v = v[:uint(xlen)]
+			} else {
+				v = make([]uint16, uint(xlen))
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	var j int
+	for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+		if j == 0 && len(v) == 0 && canChange {
+			if hasLen {
+				xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2)
+			} else {
+				xlen = 8
+			}
+			v = make([]uint16, uint(xlen))
+			changed = true
+		}
+		// if indefinite, etc, then expand the slice if necessary
+		var decodeIntoBlank bool
+		if j >= len(v) {
+			if canChange {
+				v = append(v, 0)
+				changed = true
+			} else {
+				d.arrayCannotExpand(len(v), j+1)
+				decodeIntoBlank = true
+			}
+		}
+		slh.ElemContainerState(j)
+		if decodeIntoBlank {
+			d.swallow()
+		} else if dd.TryDecodeAsNil() {
+			v[uint(j)] = 0
+		} else {
+			v[uint(j)] = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		}
+	}
+	if canChange {
+		if j < len(v) {
+			v = v[:uint(j)]
+			changed = true
+		} else if j == 0 && v == nil {
+			v = make([]uint16, 0)
+			changed = true
+		}
+	}
+	slh.End()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceUint32R(f *codecFnInfo, rv reflect.Value) {
+	if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*[]uint32)
+		v, changed := fastpathTV.DecSliceUint32V(*vp, !array, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		v := rv2i(rv).([]uint32)
+		v2, changed := fastpathTV.DecSliceUint32V(v, !array, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	}
+}
+func (f fastpathT) DecSliceUint32X(vp *[]uint32, d *Decoder) {
+	v, changed := f.DecSliceUint32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecSliceUint32V(v []uint32, canChange bool, d *Decoder) (_ []uint32, changed bool) {
+	dd := d.d
+	slh, containerLenS := d.decSliceHelperStart()
+	if containerLenS == 0 {
+		if canChange {
+			if v == nil {
+				v = []uint32{}
+			} else if len(v) != 0 {
+				v = v[:0]
+			}
+			changed = true
+		}
+		slh.End()
+		return v, changed
+	}
+	d.depthIncr()
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen && canChange {
+		if containerLenS > cap(v) {
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+			if xlen <= cap(v) {
+				v = v[:uint(xlen)]
+			} else {
+				v = make([]uint32, uint(xlen))
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	var j int
+	for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+		if j == 0 && len(v) == 0 && canChange {
+			if hasLen {
+				xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+			} else {
+				xlen = 8
+			}
+			v = make([]uint32, uint(xlen))
+			changed = true
+		}
+		// if indefinite, etc, then expand the slice if necessary
+		var decodeIntoBlank bool
+		if j >= len(v) {
+			if canChange {
+				v = append(v, 0)
+				changed = true
+			} else {
+				d.arrayCannotExpand(len(v), j+1)
+				decodeIntoBlank = true
+			}
+		}
+		slh.ElemContainerState(j)
+		if decodeIntoBlank {
+			d.swallow()
+		} else if dd.TryDecodeAsNil() {
+			v[uint(j)] = 0
+		} else {
+			v[uint(j)] = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		}
+	}
+	if canChange {
+		if j < len(v) {
+			v = v[:uint(j)]
+			changed = true
+		} else if j == 0 && v == nil {
+			v = make([]uint32, 0)
+			changed = true
+		}
+	}
+	slh.End()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceUint64R(f *codecFnInfo, rv reflect.Value) {
+	if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*[]uint64)
+		v, changed := fastpathTV.DecSliceUint64V(*vp, !array, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		v := rv2i(rv).([]uint64)
+		v2, changed := fastpathTV.DecSliceUint64V(v, !array, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	}
+}
+func (f fastpathT) DecSliceUint64X(vp *[]uint64, d *Decoder) {
+	v, changed := f.DecSliceUint64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecSliceUint64V(v []uint64, canChange bool, d *Decoder) (_ []uint64, changed bool) {
+	dd := d.d
+	slh, containerLenS := d.decSliceHelperStart()
+	if containerLenS == 0 {
+		if canChange {
+			if v == nil {
+				v = []uint64{}
+			} else if len(v) != 0 {
+				v = v[:0]
+			}
+			changed = true
+		}
+		slh.End()
+		return v, changed
+	}
+	d.depthIncr()
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen && canChange {
+		if containerLenS > cap(v) {
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+			if xlen <= cap(v) {
+				v = v[:uint(xlen)]
+			} else {
+				v = make([]uint64, uint(xlen))
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	var j int
+	for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+		if j == 0 && len(v) == 0 && canChange {
+			if hasLen {
+				xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+			} else {
+				xlen = 8
+			}
+			v = make([]uint64, uint(xlen))
+			changed = true
+		}
+		// if indefinite, etc, then expand the slice if necessary
+		var decodeIntoBlank bool
+		if j >= len(v) {
+			if canChange {
+				v = append(v, 0)
+				changed = true
+			} else {
+				d.arrayCannotExpand(len(v), j+1)
+				decodeIntoBlank = true
+			}
+		}
+		slh.ElemContainerState(j)
+		if decodeIntoBlank {
+			d.swallow()
+		} else if dd.TryDecodeAsNil() {
+			v[uint(j)] = 0
+		} else {
+			v[uint(j)] = dd.DecodeUint64()
+		}
+	}
+	if canChange {
+		if j < len(v) {
+			v = v[:uint(j)]
+			changed = true
+		} else if j == 0 && v == nil {
+			v = make([]uint64, 0)
+			changed = true
+		}
+	}
+	slh.End()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceUintptrR(f *codecFnInfo, rv reflect.Value) {
+	if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*[]uintptr)
+		v, changed := fastpathTV.DecSliceUintptrV(*vp, !array, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		v := rv2i(rv).([]uintptr)
+		v2, changed := fastpathTV.DecSliceUintptrV(v, !array, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	}
+}
+func (f fastpathT) DecSliceUintptrX(vp *[]uintptr, d *Decoder) {
+	v, changed := f.DecSliceUintptrV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecSliceUintptrV(v []uintptr, canChange bool, d *Decoder) (_ []uintptr, changed bool) {
+	dd := d.d
+	slh, containerLenS := d.decSliceHelperStart()
+	if containerLenS == 0 {
+		if canChange {
+			if v == nil {
+				v = []uintptr{}
+			} else if len(v) != 0 {
+				v = v[:0]
+			}
+			changed = true
+		}
+		slh.End()
+		return v, changed
+	}
+	d.depthIncr()
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen && canChange {
+		if containerLenS > cap(v) {
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+			if xlen <= cap(v) {
+				v = v[:uint(xlen)]
+			} else {
+				v = make([]uintptr, uint(xlen))
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	var j int
+	for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+		if j == 0 && len(v) == 0 && canChange {
+			if hasLen {
+				xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+			} else {
+				xlen = 8
+			}
+			v = make([]uintptr, uint(xlen))
+			changed = true
+		}
+		// if indefinite, etc, then expand the slice if necessary
+		var decodeIntoBlank bool
+		if j >= len(v) {
+			if canChange {
+				v = append(v, 0)
+				changed = true
+			} else {
+				d.arrayCannotExpand(len(v), j+1)
+				decodeIntoBlank = true
+			}
+		}
+		slh.ElemContainerState(j)
+		if decodeIntoBlank {
+			d.swallow()
+		} else if dd.TryDecodeAsNil() {
+			v[uint(j)] = 0
+		} else {
+			v[uint(j)] = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		}
+	}
+	if canChange {
+		if j < len(v) {
+			v = v[:uint(j)]
+			changed = true
+		} else if j == 0 && v == nil {
+			v = make([]uintptr, 0)
+			changed = true
+		}
+	}
+	slh.End()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceIntR(f *codecFnInfo, rv reflect.Value) {
+	if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*[]int)
+		v, changed := fastpathTV.DecSliceIntV(*vp, !array, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		v := rv2i(rv).([]int)
+		v2, changed := fastpathTV.DecSliceIntV(v, !array, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	}
+}
+func (f fastpathT) DecSliceIntX(vp *[]int, d *Decoder) {
+	v, changed := f.DecSliceIntV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecSliceIntV(v []int, canChange bool, d *Decoder) (_ []int, changed bool) {
+	dd := d.d
+	slh, containerLenS := d.decSliceHelperStart()
+	if containerLenS == 0 {
+		if canChange {
+			if v == nil {
+				v = []int{}
+			} else if len(v) != 0 {
+				v = v[:0]
+			}
+			changed = true
+		}
+		slh.End()
+		return v, changed
+	}
+	d.depthIncr()
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen && canChange {
+		if containerLenS > cap(v) {
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+			if xlen <= cap(v) {
+				v = v[:uint(xlen)]
+			} else {
+				v = make([]int, uint(xlen))
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	var j int
+	for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+		if j == 0 && len(v) == 0 && canChange {
+			if hasLen {
+				xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+			} else {
+				xlen = 8
+			}
+			v = make([]int, uint(xlen))
+			changed = true
+		}
+		// if indefinite, etc, then expand the slice if necessary
+		var decodeIntoBlank bool
+		if j >= len(v) {
+			if canChange {
+				v = append(v, 0)
+				changed = true
+			} else {
+				d.arrayCannotExpand(len(v), j+1)
+				decodeIntoBlank = true
+			}
+		}
+		slh.ElemContainerState(j)
+		if decodeIntoBlank {
+			d.swallow()
+		} else if dd.TryDecodeAsNil() {
+			v[uint(j)] = 0
+		} else {
+			v[uint(j)] = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		}
+	}
+	if canChange {
+		if j < len(v) {
+			v = v[:uint(j)]
+			changed = true
+		} else if j == 0 && v == nil {
+			v = make([]int, 0)
+			changed = true
+		}
+	}
+	slh.End()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceInt8R(f *codecFnInfo, rv reflect.Value) {
+	if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*[]int8)
+		v, changed := fastpathTV.DecSliceInt8V(*vp, !array, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		v := rv2i(rv).([]int8)
+		v2, changed := fastpathTV.DecSliceInt8V(v, !array, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	}
+}
+func (f fastpathT) DecSliceInt8X(vp *[]int8, d *Decoder) {
+	v, changed := f.DecSliceInt8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecSliceInt8V(v []int8, canChange bool, d *Decoder) (_ []int8, changed bool) {
+	dd := d.d
+	slh, containerLenS := d.decSliceHelperStart()
+	if containerLenS == 0 {
+		if canChange {
+			if v == nil {
+				v = []int8{}
+			} else if len(v) != 0 {
+				v = v[:0]
+			}
+			changed = true
+		}
+		slh.End()
+		return v, changed
+	}
+	d.depthIncr()
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen && canChange {
+		if containerLenS > cap(v) {
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1)
+			if xlen <= cap(v) {
+				v = v[:uint(xlen)]
+			} else {
+				v = make([]int8, uint(xlen))
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	var j int
+	for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+		if j == 0 && len(v) == 0 && canChange {
+			if hasLen {
+				xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1)
+			} else {
+				xlen = 8
+			}
+			v = make([]int8, uint(xlen))
+			changed = true
+		}
+		// if indefinite, etc, then expand the slice if necessary
+		var decodeIntoBlank bool
+		if j >= len(v) {
+			if canChange {
+				v = append(v, 0)
+				changed = true
+			} else {
+				d.arrayCannotExpand(len(v), j+1)
+				decodeIntoBlank = true
+			}
+		}
+		slh.ElemContainerState(j)
+		if decodeIntoBlank {
+			d.swallow()
+		} else if dd.TryDecodeAsNil() {
+			v[uint(j)] = 0
+		} else {
+			v[uint(j)] = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		}
+	}
+	if canChange {
+		if j < len(v) {
+			v = v[:uint(j)]
+			changed = true
+		} else if j == 0 && v == nil {
+			v = make([]int8, 0)
+			changed = true
+		}
+	}
+	slh.End()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceInt16R(f *codecFnInfo, rv reflect.Value) {
+	if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*[]int16)
+		v, changed := fastpathTV.DecSliceInt16V(*vp, !array, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		v := rv2i(rv).([]int16)
+		v2, changed := fastpathTV.DecSliceInt16V(v, !array, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	}
+}
+func (f fastpathT) DecSliceInt16X(vp *[]int16, d *Decoder) {
+	v, changed := f.DecSliceInt16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecSliceInt16V(v []int16, canChange bool, d *Decoder) (_ []int16, changed bool) {
+	dd := d.d
+	slh, containerLenS := d.decSliceHelperStart()
+	if containerLenS == 0 {
+		if canChange {
+			if v == nil {
+				v = []int16{}
+			} else if len(v) != 0 {
+				v = v[:0]
+			}
+			changed = true
+		}
+		slh.End()
+		return v, changed
+	}
+	d.depthIncr()
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen && canChange {
+		if containerLenS > cap(v) {
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2)
+			if xlen <= cap(v) {
+				v = v[:uint(xlen)]
+			} else {
+				v = make([]int16, uint(xlen))
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	var j int
+	for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+		if j == 0 && len(v) == 0 && canChange {
+			if hasLen {
+				xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2)
+			} else {
+				xlen = 8
+			}
+			v = make([]int16, uint(xlen))
+			changed = true
+		}
+		// if indefinite, etc, then expand the slice if necessary
+		var decodeIntoBlank bool
+		if j >= len(v) {
+			if canChange {
+				v = append(v, 0)
+				changed = true
+			} else {
+				d.arrayCannotExpand(len(v), j+1)
+				decodeIntoBlank = true
+			}
+		}
+		slh.ElemContainerState(j)
+		if decodeIntoBlank {
+			d.swallow()
+		} else if dd.TryDecodeAsNil() {
+			v[uint(j)] = 0
+		} else {
+			v[uint(j)] = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		}
+	}
+	if canChange {
+		if j < len(v) {
+			v = v[:uint(j)]
+			changed = true
+		} else if j == 0 && v == nil {
+			v = make([]int16, 0)
+			changed = true
+		}
+	}
+	slh.End()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceInt32R(f *codecFnInfo, rv reflect.Value) {
+	if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*[]int32)
+		v, changed := fastpathTV.DecSliceInt32V(*vp, !array, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		v := rv2i(rv).([]int32)
+		v2, changed := fastpathTV.DecSliceInt32V(v, !array, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	}
+}
+func (f fastpathT) DecSliceInt32X(vp *[]int32, d *Decoder) {
+	v, changed := f.DecSliceInt32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecSliceInt32V(v []int32, canChange bool, d *Decoder) (_ []int32, changed bool) {
+	dd := d.d
+	slh, containerLenS := d.decSliceHelperStart()
+	if containerLenS == 0 {
+		if canChange {
+			if v == nil {
+				v = []int32{}
+			} else if len(v) != 0 {
+				v = v[:0]
+			}
+			changed = true
+		}
+		slh.End()
+		return v, changed
+	}
+	d.depthIncr()
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen && canChange {
+		if containerLenS > cap(v) {
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+			if xlen <= cap(v) {
+				v = v[:uint(xlen)]
+			} else {
+				v = make([]int32, uint(xlen))
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	var j int
+	for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+		if j == 0 && len(v) == 0 && canChange {
+			if hasLen {
+				xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+			} else {
+				xlen = 8
+			}
+			v = make([]int32, uint(xlen))
+			changed = true
+		}
+		// if indefinite, etc, then expand the slice if necessary
+		var decodeIntoBlank bool
+		if j >= len(v) {
+			if canChange {
+				v = append(v, 0)
+				changed = true
+			} else {
+				d.arrayCannotExpand(len(v), j+1)
+				decodeIntoBlank = true
+			}
+		}
+		slh.ElemContainerState(j)
+		if decodeIntoBlank {
+			d.swallow()
+		} else if dd.TryDecodeAsNil() {
+			v[uint(j)] = 0
+		} else {
+			v[uint(j)] = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		}
+	}
+	if canChange {
+		if j < len(v) {
+			v = v[:uint(j)]
+			changed = true
+		} else if j == 0 && v == nil {
+			v = make([]int32, 0)
+			changed = true
+		}
+	}
+	slh.End()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceInt64R(f *codecFnInfo, rv reflect.Value) {
+	if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*[]int64)
+		v, changed := fastpathTV.DecSliceInt64V(*vp, !array, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		v := rv2i(rv).([]int64)
+		v2, changed := fastpathTV.DecSliceInt64V(v, !array, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	}
+}
+func (f fastpathT) DecSliceInt64X(vp *[]int64, d *Decoder) {
+	v, changed := f.DecSliceInt64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecSliceInt64V(v []int64, canChange bool, d *Decoder) (_ []int64, changed bool) {
+	dd := d.d
+	slh, containerLenS := d.decSliceHelperStart()
+	if containerLenS == 0 {
+		if canChange {
+			if v == nil {
+				v = []int64{}
+			} else if len(v) != 0 {
+				v = v[:0]
+			}
+			changed = true
+		}
+		slh.End()
+		return v, changed
+	}
+	d.depthIncr()
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen && canChange {
+		if containerLenS > cap(v) {
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+			if xlen <= cap(v) {
+				v = v[:uint(xlen)]
+			} else {
+				v = make([]int64, uint(xlen))
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	var j int
+	for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+		if j == 0 && len(v) == 0 && canChange {
+			if hasLen {
+				xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+			} else {
+				xlen = 8
+			}
+			v = make([]int64, uint(xlen))
+			changed = true
+		}
+		// if indefinite, etc, then expand the slice if necessary
+		var decodeIntoBlank bool
+		if j >= len(v) {
+			if canChange {
+				v = append(v, 0)
+				changed = true
+			} else {
+				d.arrayCannotExpand(len(v), j+1)
+				decodeIntoBlank = true
+			}
+		}
+		slh.ElemContainerState(j)
+		if decodeIntoBlank {
+			d.swallow()
+		} else if dd.TryDecodeAsNil() {
+			v[uint(j)] = 0
+		} else {
+			v[uint(j)] = dd.DecodeInt64()
+		}
+	}
+	if canChange {
+		if j < len(v) {
+			v = v[:uint(j)]
+			changed = true
+		} else if j == 0 && v == nil {
+			v = make([]int64, 0)
+			changed = true
+		}
+	}
+	slh.End()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecSliceBoolR(f *codecFnInfo, rv reflect.Value) {
+	if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*[]bool)
+		v, changed := fastpathTV.DecSliceBoolV(*vp, !array, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		v := rv2i(rv).([]bool)
+		v2, changed := fastpathTV.DecSliceBoolV(v, !array, d)
+		if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
+			copy(v, v2)
+		}
+	}
+}
+func (f fastpathT) DecSliceBoolX(vp *[]bool, d *Decoder) {
+	v, changed := f.DecSliceBoolV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecSliceBoolV(v []bool, canChange bool, d *Decoder) (_ []bool, changed bool) {
+	dd := d.d
+	slh, containerLenS := d.decSliceHelperStart()
+	if containerLenS == 0 {
+		if canChange {
+			if v == nil {
+				v = []bool{}
+			} else if len(v) != 0 {
+				v = v[:0]
+			}
+			changed = true
+		}
+		slh.End()
+		return v, changed
+	}
+	d.depthIncr()
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen && canChange {
+		if containerLenS > cap(v) {
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1)
+			if xlen <= cap(v) {
+				v = v[:uint(xlen)]
+			} else {
+				v = make([]bool, uint(xlen))
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	var j int
+	for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
+		if j == 0 && len(v) == 0 && canChange {
+			if hasLen {
+				xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1)
+			} else {
+				xlen = 8
+			}
+			v = make([]bool, uint(xlen))
+			changed = true
+		}
+		// if indefinite, etc, then expand the slice if necessary
+		var decodeIntoBlank bool
+		if j >= len(v) {
+			if canChange {
+				v = append(v, false)
+				changed = true
+			} else {
+				d.arrayCannotExpand(len(v), j+1)
+				decodeIntoBlank = true
+			}
+		}
+		slh.ElemContainerState(j)
+		if decodeIntoBlank {
+			d.swallow()
+		} else if dd.TryDecodeAsNil() {
+			v[uint(j)] = false
+		} else {
+			v[uint(j)] = dd.DecodeBool()
+		}
+	}
+	if canChange {
+		if j < len(v) {
+			v = v[:uint(j)]
+			changed = true
+		} else if j == 0 && v == nil {
+			v = make([]bool, 0)
+			changed = true
+		}
+	}
+	slh.End()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfIntfR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[interface{}]interface{})
+		v, changed := fastpathTV.DecMapIntfIntfV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntfIntfV(rv2i(rv).(map[interface{}]interface{}), false, d)
+	}
+}
+func (f fastpathT) DecMapIntfIntfX(vp *map[interface{}]interface{}, d *Decoder) {
+	v, changed := f.DecMapIntfIntfV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntfIntfV(v map[interface{}]interface{}, canChange bool,
+	d *Decoder) (_ map[interface{}]interface{}, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 32)
+		v = make(map[interface{}]interface{}, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk interface{}
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv)
+		}
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = nil
+			}
+			continue
+		}
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfStringR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[interface{}]string)
+		v, changed := fastpathTV.DecMapIntfStringV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntfStringV(rv2i(rv).(map[interface{}]string), false, d)
+	}
+}
+func (f fastpathT) DecMapIntfStringX(vp *map[interface{}]string, d *Decoder) {
+	v, changed := f.DecMapIntfStringV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntfStringV(v map[interface{}]string, canChange bool,
+	d *Decoder) (_ map[interface{}]string, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 32)
+		v = make(map[interface{}]string, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk interface{}
+	var mv string
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv)
+		}
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = ""
+			}
+			continue
+		}
+		mv = dd.DecodeString()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfUintR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[interface{}]uint)
+		v, changed := fastpathTV.DecMapIntfUintV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntfUintV(rv2i(rv).(map[interface{}]uint), false, d)
+	}
+}
+func (f fastpathT) DecMapIntfUintX(vp *map[interface{}]uint, d *Decoder) {
+	v, changed := f.DecMapIntfUintV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntfUintV(v map[interface{}]uint, canChange bool,
+	d *Decoder) (_ map[interface{}]uint, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[interface{}]uint, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk interface{}
+	var mv uint
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv)
+		}
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfUint8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[interface{}]uint8)
+		v, changed := fastpathTV.DecMapIntfUint8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntfUint8V(rv2i(rv).(map[interface{}]uint8), false, d)
+	}
+}
+func (f fastpathT) DecMapIntfUint8X(vp *map[interface{}]uint8, d *Decoder) {
+	v, changed := f.DecMapIntfUint8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntfUint8V(v map[interface{}]uint8, canChange bool,
+	d *Decoder) (_ map[interface{}]uint8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
+		v = make(map[interface{}]uint8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk interface{}
+	var mv uint8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv)
+		}
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfUint16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[interface{}]uint16)
+		v, changed := fastpathTV.DecMapIntfUint16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntfUint16V(rv2i(rv).(map[interface{}]uint16), false, d)
+	}
+}
+func (f fastpathT) DecMapIntfUint16X(vp *map[interface{}]uint16, d *Decoder) {
+	v, changed := f.DecMapIntfUint16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntfUint16V(v map[interface{}]uint16, canChange bool,
+	d *Decoder) (_ map[interface{}]uint16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 18)
+		v = make(map[interface{}]uint16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk interface{}
+	var mv uint16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv)
+		}
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfUint32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[interface{}]uint32)
+		v, changed := fastpathTV.DecMapIntfUint32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntfUint32V(rv2i(rv).(map[interface{}]uint32), false, d)
+	}
+}
+func (f fastpathT) DecMapIntfUint32X(vp *map[interface{}]uint32, d *Decoder) {
+	v, changed := f.DecMapIntfUint32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntfUint32V(v map[interface{}]uint32, canChange bool,
+	d *Decoder) (_ map[interface{}]uint32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
+		v = make(map[interface{}]uint32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk interface{}
+	var mv uint32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv)
+		}
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfUint64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[interface{}]uint64)
+		v, changed := fastpathTV.DecMapIntfUint64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntfUint64V(rv2i(rv).(map[interface{}]uint64), false, d)
+	}
+}
+func (f fastpathT) DecMapIntfUint64X(vp *map[interface{}]uint64, d *Decoder) {
+	v, changed := f.DecMapIntfUint64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntfUint64V(v map[interface{}]uint64, canChange bool,
+	d *Decoder) (_ map[interface{}]uint64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[interface{}]uint64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk interface{}
+	var mv uint64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv)
+		}
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeUint64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfUintptrR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[interface{}]uintptr)
+		v, changed := fastpathTV.DecMapIntfUintptrV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntfUintptrV(rv2i(rv).(map[interface{}]uintptr), false, d)
+	}
+}
+func (f fastpathT) DecMapIntfUintptrX(vp *map[interface{}]uintptr, d *Decoder) {
+	v, changed := f.DecMapIntfUintptrV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntfUintptrV(v map[interface{}]uintptr, canChange bool,
+	d *Decoder) (_ map[interface{}]uintptr, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[interface{}]uintptr, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk interface{}
+	var mv uintptr
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv)
+		}
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfIntR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[interface{}]int)
+		v, changed := fastpathTV.DecMapIntfIntV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntfIntV(rv2i(rv).(map[interface{}]int), false, d)
+	}
+}
+func (f fastpathT) DecMapIntfIntX(vp *map[interface{}]int, d *Decoder) {
+	v, changed := f.DecMapIntfIntV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntfIntV(v map[interface{}]int, canChange bool,
+	d *Decoder) (_ map[interface{}]int, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[interface{}]int, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk interface{}
+	var mv int
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv)
+		}
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfInt8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[interface{}]int8)
+		v, changed := fastpathTV.DecMapIntfInt8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntfInt8V(rv2i(rv).(map[interface{}]int8), false, d)
+	}
+}
+func (f fastpathT) DecMapIntfInt8X(vp *map[interface{}]int8, d *Decoder) {
+	v, changed := f.DecMapIntfInt8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntfInt8V(v map[interface{}]int8, canChange bool,
+	d *Decoder) (_ map[interface{}]int8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
+		v = make(map[interface{}]int8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk interface{}
+	var mv int8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv)
+		}
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfInt16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[interface{}]int16)
+		v, changed := fastpathTV.DecMapIntfInt16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntfInt16V(rv2i(rv).(map[interface{}]int16), false, d)
+	}
+}
+func (f fastpathT) DecMapIntfInt16X(vp *map[interface{}]int16, d *Decoder) {
+	v, changed := f.DecMapIntfInt16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntfInt16V(v map[interface{}]int16, canChange bool,
+	d *Decoder) (_ map[interface{}]int16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 18)
+		v = make(map[interface{}]int16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk interface{}
+	var mv int16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv)
+		}
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfInt32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[interface{}]int32)
+		v, changed := fastpathTV.DecMapIntfInt32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntfInt32V(rv2i(rv).(map[interface{}]int32), false, d)
+	}
+}
+func (f fastpathT) DecMapIntfInt32X(vp *map[interface{}]int32, d *Decoder) {
+	v, changed := f.DecMapIntfInt32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntfInt32V(v map[interface{}]int32, canChange bool,
+	d *Decoder) (_ map[interface{}]int32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
+		v = make(map[interface{}]int32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk interface{}
+	var mv int32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv)
+		}
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfInt64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[interface{}]int64)
+		v, changed := fastpathTV.DecMapIntfInt64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntfInt64V(rv2i(rv).(map[interface{}]int64), false, d)
+	}
+}
+func (f fastpathT) DecMapIntfInt64X(vp *map[interface{}]int64, d *Decoder) {
+	v, changed := f.DecMapIntfInt64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntfInt64V(v map[interface{}]int64, canChange bool,
+	d *Decoder) (_ map[interface{}]int64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[interface{}]int64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk interface{}
+	var mv int64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv)
+		}
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeInt64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfFloat32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[interface{}]float32)
+		v, changed := fastpathTV.DecMapIntfFloat32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntfFloat32V(rv2i(rv).(map[interface{}]float32), false, d)
+	}
+}
+func (f fastpathT) DecMapIntfFloat32X(vp *map[interface{}]float32, d *Decoder) {
+	v, changed := f.DecMapIntfFloat32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntfFloat32V(v map[interface{}]float32, canChange bool,
+	d *Decoder) (_ map[interface{}]float32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
+		v = make(map[interface{}]float32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk interface{}
+	var mv float32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv)
+		}
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfFloat64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[interface{}]float64)
+		v, changed := fastpathTV.DecMapIntfFloat64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntfFloat64V(rv2i(rv).(map[interface{}]float64), false, d)
+	}
+}
+func (f fastpathT) DecMapIntfFloat64X(vp *map[interface{}]float64, d *Decoder) {
+	v, changed := f.DecMapIntfFloat64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntfFloat64V(v map[interface{}]float64, canChange bool,
+	d *Decoder) (_ map[interface{}]float64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[interface{}]float64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk interface{}
+	var mv float64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv)
+		}
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeFloat64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntfBoolR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[interface{}]bool)
+		v, changed := fastpathTV.DecMapIntfBoolV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntfBoolV(rv2i(rv).(map[interface{}]bool), false, d)
+	}
+}
+func (f fastpathT) DecMapIntfBoolX(vp *map[interface{}]bool, d *Decoder) {
+	v, changed := f.DecMapIntfBoolV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntfBoolV(v map[interface{}]bool, canChange bool,
+	d *Decoder) (_ map[interface{}]bool, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
+		v = make(map[interface{}]bool, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk interface{}
+	var mv bool
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv)
+		}
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = false
+			}
+			continue
+		}
+		mv = dd.DecodeBool()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringIntfR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[string]interface{})
+		v, changed := fastpathTV.DecMapStringIntfV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapStringIntfV(rv2i(rv).(map[string]interface{}), false, d)
+	}
+}
+func (f fastpathT) DecMapStringIntfX(vp *map[string]interface{}, d *Decoder) {
+	v, changed := f.DecMapStringIntfV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapStringIntfV(v map[string]interface{}, canChange bool,
+	d *Decoder) (_ map[string]interface{}, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 32)
+		v = make(map[string]interface{}, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk string
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeString()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = nil
+			}
+			continue
+		}
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringStringR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[string]string)
+		v, changed := fastpathTV.DecMapStringStringV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapStringStringV(rv2i(rv).(map[string]string), false, d)
+	}
+}
+func (f fastpathT) DecMapStringStringX(vp *map[string]string, d *Decoder) {
+	v, changed := f.DecMapStringStringV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapStringStringV(v map[string]string, canChange bool,
+	d *Decoder) (_ map[string]string, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 32)
+		v = make(map[string]string, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk string
+	var mv string
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeString()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = ""
+			}
+			continue
+		}
+		mv = dd.DecodeString()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringUintR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[string]uint)
+		v, changed := fastpathTV.DecMapStringUintV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapStringUintV(rv2i(rv).(map[string]uint), false, d)
+	}
+}
+func (f fastpathT) DecMapStringUintX(vp *map[string]uint, d *Decoder) {
+	v, changed := f.DecMapStringUintV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapStringUintV(v map[string]uint, canChange bool,
+	d *Decoder) (_ map[string]uint, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[string]uint, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk string
+	var mv uint
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeString()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringUint8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[string]uint8)
+		v, changed := fastpathTV.DecMapStringUint8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapStringUint8V(rv2i(rv).(map[string]uint8), false, d)
+	}
+}
+func (f fastpathT) DecMapStringUint8X(vp *map[string]uint8, d *Decoder) {
+	v, changed := f.DecMapStringUint8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapStringUint8V(v map[string]uint8, canChange bool,
+	d *Decoder) (_ map[string]uint8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
+		v = make(map[string]uint8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk string
+	var mv uint8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeString()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringUint16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[string]uint16)
+		v, changed := fastpathTV.DecMapStringUint16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapStringUint16V(rv2i(rv).(map[string]uint16), false, d)
+	}
+}
+func (f fastpathT) DecMapStringUint16X(vp *map[string]uint16, d *Decoder) {
+	v, changed := f.DecMapStringUint16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapStringUint16V(v map[string]uint16, canChange bool,
+	d *Decoder) (_ map[string]uint16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 18)
+		v = make(map[string]uint16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk string
+	var mv uint16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeString()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringUint32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[string]uint32)
+		v, changed := fastpathTV.DecMapStringUint32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapStringUint32V(rv2i(rv).(map[string]uint32), false, d)
+	}
+}
+func (f fastpathT) DecMapStringUint32X(vp *map[string]uint32, d *Decoder) {
+	v, changed := f.DecMapStringUint32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapStringUint32V(v map[string]uint32, canChange bool,
+	d *Decoder) (_ map[string]uint32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
+		v = make(map[string]uint32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk string
+	var mv uint32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeString()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringUint64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[string]uint64)
+		v, changed := fastpathTV.DecMapStringUint64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapStringUint64V(rv2i(rv).(map[string]uint64), false, d)
+	}
+}
+func (f fastpathT) DecMapStringUint64X(vp *map[string]uint64, d *Decoder) {
+	v, changed := f.DecMapStringUint64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapStringUint64V(v map[string]uint64, canChange bool,
+	d *Decoder) (_ map[string]uint64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[string]uint64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk string
+	var mv uint64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeString()
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeUint64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapStringUintptrR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[string]uintptr)
+		v, changed := fastpathTV.DecMapStringUintptrV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapStringUintptrV(rv2i(rv).(map[string]uintptr), false, d)
+	}
+}
+func (f fastpathT) DecMapStringUintptrX(vp *map[string]uintptr, d *Decoder) {
+	v, changed := f.DecMapStringUintptrV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapStringUintptrV(v map[string]uintptr, canChange bool,
+	d *Decoder) (_ map[string]uintptr, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[string]uintptr, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
== 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk string + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringIntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]int) + v, changed := fastpathTV.DecMapStringIntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapStringIntV(rv2i(rv).(map[string]int), false, d) + } +} +func (f fastpathT) DecMapStringIntX(vp *map[string]int, d *Decoder) { + v, changed := f.DecMapStringIntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringIntV(v map[string]int, canChange bool, + d *Decoder) (_ map[string]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk string + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringInt8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]int8) + v, changed := fastpathTV.DecMapStringInt8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapStringInt8V(rv2i(rv).(map[string]int8), false, d) + } +} +func (f fastpathT) DecMapStringInt8X(vp *map[string]int8, d *Decoder) { + v, changed := f.DecMapStringInt8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringInt8V(v map[string]int8, canChange bool, + d *Decoder) (_ map[string]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[string]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk string + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringInt16R(f *codecFnInfo, rv reflect.Value) { + if 
rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]int16) + v, changed := fastpathTV.DecMapStringInt16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapStringInt16V(rv2i(rv).(map[string]int16), false, d) + } +} +func (f fastpathT) DecMapStringInt16X(vp *map[string]int16, d *Decoder) { + v, changed := f.DecMapStringInt16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringInt16V(v map[string]int16, canChange bool, + d *Decoder) (_ map[string]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[string]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk string + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringInt32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]int32) + v, changed := fastpathTV.DecMapStringInt32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapStringInt32V(rv2i(rv).(map[string]int32), false, d) + } +} +func (f fastpathT) DecMapStringInt32X(vp *map[string]int32, d *Decoder) { + v, changed := f.DecMapStringInt32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringInt32V(v map[string]int32, canChange bool, + d *Decoder) (_ map[string]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[string]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk string + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringInt64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]int64) + v, changed := fastpathTV.DecMapStringInt64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapStringInt64V(rv2i(rv).(map[string]int64), false, d) + } +} +func (f fastpathT) DecMapStringInt64X(vp *map[string]int64, d *Decoder) { + v, changed := f.DecMapStringInt64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringInt64V(v map[string]int64, canChange bool, + d *Decoder) (_ map[string]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := 
decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk string + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringFloat32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]float32) + v, changed := fastpathTV.DecMapStringFloat32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapStringFloat32V(rv2i(rv).(map[string]float32), false, d) + } +} +func (f fastpathT) DecMapStringFloat32X(vp *map[string]float32, d *Decoder) { + v, changed := f.DecMapStringFloat32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringFloat32V(v map[string]float32, canChange bool, + d *Decoder) (_ map[string]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[string]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk string + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringFloat64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]float64) + v, changed := fastpathTV.DecMapStringFloat64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapStringFloat64V(rv2i(rv).(map[string]float64), false, d) + } +} +func (f fastpathT) DecMapStringFloat64X(vp *map[string]float64, d *Decoder) { + v, changed := f.DecMapStringFloat64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringFloat64V(v map[string]float64, canChange bool, + d *Decoder) (_ map[string]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk string + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } 
+ dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringBoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]bool) + v, changed := fastpathTV.DecMapStringBoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapStringBoolV(rv2i(rv).(map[string]bool), false, d) + } +} +func (f fastpathT) DecMapStringBoolX(vp *map[string]bool, d *Decoder) { + v, changed := f.DecMapStringBoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringBoolV(v map[string]bool, canChange bool, + d *Decoder) (_ map[string]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[string]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk string + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]interface{}) + v, changed := fastpathTV.DecMapFloat32IntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32IntfV(rv2i(rv).(map[float32]interface{}), false, d) + } +} +func (f fastpathT) DecMapFloat32IntfX(vp *map[float32]interface{}, d *Decoder) { + v, changed := f.DecMapFloat32IntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32IntfV(v map[float32]interface{}, canChange bool, + d *Decoder) (_ map[float32]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[float32]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk float32 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]string) + v, changed := fastpathTV.DecMapFloat32StringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32StringV(rv2i(rv).(map[float32]string), false, d) + } +} +func (f fastpathT) DecMapFloat32StringX(vp *map[float32]string, d *Decoder) { + v, changed := f.DecMapFloat32StringV(*vp, true, d) + 
if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32StringV(v map[float32]string, canChange bool, + d *Decoder) (_ map[float32]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[float32]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float32 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]uint) + v, changed := fastpathTV.DecMapFloat32UintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32UintV(rv2i(rv).(map[float32]uint), false, d) + } +} +func (f fastpathT) DecMapFloat32UintX(vp *map[float32]uint, d *Decoder) { + v, changed := f.DecMapFloat32UintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32UintV(v map[float32]uint, canChange bool, + d *Decoder) (_ map[float32]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float32 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]uint8) + v, changed := fastpathTV.DecMapFloat32Uint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32Uint8V(rv2i(rv).(map[float32]uint8), false, d) + } +} +func (f fastpathT) DecMapFloat32Uint8X(vp *map[float32]uint8, d *Decoder) { + v, changed := f.DecMapFloat32Uint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Uint8V(v map[float32]uint8, canChange bool, + d *Decoder) (_ map[float32]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[float32]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float32 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ 
{ + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]uint16) + v, changed := fastpathTV.DecMapFloat32Uint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32Uint16V(rv2i(rv).(map[float32]uint16), false, d) + } +} +func (f fastpathT) DecMapFloat32Uint16X(vp *map[float32]uint16, d *Decoder) { + v, changed := f.DecMapFloat32Uint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Uint16V(v map[float32]uint16, canChange bool, + d *Decoder) (_ map[float32]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[float32]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float32 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]uint32) + v, changed := fastpathTV.DecMapFloat32Uint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32Uint32V(rv2i(rv).(map[float32]uint32), false, d) + } +} +func (f fastpathT) DecMapFloat32Uint32X(vp *map[float32]uint32, d *Decoder) { + v, changed := f.DecMapFloat32Uint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Uint32V(v map[float32]uint32, canChange bool, + d *Decoder) (_ map[float32]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[float32]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float32 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := 
rv2i(rv).(*map[float32]uint64) + v, changed := fastpathTV.DecMapFloat32Uint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32Uint64V(rv2i(rv).(map[float32]uint64), false, d) + } +} +func (f fastpathT) DecMapFloat32Uint64X(vp *map[float32]uint64, d *Decoder) { + v, changed := f.DecMapFloat32Uint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Uint64V(v map[float32]uint64, canChange bool, + d *Decoder) (_ map[float32]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float32 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]uintptr) + v, changed := fastpathTV.DecMapFloat32UintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32UintptrV(rv2i(rv).(map[float32]uintptr), false, d) + } +} +func (f fastpathT) DecMapFloat32UintptrX(vp *map[float32]uintptr, d *Decoder) { + v, changed := f.DecMapFloat32UintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32UintptrV(v map[float32]uintptr, canChange bool, + d *Decoder) (_ map[float32]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float32 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]int) + v, changed := fastpathTV.DecMapFloat32IntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32IntV(rv2i(rv).(map[float32]int), false, d) + } +} +func (f fastpathT) DecMapFloat32IntX(vp *map[float32]int, d *Decoder) { + v, changed := f.DecMapFloat32IntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32IntV(v map[float32]int, canChange bool, + d *Decoder) (_ map[float32]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := 
dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float32 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]int8) + v, changed := fastpathTV.DecMapFloat32Int8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32Int8V(rv2i(rv).(map[float32]int8), false, d) + } +} +func (f fastpathT) DecMapFloat32Int8X(vp *map[float32]int8, d *Decoder) { + v, changed := f.DecMapFloat32Int8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Int8V(v map[float32]int8, canChange bool, + d *Decoder) (_ map[float32]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[float32]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float32 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]int16) + v, changed := fastpathTV.DecMapFloat32Int16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32Int16V(rv2i(rv).(map[float32]int16), false, d) + } +} +func (f fastpathT) DecMapFloat32Int16X(vp *map[float32]int16, d *Decoder) { + v, changed := f.DecMapFloat32Int16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Int16V(v map[float32]int16, canChange bool, + d *Decoder) (_ map[float32]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[float32]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float32 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + 
delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]int32) + v, changed := fastpathTV.DecMapFloat32Int32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32Int32V(rv2i(rv).(map[float32]int32), false, d) + } +} +func (f fastpathT) DecMapFloat32Int32X(vp *map[float32]int32, d *Decoder) { + v, changed := f.DecMapFloat32Int32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Int32V(v map[float32]int32, canChange bool, + d *Decoder) (_ map[float32]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[float32]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float32 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]int64) + v, changed := fastpathTV.DecMapFloat32Int64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32Int64V(rv2i(rv).(map[float32]int64), false, d) + } +} +func (f fastpathT) DecMapFloat32Int64X(vp *map[float32]int64, d *Decoder) { + v, changed := f.DecMapFloat32Int64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Int64V(v map[float32]int64, canChange bool, + d *Decoder) (_ map[float32]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float32 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]float32) + v, changed := fastpathTV.DecMapFloat32Float32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32Float32V(rv2i(rv).(map[float32]float32), false, d) + } +} +func (f fastpathT) DecMapFloat32Float32X(vp 
*map[float32]float32, d *Decoder) { + v, changed := f.DecMapFloat32Float32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Float32V(v map[float32]float32, canChange bool, + d *Decoder) (_ map[float32]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[float32]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float32 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]float64) + v, changed := fastpathTV.DecMapFloat32Float64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32Float64V(rv2i(rv).(map[float32]float64), false, d) + } +} +func (f fastpathT) DecMapFloat32Float64X(vp *map[float32]float64, d *Decoder) { + v, changed := f.DecMapFloat32Float64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Float64V(v map[float32]float64, canChange bool, + d *Decoder) (_ map[float32]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float32 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]bool) + v, changed := fastpathTV.DecMapFloat32BoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32BoolV(rv2i(rv).(map[float32]bool), false, d) + } +} +func (f fastpathT) DecMapFloat32BoolX(vp *map[float32]bool, d *Decoder) { + v, changed := f.DecMapFloat32BoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32BoolV(v map[float32]bool, canChange bool, + d *Decoder) (_ map[float32]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[float32]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float32 + var 
mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]interface{}) + v, changed := fastpathTV.DecMapFloat64IntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64IntfV(rv2i(rv).(map[float64]interface{}), false, d) + } +} +func (f fastpathT) DecMapFloat64IntfX(vp *map[float64]interface{}, d *Decoder) { + v, changed := f.DecMapFloat64IntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64IntfV(v map[float64]interface{}, canChange bool, + d *Decoder) (_ map[float64]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[float64]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk float64 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]string) + v, changed := fastpathTV.DecMapFloat64StringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64StringV(rv2i(rv).(map[float64]string), false, d) + } +} +func (f fastpathT) DecMapFloat64StringX(vp *map[float64]string, d *Decoder) { + v, changed := f.DecMapFloat64StringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64StringV(v map[float64]string, canChange bool, + d *Decoder) (_ map[float64]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[float64]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float64 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d 
*Decoder) fastpathDecMapFloat64UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uint) + v, changed := fastpathTV.DecMapFloat64UintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64UintV(rv2i(rv).(map[float64]uint), false, d) + } +} +func (f fastpathT) DecMapFloat64UintX(vp *map[float64]uint, d *Decoder) { + v, changed := f.DecMapFloat64UintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64UintV(v map[float64]uint, canChange bool, + d *Decoder) (_ map[float64]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float64 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uint8) + v, changed := fastpathTV.DecMapFloat64Uint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64Uint8V(rv2i(rv).(map[float64]uint8), false, d) + } +} +func (f fastpathT) DecMapFloat64Uint8X(vp *map[float64]uint8, d *Decoder) { + v, changed := f.DecMapFloat64Uint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Uint8V(v map[float64]uint8, canChange bool, + d *Decoder) (_ map[float64]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[float64]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float64 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uint16) + v, changed := fastpathTV.DecMapFloat64Uint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64Uint16V(rv2i(rv).(map[float64]uint16), false, d) + } +} +func (f fastpathT) DecMapFloat64Uint16X(vp *map[float64]uint16, d *Decoder) { + v, changed := f.DecMapFloat64Uint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Uint16V(v map[float64]uint16, canChange bool, + d *Decoder) (_ map[float64]uint16, changed bool) { + dd, esep := 
d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[float64]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float64 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uint32) + v, changed := fastpathTV.DecMapFloat64Uint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64Uint32V(rv2i(rv).(map[float64]uint32), false, d) + } +} +func (f fastpathT) DecMapFloat64Uint32X(vp *map[float64]uint32, d *Decoder) { + v, changed := f.DecMapFloat64Uint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Uint32V(v map[float64]uint32, canChange bool, + d *Decoder) (_ map[float64]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float64]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float64 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uint64) + v, changed := fastpathTV.DecMapFloat64Uint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64Uint64V(rv2i(rv).(map[float64]uint64), false, d) + } +} +func (f fastpathT) DecMapFloat64Uint64X(vp *map[float64]uint64, d *Decoder) { + v, changed := f.DecMapFloat64Uint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Uint64V(v map[float64]uint64, canChange bool, + d *Decoder) (_ map[float64]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float64 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if 
d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uintptr) + v, changed := fastpathTV.DecMapFloat64UintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64UintptrV(rv2i(rv).(map[float64]uintptr), false, d) + } +} +func (f fastpathT) DecMapFloat64UintptrX(vp *map[float64]uintptr, d *Decoder) { + v, changed := f.DecMapFloat64UintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64UintptrV(v map[float64]uintptr, canChange bool, + d *Decoder) (_ map[float64]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float64 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]int) + v, changed := fastpathTV.DecMapFloat64IntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64IntV(rv2i(rv).(map[float64]int), false, d) + } +} +func (f fastpathT) DecMapFloat64IntX(vp *map[float64]int, d *Decoder) { + v, changed := f.DecMapFloat64IntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64IntV(v map[float64]int, canChange bool, + d *Decoder) (_ map[float64]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float64 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]int8) + v, changed := fastpathTV.DecMapFloat64Int8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64Int8V(rv2i(rv).(map[float64]int8), false, d) + } +} +func (f fastpathT) DecMapFloat64Int8X(vp *map[float64]int8, d *Decoder) { + v, 
changed := f.DecMapFloat64Int8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Int8V(v map[float64]int8, canChange bool, + d *Decoder) (_ map[float64]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[float64]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float64 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]int16) + v, changed := fastpathTV.DecMapFloat64Int16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64Int16V(rv2i(rv).(map[float64]int16), false, d) + } +} +func (f fastpathT) DecMapFloat64Int16X(vp *map[float64]int16, d *Decoder) { + v, changed := f.DecMapFloat64Int16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Int16V(v map[float64]int16, canChange bool, + d *Decoder) (_ map[float64]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[float64]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float64 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]int32) + v, changed := fastpathTV.DecMapFloat64Int32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64Int32V(rv2i(rv).(map[float64]int32), false, d) + } +} +func (f fastpathT) DecMapFloat64Int32X(vp *map[float64]int32, d *Decoder) { + v, changed := f.DecMapFloat64Int32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Int32V(v map[float64]int32, canChange bool, + d *Decoder) (_ map[float64]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float64]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float64 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || 
dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]int64) + v, changed := fastpathTV.DecMapFloat64Int64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64Int64V(rv2i(rv).(map[float64]int64), false, d) + } +} +func (f fastpathT) DecMapFloat64Int64X(vp *map[float64]int64, d *Decoder) { + v, changed := f.DecMapFloat64Int64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Int64V(v map[float64]int64, canChange bool, + d *Decoder) (_ map[float64]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float64 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]float32) + v, changed := fastpathTV.DecMapFloat64Float32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64Float32V(rv2i(rv).(map[float64]float32), false, d) + } +} +func (f fastpathT) DecMapFloat64Float32X(vp *map[float64]float32, d *Decoder) { + v, changed := f.DecMapFloat64Float32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Float32V(v map[float64]float32, canChange bool, + d *Decoder) (_ map[float64]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float64]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float64 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]float64) + v, changed := fastpathTV.DecMapFloat64Float64V(*vp, true, d) 
+ if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64Float64V(rv2i(rv).(map[float64]float64), false, d) + } +} +func (f fastpathT) DecMapFloat64Float64X(vp *map[float64]float64, d *Decoder) { + v, changed := f.DecMapFloat64Float64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Float64V(v map[float64]float64, canChange bool, + d *Decoder) (_ map[float64]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float64 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]bool) + v, changed := fastpathTV.DecMapFloat64BoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64BoolV(rv2i(rv).(map[float64]bool), false, d) + } +} +func (f fastpathT) DecMapFloat64BoolX(vp *map[float64]bool, d *Decoder) { + v, changed := f.DecMapFloat64BoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64BoolV(v map[float64]bool, canChange bool, + d *Decoder) (_ map[float64]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[float64]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk float64 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintIntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]interface{}) + v, changed := fastpathTV.DecMapUintIntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintIntfV(rv2i(rv).(map[uint]interface{}), false, d) + } +} +func (f fastpathT) DecMapUintIntfX(vp *map[uint]interface{}, d *Decoder) { + v, changed := f.DecMapUintIntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintIntfV(v map[uint]interface{}, canChange bool, + d *Decoder) (_ map[uint]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uint]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + 
+		return v, changed
+	}
+	d.depthIncr()
+	mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk uint
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = nil
+			}
+			continue
+		}
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintStringR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint]string)
+		v, changed := fastpathTV.DecMapUintStringV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintStringV(rv2i(rv).(map[uint]string), false, d)
+	}
+}
+func (f fastpathT) DecMapUintStringX(vp *map[uint]string, d *Decoder) {
+	v, changed := f.DecMapUintStringV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintStringV(v map[uint]string, canChange bool,
+	d *Decoder) (_ map[uint]string, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[uint]string, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint
+	var mv string
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = ""
+			}
+			continue
+		}
+		mv = dd.DecodeString()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintUintR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint]uint)
+		v, changed := fastpathTV.DecMapUintUintV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintUintV(rv2i(rv).(map[uint]uint), false, d)
+	}
+}
+func (f fastpathT) DecMapUintUintX(vp *map[uint]uint, d *Decoder) {
+	v, changed := f.DecMapUintUintV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintUintV(v map[uint]uint, canChange bool,
+	d *Decoder) (_ map[uint]uint, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uint]uint, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint
+	var mv uint
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintUint8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint]uint8)
+		v, changed := fastpathTV.DecMapUintUint8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintUint8V(rv2i(rv).(map[uint]uint8), false, d)
+	}
+}
+func (f fastpathT) DecMapUintUint8X(vp *map[uint]uint8, d *Decoder) {
+	v, changed := f.DecMapUintUint8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintUint8V(v map[uint]uint8, canChange bool,
+	d *Decoder) (_ map[uint]uint8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uint]uint8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint
+	var mv uint8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintUint16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint]uint16)
+		v, changed := fastpathTV.DecMapUintUint16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintUint16V(rv2i(rv).(map[uint]uint16), false, d)
+	}
+}
+func (f fastpathT) DecMapUintUint16X(vp *map[uint]uint16, d *Decoder) {
+	v, changed := f.DecMapUintUint16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintUint16V(v map[uint]uint16, canChange bool,
+	d *Decoder) (_ map[uint]uint16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[uint]uint16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint
+	var mv uint16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintUint32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint]uint32)
+		v, changed := fastpathTV.DecMapUintUint32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintUint32V(rv2i(rv).(map[uint]uint32), false, d)
+	}
+}
+func (f fastpathT) DecMapUintUint32X(vp *map[uint]uint32, d *Decoder) {
+	v, changed := f.DecMapUintUint32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintUint32V(v map[uint]uint32, canChange bool,
+	d *Decoder) (_ map[uint]uint32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uint]uint32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint
+	var mv uint32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintUint64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint]uint64)
+		v, changed := fastpathTV.DecMapUintUint64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintUint64V(rv2i(rv).(map[uint]uint64), false, d)
+	}
+}
+func (f fastpathT) DecMapUintUint64X(vp *map[uint]uint64, d *Decoder) {
+	v, changed := f.DecMapUintUint64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintUint64V(v map[uint]uint64, canChange bool,
+	d *Decoder) (_ map[uint]uint64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uint]uint64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint
+	var mv uint64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeUint64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintUintptrR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint]uintptr)
+		v, changed := fastpathTV.DecMapUintUintptrV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintUintptrV(rv2i(rv).(map[uint]uintptr), false, d)
+	}
+}
+func (f fastpathT) DecMapUintUintptrX(vp *map[uint]uintptr, d *Decoder) {
+	v, changed := f.DecMapUintUintptrV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintUintptrV(v map[uint]uintptr, canChange bool,
+	d *Decoder) (_ map[uint]uintptr, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uint]uintptr, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint
+	var mv uintptr
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintIntR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint]int)
+		v, changed := fastpathTV.DecMapUintIntV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintIntV(rv2i(rv).(map[uint]int), false, d)
+	}
+}
+func (f fastpathT) DecMapUintIntX(vp *map[uint]int, d *Decoder) {
+	v, changed := f.DecMapUintIntV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintIntV(v map[uint]int, canChange bool,
+	d *Decoder) (_ map[uint]int, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uint]int, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint
+	var mv int
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintInt8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint]int8)
+		v, changed := fastpathTV.DecMapUintInt8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintInt8V(rv2i(rv).(map[uint]int8), false, d)
+	}
+}
+func (f fastpathT) DecMapUintInt8X(vp *map[uint]int8, d *Decoder) {
+	v, changed := f.DecMapUintInt8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintInt8V(v map[uint]int8, canChange bool,
+	d *Decoder) (_ map[uint]int8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uint]int8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint
+	var mv int8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintInt16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint]int16)
+		v, changed := fastpathTV.DecMapUintInt16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintInt16V(rv2i(rv).(map[uint]int16), false, d)
+	}
+}
+func (f fastpathT) DecMapUintInt16X(vp *map[uint]int16, d *Decoder) {
+	v, changed := f.DecMapUintInt16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintInt16V(v map[uint]int16, canChange bool,
+	d *Decoder) (_ map[uint]int16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[uint]int16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint
+	var mv int16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintInt32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint]int32)
+		v, changed := fastpathTV.DecMapUintInt32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintInt32V(rv2i(rv).(map[uint]int32), false, d)
+	}
+}
+func (f fastpathT) DecMapUintInt32X(vp *map[uint]int32, d *Decoder) {
+	v, changed := f.DecMapUintInt32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintInt32V(v map[uint]int32, canChange bool,
+	d *Decoder) (_ map[uint]int32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uint]int32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint
+	var mv int32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintInt64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint]int64)
+		v, changed := fastpathTV.DecMapUintInt64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintInt64V(rv2i(rv).(map[uint]int64), false, d)
+	}
+}
+func (f fastpathT) DecMapUintInt64X(vp *map[uint]int64, d *Decoder) {
+	v, changed := f.DecMapUintInt64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintInt64V(v map[uint]int64, canChange bool,
+	d *Decoder) (_ map[uint]int64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uint]int64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint
+	var mv int64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeInt64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintFloat32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint]float32)
+		v, changed := fastpathTV.DecMapUintFloat32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintFloat32V(rv2i(rv).(map[uint]float32), false, d)
+	}
+}
+func (f fastpathT) DecMapUintFloat32X(vp *map[uint]float32, d *Decoder) {
+	v, changed := f.DecMapUintFloat32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintFloat32V(v map[uint]float32, canChange bool,
+	d *Decoder) (_ map[uint]float32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uint]float32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint
+	var mv float32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintFloat64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint]float64)
+		v, changed := fastpathTV.DecMapUintFloat64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintFloat64V(rv2i(rv).(map[uint]float64), false, d)
+	}
+}
+func (f fastpathT) DecMapUintFloat64X(vp *map[uint]float64, d *Decoder) {
+	v, changed := f.DecMapUintFloat64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintFloat64V(v map[uint]float64, canChange bool,
+	d *Decoder) (_ map[uint]float64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uint]float64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint
+	var mv float64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeFloat64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintBoolR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint]bool)
+		v, changed := fastpathTV.DecMapUintBoolV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintBoolV(rv2i(rv).(map[uint]bool), false, d)
+	}
+}
+func (f fastpathT) DecMapUintBoolX(vp *map[uint]bool, d *Decoder) {
+	v, changed := f.DecMapUintBoolV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintBoolV(v map[uint]bool, canChange bool,
+	d *Decoder) (_ map[uint]bool, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uint]bool, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint
+	var mv bool
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = false
+			}
+			continue
+		}
+		mv = dd.DecodeBool()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8IntfR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]interface{})
+		v, changed := fastpathTV.DecMapUint8IntfV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), false, d)
+	}
+}
+func (f fastpathT) DecMapUint8IntfX(vp *map[uint8]interface{}, d *Decoder) {
+	v, changed := f.DecMapUint8IntfV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8IntfV(v map[uint8]interface{}, canChange bool,
+	d *Decoder) (_ map[uint8]interface{}, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
+		v = make(map[uint8]interface{}, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk uint8
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = nil
+			}
+			continue
+		}
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8StringR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]string)
+		v, changed := fastpathTV.DecMapUint8StringV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint8StringV(rv2i(rv).(map[uint8]string), false, d)
+	}
+}
+func (f fastpathT) DecMapUint8StringX(vp *map[uint8]string, d *Decoder) {
+	v, changed := f.DecMapUint8StringV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8StringV(v map[uint8]string, canChange bool,
+	d *Decoder) (_ map[uint8]string, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
+		v = make(map[uint8]string, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint8
+	var mv string
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = ""
+			}
+			continue
+		}
+		mv = dd.DecodeString()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8UintR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]uint)
+		v, changed := fastpathTV.DecMapUint8UintV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint8UintV(rv2i(rv).(map[uint8]uint), false, d)
+	}
+}
+func (f fastpathT) DecMapUint8UintX(vp *map[uint8]uint, d *Decoder) {
+	v, changed := f.DecMapUint8UintV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8UintV(v map[uint8]uint, canChange bool,
+	d *Decoder) (_ map[uint8]uint, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uint8]uint, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint8
+	var mv uint
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Uint8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]uint8)
+		v, changed := fastpathTV.DecMapUint8Uint8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), false, d)
+	}
+}
+func (f fastpathT) DecMapUint8Uint8X(vp *map[uint8]uint8, d *Decoder) {
+	v, changed := f.DecMapUint8Uint8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8Uint8V(v map[uint8]uint8, canChange bool,
+	d *Decoder) (_ map[uint8]uint8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
+		v = make(map[uint8]uint8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint8
+	var mv uint8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Uint16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]uint16)
+		v, changed := fastpathTV.DecMapUint8Uint16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint8Uint16V(rv2i(rv).(map[uint8]uint16), false, d)
+	}
+}
+func (f fastpathT) DecMapUint8Uint16X(vp *map[uint8]uint16, d *Decoder) {
+	v, changed := f.DecMapUint8Uint16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8Uint16V(v map[uint8]uint16, canChange bool,
+	d *Decoder) (_ map[uint8]uint16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+		v = make(map[uint8]uint16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint8
+	var mv uint16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Uint32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]uint32)
+		v, changed := fastpathTV.DecMapUint8Uint32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint8Uint32V(rv2i(rv).(map[uint8]uint32), false, d)
+	}
+}
+func (f fastpathT) DecMapUint8Uint32X(vp *map[uint8]uint32, d *Decoder) {
+	v, changed := f.DecMapUint8Uint32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8Uint32V(v map[uint8]uint32, canChange bool,
+	d *Decoder) (_ map[uint8]uint32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+		v = make(map[uint8]uint32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint8
+	var mv uint32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Uint64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]uint64)
+		v, changed := fastpathTV.DecMapUint8Uint64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), false, d)
+	}
+}
+func (f fastpathT) DecMapUint8Uint64X(vp *map[uint8]uint64, d *Decoder) {
+	v, changed := f.DecMapUint8Uint64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8Uint64V(v map[uint8]uint64, canChange bool,
+	d *Decoder) (_ map[uint8]uint64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uint8]uint64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint8
+	var mv uint64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeUint64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8UintptrR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]uintptr)
+		v, changed := fastpathTV.DecMapUint8UintptrV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint8UintptrV(rv2i(rv).(map[uint8]uintptr), false, d)
+	}
+}
+func (f fastpathT) DecMapUint8UintptrX(vp *map[uint8]uintptr, d *Decoder) {
+	v, changed := f.DecMapUint8UintptrV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8UintptrV(v map[uint8]uintptr, canChange bool,
+	d *Decoder) (_ map[uint8]uintptr, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uint8]uintptr, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint8
+	var mv uintptr
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8IntR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]int)
+		v, changed := fastpathTV.DecMapUint8IntV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint8IntV(rv2i(rv).(map[uint8]int), false, d)
+	}
+}
+func (f fastpathT) DecMapUint8IntX(vp *map[uint8]int, d *Decoder) {
+	v, changed := f.DecMapUint8IntV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8IntV(v map[uint8]int, canChange bool,
+	d *Decoder) (_ map[uint8]int, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uint8]int, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint8
+	var mv int
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Int8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]int8)
+		v, changed := fastpathTV.DecMapUint8Int8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint8Int8V(rv2i(rv).(map[uint8]int8), false, d)
+	}
+}
+func (f fastpathT) DecMapUint8Int8X(vp *map[uint8]int8, d *Decoder) {
+	v, changed := f.DecMapUint8Int8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8Int8V(v map[uint8]int8, canChange bool,
+	d *Decoder) (_ map[uint8]int8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
+		v = make(map[uint8]int8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint8
+	var mv int8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Int16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]int16)
+		v, changed := fastpathTV.DecMapUint8Int16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint8Int16V(rv2i(rv).(map[uint8]int16), false, d)
+	}
+}
+func (f fastpathT) DecMapUint8Int16X(vp *map[uint8]int16, d *Decoder) {
+	v, changed := f.DecMapUint8Int16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8Int16V(v map[uint8]int16, canChange bool,
+	d *Decoder) (_ map[uint8]int16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+		v = make(map[uint8]int16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint8
+	var mv int16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Int32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]int32)
+		v, changed := fastpathTV.DecMapUint8Int32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint8Int32V(rv2i(rv).(map[uint8]int32), false, d)
+	}
+}
+func (f fastpathT) DecMapUint8Int32X(vp *map[uint8]int32, d *Decoder) {
+	v, changed := f.DecMapUint8Int32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8Int32V(v map[uint8]int32, canChange bool,
+	d *Decoder) (_ map[uint8]int32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+		v = make(map[uint8]int32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint8
+	var mv int32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Int64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]int64)
+		v, changed := fastpathTV.DecMapUint8Int64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint8Int64V(rv2i(rv).(map[uint8]int64), false, d)
+	}
+}
+func (f fastpathT) DecMapUint8Int64X(vp *map[uint8]int64, d *Decoder) {
+	v, changed := f.DecMapUint8Int64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8Int64V(v map[uint8]int64, canChange bool,
+	d *Decoder) (_ map[uint8]int64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uint8]int64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint8
+	var mv int64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeInt64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Float32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]float32)
+		v, changed := fastpathTV.DecMapUint8Float32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint8Float32V(rv2i(rv).(map[uint8]float32), false, d)
+	}
+}
+func (f fastpathT) DecMapUint8Float32X(vp *map[uint8]float32, d *Decoder) {
+	v, changed := f.DecMapUint8Float32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8Float32V(v map[uint8]float32, canChange bool,
+	d *Decoder) (_ map[uint8]float32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+		v = make(map[uint8]float32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint8
+	var mv float32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Float64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]float64)
+		v, changed := fastpathTV.DecMapUint8Float64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint8Float64V(rv2i(rv).(map[uint8]float64), false, d)
+	}
+}
+func (f fastpathT) DecMapUint8Float64X(vp *map[uint8]float64, d *Decoder) {
+	v, changed := f.DecMapUint8Float64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8Float64V(v map[uint8]float64, canChange bool,
+	d *Decoder) (_ map[uint8]float64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uint8]float64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint8
+	var mv float64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeFloat64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8BoolR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]bool)
+		v, changed := fastpathTV.DecMapUint8BoolV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint8BoolV(rv2i(rv).(map[uint8]bool), false, d)
+	}
+}
+func (f fastpathT) DecMapUint8BoolX(vp *map[uint8]bool, d *Decoder) {
+	v, changed := f.DecMapUint8BoolV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8BoolV(v map[uint8]bool, canChange bool,
+	d *Decoder) (_ map[uint8]bool, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
+		v = make(map[uint8]bool, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint8
+	var mv bool
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = false
+			}
+			continue
+		}
+		mv = dd.DecodeBool()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16IntfR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]interface{})
+		v, changed := fastpathTV.DecMapUint16IntfV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint16IntfV(rv2i(rv).(map[uint16]interface{}), false, d)
+	}
+}
+func (f fastpathT) DecMapUint16IntfX(vp *map[uint16]interface{}, d *Decoder) {
+	v, changed := f.DecMapUint16IntfV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16IntfV(v map[uint16]interface{}, canChange bool,
+	d *Decoder) (_ map[uint16]interface{}, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 18)
+		v = make(map[uint16]interface{}, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk uint16
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = nil
+			}
+			continue
+		}
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16StringR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]string)
+		v, changed := fastpathTV.DecMapUint16StringV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint16StringV(rv2i(rv).(map[uint16]string), false, d)
+	}
+}
+func (f fastpathT) DecMapUint16StringX(vp *map[uint16]string, d *Decoder) {
+	v, changed := f.DecMapUint16StringV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16StringV(v map[uint16]string, canChange bool,
+	d *Decoder) (_ map[uint16]string, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 18)
+		v = make(map[uint16]string, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint16
+	var mv string
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = ""
+			}
+			continue
+		}
+		mv = dd.DecodeString()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16UintR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]uint)
+		v, changed := fastpathTV.DecMapUint16UintV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint16UintV(rv2i(rv).(map[uint16]uint), false, d)
+	}
+}
+func (f fastpathT) DecMapUint16UintX(vp *map[uint16]uint, d *Decoder) {
+	v, changed := f.DecMapUint16UintV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16UintV(v map[uint16]uint, canChange bool,
+	d *Decoder) (_ map[uint16]uint, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[uint16]uint, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint16
+	var mv uint
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Uint8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]uint8)
+		v, changed := fastpathTV.DecMapUint16Uint8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint16Uint8V(rv2i(rv).(map[uint16]uint8), false, d)
+	}
+}
+func (f fastpathT) DecMapUint16Uint8X(vp *map[uint16]uint8, d *Decoder) {
+	v, changed := f.DecMapUint16Uint8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16Uint8V(v map[uint16]uint8, canChange bool,
+	d *Decoder) (_ map[uint16]uint8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+		v = make(map[uint16]uint8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint16
+	var mv uint8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Uint16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]uint16)
+		v, changed := fastpathTV.DecMapUint16Uint16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint16Uint16V(rv2i(rv).(map[uint16]uint16), false, d)
+	}
+}
+func (f fastpathT) DecMapUint16Uint16X(vp *map[uint16]uint16, d *Decoder) {
+	v, changed := f.DecMapUint16Uint16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16Uint16V(v map[uint16]uint16, canChange bool,
+	d *Decoder) (_ map[uint16]uint16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 4)
+		v = make(map[uint16]uint16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint16
+	var mv uint16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Uint32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]uint32)
+		v, changed := fastpathTV.DecMapUint16Uint32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint16Uint32V(rv2i(rv).(map[uint16]uint32), false, d)
+	}
+}
+func (f fastpathT) DecMapUint16Uint32X(vp *map[uint16]uint32, d *Decoder) {
+	v, changed := f.DecMapUint16Uint32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16Uint32V(v map[uint16]uint32, canChange bool,
+	d *Decoder) (_ map[uint16]uint32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
+		v = make(map[uint16]uint32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint16
+	var mv uint32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Uint64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]uint64)
+		v, changed := fastpathTV.DecMapUint16Uint64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint16Uint64V(rv2i(rv).(map[uint16]uint64), false, d)
+	}
+}
+func (f fastpathT) DecMapUint16Uint64X(vp *map[uint16]uint64, d *Decoder) {
+	v, changed := f.DecMapUint16Uint64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16Uint64V(v map[uint16]uint64, canChange bool,
+	d *Decoder) (_ map[uint16]uint64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[uint16]uint64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint16
+	var mv uint64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeUint64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16UintptrR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]uintptr)
+		v, changed := fastpathTV.DecMapUint16UintptrV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint16UintptrV(rv2i(rv).(map[uint16]uintptr), false, d)
+	}
+}
+func (f fastpathT) DecMapUint16UintptrX(vp *map[uint16]uintptr, d *Decoder) {
+	v, changed := f.DecMapUint16UintptrV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16UintptrV(v map[uint16]uintptr, canChange bool,
+	d *Decoder) (_ map[uint16]uintptr, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[uint16]uintptr, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint16
+	var mv uintptr
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16IntR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]int)
+		v, changed := fastpathTV.DecMapUint16IntV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint16IntV(rv2i(rv).(map[uint16]int), false, d)
+	}
+}
+func (f fastpathT) DecMapUint16IntX(vp *map[uint16]int, d *Decoder) {
+	v, changed := f.DecMapUint16IntV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16IntV(v map[uint16]int, canChange bool,
+	d *Decoder) (_ map[uint16]int, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[uint16]int, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint16
+	var mv int
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Int8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]int8)
+		v, changed := fastpathTV.DecMapUint16Int8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUint16Int8V(rv2i(rv).(map[uint16]int8), false, d)
+	}
+}
+func (f fastpathT) DecMapUint16Int8X(vp *map[uint16]int8, d *Decoder) {
+	v, changed := f.DecMapUint16Int8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16Int8V(v map[uint16]int8, canChange bool,
+	d *Decoder) (_ map[uint16]int8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+		v = make(map[uint16]int8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uint16
+	var mv int8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]int16) + v, changed := fastpathTV.DecMapUint16Int16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16Int16V(rv2i(rv).(map[uint16]int16), false, d) + } +} +func (f fastpathT) DecMapUint16Int16X(vp *map[uint16]int16, d *Decoder) { + v, changed := f.DecMapUint16Int16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Int16V(v map[uint16]int16, canChange bool, + d *Decoder) (_ map[uint16]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 4) + v = make(map[uint16]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint16 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]int32) + v, changed := fastpathTV.DecMapUint16Int32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16Int32V(rv2i(rv).(map[uint16]int32), false, d) + } +} +func (f fastpathT) DecMapUint16Int32X(vp *map[uint16]int32, d *Decoder) { + v, changed := f.DecMapUint16Int32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Int32V(v map[uint16]int32, canChange bool, + d *Decoder) (_ map[uint16]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint16]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint16 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]int64) + v, changed := fastpathTV.DecMapUint16Int64V(*vp, true, d) + if 
changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16Int64V(rv2i(rv).(map[uint16]int64), false, d) + } +} +func (f fastpathT) DecMapUint16Int64X(vp *map[uint16]int64, d *Decoder) { + v, changed := f.DecMapUint16Int64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Int64V(v map[uint16]int64, canChange bool, + d *Decoder) (_ map[uint16]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint16 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]float32) + v, changed := fastpathTV.DecMapUint16Float32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16Float32V(rv2i(rv).(map[uint16]float32), false, d) + } +} +func (f fastpathT) DecMapUint16Float32X(vp *map[uint16]float32, d *Decoder) { + v, changed := f.DecMapUint16Float32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Float32V(v map[uint16]float32, canChange bool, + d *Decoder) (_ map[uint16]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint16]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint16 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]float64) + v, changed := fastpathTV.DecMapUint16Float64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16Float64V(rv2i(rv).(map[uint16]float64), false, d) + } +} +func (f fastpathT) DecMapUint16Float64X(vp *map[uint16]float64, d *Decoder) { + v, changed := f.DecMapUint16Float64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Float64V(v map[uint16]float64, canChange bool, + d *Decoder) (_ map[uint16]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = 
make(map[uint16]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint16 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]bool) + v, changed := fastpathTV.DecMapUint16BoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16BoolV(rv2i(rv).(map[uint16]bool), false, d) + } +} +func (f fastpathT) DecMapUint16BoolX(vp *map[uint16]bool, d *Decoder) { + v, changed := f.DecMapUint16BoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16BoolV(v map[uint16]bool, canChange bool, + d *Decoder) (_ map[uint16]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[uint16]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint16 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]interface{}) + v, changed := fastpathTV.DecMapUint32IntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32IntfV(rv2i(rv).(map[uint32]interface{}), false, d) + } +} +func (f fastpathT) DecMapUint32IntfX(vp *map[uint32]interface{}, d *Decoder) { + v, changed := f.DecMapUint32IntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32IntfV(v map[uint32]interface{}, canChange bool, + d *Decoder) (_ map[uint32]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[uint32]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint32 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv 
= v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]string) + v, changed := fastpathTV.DecMapUint32StringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32StringV(rv2i(rv).(map[uint32]string), false, d) + } +} +func (f fastpathT) DecMapUint32StringX(vp *map[uint32]string, d *Decoder) { + v, changed := f.DecMapUint32StringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32StringV(v map[uint32]string, canChange bool, + d *Decoder) (_ map[uint32]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[uint32]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint32 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]uint) + v, changed := fastpathTV.DecMapUint32UintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32UintV(rv2i(rv).(map[uint32]uint), false, d) + } +} +func (f fastpathT) DecMapUint32UintX(vp *map[uint32]uint, d *Decoder) { + v, changed := f.DecMapUint32UintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32UintV(v map[uint32]uint, canChange bool, + d *Decoder) (_ map[uint32]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint32 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]uint8) + v, changed := fastpathTV.DecMapUint32Uint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32Uint8V(rv2i(rv).(map[uint32]uint8), false, d) + } +} +func (f fastpathT) DecMapUint32Uint8X(vp *map[uint32]uint8, d *Decoder) { + v, changed := f.DecMapUint32Uint8V(*vp, true, d) + if changed { + *vp = 
v + } +} +func (_ fastpathT) DecMapUint32Uint8V(v map[uint32]uint8, canChange bool, + d *Decoder) (_ map[uint32]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint32]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint32 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]uint16) + v, changed := fastpathTV.DecMapUint32Uint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32Uint16V(rv2i(rv).(map[uint32]uint16), false, d) + } +} +func (f fastpathT) DecMapUint32Uint16X(vp *map[uint32]uint16, d *Decoder) { + v, changed := f.DecMapUint32Uint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Uint16V(v map[uint32]uint16, canChange bool, + d *Decoder) (_ map[uint32]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint32]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint32 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]uint32) + v, changed := fastpathTV.DecMapUint32Uint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32Uint32V(rv2i(rv).(map[uint32]uint32), false, d) + } +} +func (f fastpathT) DecMapUint32Uint32X(vp *map[uint32]uint32, d *Decoder) { + v, changed := f.DecMapUint32Uint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Uint32V(v map[uint32]uint32, canChange bool, + d *Decoder) (_ map[uint32]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[uint32]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint32 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if 
esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]uint64) + v, changed := fastpathTV.DecMapUint32Uint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32Uint64V(rv2i(rv).(map[uint32]uint64), false, d) + } +} +func (f fastpathT) DecMapUint32Uint64X(vp *map[uint32]uint64, d *Decoder) { + v, changed := f.DecMapUint32Uint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Uint64V(v map[uint32]uint64, canChange bool, + d *Decoder) (_ map[uint32]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint32 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]uintptr) + v, changed := fastpathTV.DecMapUint32UintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32UintptrV(rv2i(rv).(map[uint32]uintptr), false, d) + } +} +func (f fastpathT) DecMapUint32UintptrX(vp *map[uint32]uintptr, d *Decoder) { + v, changed := f.DecMapUint32UintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32UintptrV(v map[uint32]uintptr, canChange bool, + d *Decoder) (_ map[uint32]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint32 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]int) + v, changed := 
fastpathTV.DecMapUint32IntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32IntV(rv2i(rv).(map[uint32]int), false, d) + } +} +func (f fastpathT) DecMapUint32IntX(vp *map[uint32]int, d *Decoder) { + v, changed := f.DecMapUint32IntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32IntV(v map[uint32]int, canChange bool, + d *Decoder) (_ map[uint32]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint32 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]int8) + v, changed := fastpathTV.DecMapUint32Int8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32Int8V(rv2i(rv).(map[uint32]int8), false, d) + } +} +func (f fastpathT) DecMapUint32Int8X(vp *map[uint32]int8, d *Decoder) { + v, changed := f.DecMapUint32Int8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Int8V(v map[uint32]int8, canChange bool, + d *Decoder) (_ map[uint32]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint32]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint32 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]int16) + v, changed := fastpathTV.DecMapUint32Int16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32Int16V(rv2i(rv).(map[uint32]int16), false, d) + } +} +func (f fastpathT) DecMapUint32Int16X(vp *map[uint32]int16, d *Decoder) { + v, changed := f.DecMapUint32Int16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Int16V(v map[uint32]int16, canChange bool, + d *Decoder) (_ map[uint32]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint32]int16, xlen) 
+ changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint32 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]int32) + v, changed := fastpathTV.DecMapUint32Int32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32Int32V(rv2i(rv).(map[uint32]int32), false, d) + } +} +func (f fastpathT) DecMapUint32Int32X(vp *map[uint32]int32, d *Decoder) { + v, changed := f.DecMapUint32Int32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Int32V(v map[uint32]int32, canChange bool, + d *Decoder) (_ map[uint32]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[uint32]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint32 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]int64) + v, changed := fastpathTV.DecMapUint32Int64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32Int64V(rv2i(rv).(map[uint32]int64), false, d) + } +} +func (f fastpathT) DecMapUint32Int64X(vp *map[uint32]int64, d *Decoder) { + v, changed := f.DecMapUint32Int64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Int64V(v map[uint32]int64, canChange bool, + d *Decoder) (_ map[uint32]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint32 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + 
return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]float32) + v, changed := fastpathTV.DecMapUint32Float32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32Float32V(rv2i(rv).(map[uint32]float32), false, d) + } +} +func (f fastpathT) DecMapUint32Float32X(vp *map[uint32]float32, d *Decoder) { + v, changed := f.DecMapUint32Float32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Float32V(v map[uint32]float32, canChange bool, + d *Decoder) (_ map[uint32]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[uint32]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint32 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]float64) + v, changed := fastpathTV.DecMapUint32Float64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32Float64V(rv2i(rv).(map[uint32]float64), false, d) + } +} +func (f fastpathT) DecMapUint32Float64X(vp *map[uint32]float64, d *Decoder) { + v, changed := f.DecMapUint32Float64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Float64V(v map[uint32]float64, canChange bool, + d *Decoder) (_ map[uint32]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint32 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]bool) + v, changed := fastpathTV.DecMapUint32BoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32BoolV(rv2i(rv).(map[uint32]bool), false, d) + } +} +func (f fastpathT) DecMapUint32BoolX(vp *map[uint32]bool, d *Decoder) { + v, changed := f.DecMapUint32BoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32BoolV(v map[uint32]bool, canChange bool, + d 
*Decoder) (_ map[uint32]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint32]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint32 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]interface{}) + v, changed := fastpathTV.DecMapUint64IntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), false, d) + } +} +func (f fastpathT) DecMapUint64IntfX(vp *map[uint64]interface{}, d *Decoder) { + v, changed := f.DecMapUint64IntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64IntfV(v map[uint64]interface{}, canChange bool, + d *Decoder) (_ map[uint64]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uint64]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint64 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]string) + v, changed := fastpathTV.DecMapUint64StringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64StringV(rv2i(rv).(map[uint64]string), false, d) + } +} +func (f fastpathT) DecMapUint64StringX(vp *map[uint64]string, d *Decoder) { + v, changed := f.DecMapUint64StringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64StringV(v map[uint64]string, canChange bool, + d *Decoder) (_ map[uint64]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uint64]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint64 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + 
dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]uint) + v, changed := fastpathTV.DecMapUint64UintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64UintV(rv2i(rv).(map[uint64]uint), false, d) + } +} +func (f fastpathT) DecMapUint64UintX(vp *map[uint64]uint, d *Decoder) { + v, changed := f.DecMapUint64UintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64UintV(v map[uint64]uint, canChange bool, + d *Decoder) (_ map[uint64]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint64 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]uint8) + v, changed := fastpathTV.DecMapUint64Uint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), false, d) + } +} +func (f fastpathT) DecMapUint64Uint8X(vp *map[uint64]uint8, d *Decoder) { + v, changed := f.DecMapUint64Uint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Uint8V(v map[uint64]uint8, canChange bool, + d *Decoder) (_ map[uint64]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint64]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint64 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]uint16) + v, changed := fastpathTV.DecMapUint64Uint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64Uint16V(rv2i(rv).(map[uint64]uint16), 
false, d) + } +} +func (f fastpathT) DecMapUint64Uint16X(vp *map[uint64]uint16, d *Decoder) { + v, changed := f.DecMapUint64Uint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Uint16V(v map[uint64]uint16, canChange bool, + d *Decoder) (_ map[uint64]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint64]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint64 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]uint32) + v, changed := fastpathTV.DecMapUint64Uint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64Uint32V(rv2i(rv).(map[uint64]uint32), false, d) + } +} +func (f fastpathT) DecMapUint64Uint32X(vp *map[uint64]uint32, d *Decoder) { + v, changed := f.DecMapUint64Uint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Uint32V(v map[uint64]uint32, canChange bool, + d *Decoder) (_ map[uint64]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint64]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint64 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]uint64) + v, changed := fastpathTV.DecMapUint64Uint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), false, d) + } +} +func (f fastpathT) DecMapUint64Uint64X(vp *map[uint64]uint64, d *Decoder) { + v, changed := f.DecMapUint64Uint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Uint64V(v map[uint64]uint64, canChange bool, + d *Decoder) (_ map[uint64]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint64 + 
var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]uintptr) + v, changed := fastpathTV.DecMapUint64UintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64UintptrV(rv2i(rv).(map[uint64]uintptr), false, d) + } +} +func (f fastpathT) DecMapUint64UintptrX(vp *map[uint64]uintptr, d *Decoder) { + v, changed := f.DecMapUint64UintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64UintptrV(v map[uint64]uintptr, canChange bool, + d *Decoder) (_ map[uint64]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint64 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]int) + v, changed := fastpathTV.DecMapUint64IntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64IntV(rv2i(rv).(map[uint64]int), false, d) + } +} +func (f fastpathT) DecMapUint64IntX(vp *map[uint64]int, d *Decoder) { + v, changed := f.DecMapUint64IntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64IntV(v map[uint64]int, canChange bool, + d *Decoder) (_ map[uint64]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint64 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]int8) + v, 
changed := fastpathTV.DecMapUint64Int8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64Int8V(rv2i(rv).(map[uint64]int8), false, d) + } +} +func (f fastpathT) DecMapUint64Int8X(vp *map[uint64]int8, d *Decoder) { + v, changed := f.DecMapUint64Int8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Int8V(v map[uint64]int8, canChange bool, + d *Decoder) (_ map[uint64]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint64]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint64 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]int16) + v, changed := fastpathTV.DecMapUint64Int16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64Int16V(rv2i(rv).(map[uint64]int16), false, d) + } +} +func (f fastpathT) DecMapUint64Int16X(vp *map[uint64]int16, d *Decoder) { + v, changed := f.DecMapUint64Int16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Int16V(v map[uint64]int16, canChange bool, + d *Decoder) (_ map[uint64]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint64]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint64 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]int32) + v, changed := fastpathTV.DecMapUint64Int32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64Int32V(rv2i(rv).(map[uint64]int32), false, d) + } +} +func (f fastpathT) DecMapUint64Int32X(vp *map[uint64]int32, d *Decoder) { + v, changed := f.DecMapUint64Int32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Int32V(v map[uint64]int32, canChange bool, + d *Decoder) (_ map[uint64]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint64]int32, xlen) + changed = true + } 
+ if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint64 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]int64) + v, changed := fastpathTV.DecMapUint64Int64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64Int64V(rv2i(rv).(map[uint64]int64), false, d) + } +} +func (f fastpathT) DecMapUint64Int64X(vp *map[uint64]int64, d *Decoder) { + v, changed := f.DecMapUint64Int64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Int64V(v map[uint64]int64, canChange bool, + d *Decoder) (_ map[uint64]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint64 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]float32) + v, changed := fastpathTV.DecMapUint64Float32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64Float32V(rv2i(rv).(map[uint64]float32), false, d) + } +} +func (f fastpathT) DecMapUint64Float32X(vp *map[uint64]float32, d *Decoder) { + v, changed := f.DecMapUint64Float32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Float32V(v map[uint64]float32, canChange bool, + d *Decoder) (_ map[uint64]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint64]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint64 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) 
fastpathDecMapUint64Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]float64) + v, changed := fastpathTV.DecMapUint64Float64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64Float64V(rv2i(rv).(map[uint64]float64), false, d) + } +} +func (f fastpathT) DecMapUint64Float64X(vp *map[uint64]float64, d *Decoder) { + v, changed := f.DecMapUint64Float64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Float64V(v map[uint64]float64, canChange bool, + d *Decoder) (_ map[uint64]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint64 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]bool) + v, changed := fastpathTV.DecMapUint64BoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64BoolV(rv2i(rv).(map[uint64]bool), false, d) + } +} +func (f fastpathT) DecMapUint64BoolX(vp *map[uint64]bool, d *Decoder) { + v, changed := f.DecMapUint64BoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64BoolV(v map[uint64]bool, canChange bool, + d *Decoder) (_ map[uint64]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint64]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk uint64 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrIntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]interface{}) + v, changed := fastpathTV.DecMapUintptrIntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrIntfV(rv2i(rv).(map[uintptr]interface{}), false, d) + } +} +func (f fastpathT) DecMapUintptrIntfX(vp *map[uintptr]interface{}, d *Decoder) { + v, changed := f.DecMapUintptrIntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrIntfV(v map[uintptr]interface{}, canChange bool, + d *Decoder) (_ map[uintptr]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := 
dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[uintptr]interface{}, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk uintptr
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = nil
+			}
+			continue
+		}
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrStringR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]string)
+		v, changed := fastpathTV.DecMapUintptrStringV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintptrStringV(rv2i(rv).(map[uintptr]string), false, d)
+	}
+}
+func (f fastpathT) DecMapUintptrStringX(vp *map[uintptr]string, d *Decoder) {
+	v, changed := f.DecMapUintptrStringV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrStringV(v map[uintptr]string, canChange bool,
+	d *Decoder) (_ map[uintptr]string, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[uintptr]string, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uintptr
+	var mv string
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = ""
+			}
+			continue
+		}
+		mv = dd.DecodeString()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrUintR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]uint)
+		v, changed := fastpathTV.DecMapUintptrUintV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintptrUintV(rv2i(rv).(map[uintptr]uint), false, d)
+	}
+}
+func (f fastpathT) DecMapUintptrUintX(vp *map[uintptr]uint, d *Decoder) {
+	v, changed := f.DecMapUintptrUintV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrUintV(v map[uintptr]uint, canChange bool,
+	d *Decoder) (_ map[uintptr]uint, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uintptr]uint, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uintptr
+	var mv uint
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrUint8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]uint8)
+		v, changed := fastpathTV.DecMapUintptrUint8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintptrUint8V(rv2i(rv).(map[uintptr]uint8), false, d)
+	}
+}
+func (f fastpathT) DecMapUintptrUint8X(vp *map[uintptr]uint8, d *Decoder) {
+	v, changed := f.DecMapUintptrUint8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrUint8V(v map[uintptr]uint8, canChange bool,
+	d *Decoder) (_ map[uintptr]uint8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uintptr]uint8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uintptr
+	var mv uint8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrUint16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]uint16)
+		v, changed := fastpathTV.DecMapUintptrUint16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintptrUint16V(rv2i(rv).(map[uintptr]uint16), false, d)
+	}
+}
+func (f fastpathT) DecMapUintptrUint16X(vp *map[uintptr]uint16, d *Decoder) {
+	v, changed := f.DecMapUintptrUint16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrUint16V(v map[uintptr]uint16, canChange bool,
+	d *Decoder) (_ map[uintptr]uint16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[uintptr]uint16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uintptr
+	var mv uint16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrUint32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]uint32)
+		v, changed := fastpathTV.DecMapUintptrUint32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintptrUint32V(rv2i(rv).(map[uintptr]uint32), false, d)
+	}
+}
+func (f fastpathT) DecMapUintptrUint32X(vp *map[uintptr]uint32, d *Decoder) {
+	v, changed := f.DecMapUintptrUint32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrUint32V(v map[uintptr]uint32, canChange bool,
+	d *Decoder) (_ map[uintptr]uint32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uintptr]uint32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uintptr
+	var mv uint32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrUint64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]uint64)
+		v, changed := fastpathTV.DecMapUintptrUint64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintptrUint64V(rv2i(rv).(map[uintptr]uint64), false, d)
+	}
+}
+func (f fastpathT) DecMapUintptrUint64X(vp *map[uintptr]uint64, d *Decoder) {
+	v, changed := f.DecMapUintptrUint64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrUint64V(v map[uintptr]uint64, canChange bool,
+	d *Decoder) (_ map[uintptr]uint64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uintptr]uint64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uintptr
+	var mv uint64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeUint64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrUintptrR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]uintptr)
+		v, changed := fastpathTV.DecMapUintptrUintptrV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintptrUintptrV(rv2i(rv).(map[uintptr]uintptr), false, d)
+	}
+}
+func (f fastpathT) DecMapUintptrUintptrX(vp *map[uintptr]uintptr, d *Decoder) {
+	v, changed := f.DecMapUintptrUintptrV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrUintptrV(v map[uintptr]uintptr, canChange bool,
+	d *Decoder) (_ map[uintptr]uintptr, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uintptr]uintptr, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uintptr
+	var mv uintptr
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrIntR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]int)
+		v, changed := fastpathTV.DecMapUintptrIntV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintptrIntV(rv2i(rv).(map[uintptr]int), false, d)
+	}
+}
+func (f fastpathT) DecMapUintptrIntX(vp *map[uintptr]int, d *Decoder) {
+	v, changed := f.DecMapUintptrIntV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrIntV(v map[uintptr]int, canChange bool,
+	d *Decoder) (_ map[uintptr]int, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uintptr]int, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uintptr
+	var mv int
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrInt8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]int8)
+		v, changed := fastpathTV.DecMapUintptrInt8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintptrInt8V(rv2i(rv).(map[uintptr]int8), false, d)
+	}
+}
+func (f fastpathT) DecMapUintptrInt8X(vp *map[uintptr]int8, d *Decoder) {
+	v, changed := f.DecMapUintptrInt8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrInt8V(v map[uintptr]int8, canChange bool,
+	d *Decoder) (_ map[uintptr]int8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uintptr]int8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uintptr
+	var mv int8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrInt16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]int16)
+		v, changed := fastpathTV.DecMapUintptrInt16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintptrInt16V(rv2i(rv).(map[uintptr]int16), false, d)
+	}
+}
+func (f fastpathT) DecMapUintptrInt16X(vp *map[uintptr]int16, d *Decoder) {
+	v, changed := f.DecMapUintptrInt16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrInt16V(v map[uintptr]int16, canChange bool,
+	d *Decoder) (_ map[uintptr]int16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[uintptr]int16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uintptr
+	var mv int16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrInt32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]int32)
+		v, changed := fastpathTV.DecMapUintptrInt32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintptrInt32V(rv2i(rv).(map[uintptr]int32), false, d)
+	}
+}
+func (f fastpathT) DecMapUintptrInt32X(vp *map[uintptr]int32, d *Decoder) {
+	v, changed := f.DecMapUintptrInt32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrInt32V(v map[uintptr]int32, canChange bool,
+	d *Decoder) (_ map[uintptr]int32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uintptr]int32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uintptr
+	var mv int32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrInt64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]int64)
+		v, changed := fastpathTV.DecMapUintptrInt64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintptrInt64V(rv2i(rv).(map[uintptr]int64), false, d)
+	}
+}
+func (f fastpathT) DecMapUintptrInt64X(vp *map[uintptr]int64, d *Decoder) {
+	v, changed := f.DecMapUintptrInt64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrInt64V(v map[uintptr]int64, canChange bool,
+	d *Decoder) (_ map[uintptr]int64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uintptr]int64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uintptr
+	var mv int64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeInt64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrFloat32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]float32)
+		v, changed := fastpathTV.DecMapUintptrFloat32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintptrFloat32V(rv2i(rv).(map[uintptr]float32), false, d)
+	}
+}
+func (f fastpathT) DecMapUintptrFloat32X(vp *map[uintptr]float32, d *Decoder) {
+	v, changed := f.DecMapUintptrFloat32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrFloat32V(v map[uintptr]float32, canChange bool,
+	d *Decoder) (_ map[uintptr]float32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uintptr]float32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uintptr
+	var mv float32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrFloat64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]float64)
+		v, changed := fastpathTV.DecMapUintptrFloat64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintptrFloat64V(rv2i(rv).(map[uintptr]float64), false, d)
+	}
+}
+func (f fastpathT) DecMapUintptrFloat64X(vp *map[uintptr]float64, d *Decoder) {
+	v, changed := f.DecMapUintptrFloat64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrFloat64V(v map[uintptr]float64, canChange bool,
+	d *Decoder) (_ map[uintptr]float64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uintptr]float64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uintptr
+	var mv float64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeFloat64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrBoolR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]bool)
+		v, changed := fastpathTV.DecMapUintptrBoolV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapUintptrBoolV(rv2i(rv).(map[uintptr]bool), false, d)
+	}
+}
+func (f fastpathT) DecMapUintptrBoolX(vp *map[uintptr]bool, d *Decoder) {
+	v, changed := f.DecMapUintptrBoolV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrBoolV(v map[uintptr]bool, canChange bool,
+	d *Decoder) (_ map[uintptr]bool, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uintptr]bool, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk uintptr
+	var mv bool
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = false
+			}
+			continue
+		}
+		mv = dd.DecodeBool()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntIntfR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]interface{})
+		v, changed := fastpathTV.DecMapIntIntfV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntIntfV(rv2i(rv).(map[int]interface{}), false, d)
+	}
+}
+func (f fastpathT) DecMapIntIntfX(vp *map[int]interface{}, d *Decoder) {
+	v, changed := f.DecMapIntIntfV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntIntfV(v map[int]interface{}, canChange bool,
+	d *Decoder) (_ map[int]interface{}, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[int]interface{}, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk int
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = nil
+			}
+			continue
+		}
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntStringR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]string)
+		v, changed := fastpathTV.DecMapIntStringV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntStringV(rv2i(rv).(map[int]string), false, d)
+	}
+}
+func (f fastpathT) DecMapIntStringX(vp *map[int]string, d *Decoder) {
+	v, changed := f.DecMapIntStringV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntStringV(v map[int]string, canChange bool,
+	d *Decoder) (_ map[int]string, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[int]string, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk int
+	var mv string
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = ""
+			}
+			continue
+		}
+		mv = dd.DecodeString()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntUintR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]uint)
+		v, changed := fastpathTV.DecMapIntUintV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntUintV(rv2i(rv).(map[int]uint), false, d)
+	}
+}
+func (f fastpathT) DecMapIntUintX(vp *map[int]uint, d *Decoder) {
+	v, changed := f.DecMapIntUintV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntUintV(v map[int]uint, canChange bool,
+	d *Decoder) (_ map[int]uint, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[int]uint, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk int
+	var mv uint
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntUint8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]uint8)
+		v, changed := fastpathTV.DecMapIntUint8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntUint8V(rv2i(rv).(map[int]uint8), false, d)
+	}
+}
+func (f fastpathT) DecMapIntUint8X(vp *map[int]uint8, d *Decoder) {
+	v, changed := f.DecMapIntUint8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntUint8V(v map[int]uint8, canChange bool,
+	d *Decoder) (_ map[int]uint8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[int]uint8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk int
+	var mv uint8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntUint16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]uint16)
+		v, changed := fastpathTV.DecMapIntUint16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntUint16V(rv2i(rv).(map[int]uint16), false, d)
+	}
+}
+func (f fastpathT) DecMapIntUint16X(vp *map[int]uint16, d *Decoder) {
+	v, changed := f.DecMapIntUint16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntUint16V(v map[int]uint16, canChange bool,
+	d *Decoder) (_ map[int]uint16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[int]uint16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk int
+	var mv uint16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntUint32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]uint32)
+		v, changed := fastpathTV.DecMapIntUint32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntUint32V(rv2i(rv).(map[int]uint32), false, d)
+	}
+}
+func (f fastpathT) DecMapIntUint32X(vp *map[int]uint32, d *Decoder) {
+	v, changed := f.DecMapIntUint32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntUint32V(v map[int]uint32, canChange bool,
+	d *Decoder) (_ map[int]uint32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[int]uint32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk int
+	var mv uint32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntUint64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]uint64)
+		v, changed := fastpathTV.DecMapIntUint64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntUint64V(rv2i(rv).(map[int]uint64), false, d)
+	}
+}
+func (f fastpathT) DecMapIntUint64X(vp *map[int]uint64, d *Decoder) {
+	v, changed := f.DecMapIntUint64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntUint64V(v map[int]uint64, canChange bool,
+	d *Decoder) (_ map[int]uint64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[int]uint64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk int
+	var mv uint64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeUint64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntUintptrR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]uintptr)
+		v, changed := fastpathTV.DecMapIntUintptrV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntUintptrV(rv2i(rv).(map[int]uintptr), false, d)
+	}
+}
+func (f fastpathT) DecMapIntUintptrX(vp *map[int]uintptr, d *Decoder) {
+	v, changed := f.DecMapIntUintptrV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntUintptrV(v map[int]uintptr, canChange bool,
+	d *Decoder) (_ map[int]uintptr, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[int]uintptr, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk int
+	var mv uintptr
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntIntR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]int)
+		v, changed := fastpathTV.DecMapIntIntV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntIntV(rv2i(rv).(map[int]int), false, d)
+	}
+}
+func (f fastpathT) DecMapIntIntX(vp *map[int]int, d *Decoder) {
+	v, changed := f.DecMapIntIntV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntIntV(v map[int]int, canChange bool,
+	d *Decoder) (_ map[int]int, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[int]int, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk int
+	var mv int
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntInt8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]int8)
+		v, changed := fastpathTV.DecMapIntInt8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntInt8V(rv2i(rv).(map[int]int8), false, d)
+	}
+}
+func (f fastpathT) DecMapIntInt8X(vp *map[int]int8, d *Decoder) {
+	v, changed := f.DecMapIntInt8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntInt8V(v map[int]int8, canChange bool,
+	d *Decoder) (_ map[int]int8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[int]int8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk int
+	var mv int8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntInt16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]int16)
+		v, changed := fastpathTV.DecMapIntInt16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntInt16V(rv2i(rv).(map[int]int16), false, d)
+	}
+}
+func (f fastpathT) DecMapIntInt16X(vp *map[int]int16, d *Decoder) {
+	v, changed := f.DecMapIntInt16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntInt16V(v map[int]int16, canChange bool,
+	d *Decoder) (_ map[int]int16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[int]int16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk int
+	var mv int16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntInt32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]int32)
+		v, changed := fastpathTV.DecMapIntInt32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntInt32V(rv2i(rv).(map[int]int32), false, d)
+	}
+}
+func (f fastpathT) DecMapIntInt32X(vp *map[int]int32, d *Decoder) {
+	v, changed := f.DecMapIntInt32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntInt32V(v map[int]int32, canChange bool,
+	d *Decoder) (_ map[int]int32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[int]int32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk int
+	var mv int32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntInt64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]int64)
+		v, changed := fastpathTV.DecMapIntInt64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntInt64V(rv2i(rv).(map[int]int64), false, d)
+	}
+}
+func (f fastpathT) DecMapIntInt64X(vp *map[int]int64, d *Decoder) {
+	v, changed := f.DecMapIntInt64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntInt64V(v map[int]int64, canChange bool,
+	d *Decoder) (_ map[int]int64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[int]int64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk int
+	var mv int64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeInt64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntFloat32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]float32)
+		v, changed := fastpathTV.DecMapIntFloat32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntFloat32V(rv2i(rv).(map[int]float32), false, d)
+	}
+}
+func (f fastpathT) DecMapIntFloat32X(vp *map[int]float32, d *Decoder) {
+	v, changed := f.DecMapIntFloat32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntFloat32V(v map[int]float32, canChange bool,
+	d *Decoder) (_ map[int]float32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[int]float32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk int
+	var mv float32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntFloat64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]float64)
+		v, changed := fastpathTV.DecMapIntFloat64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntFloat64V(rv2i(rv).(map[int]float64), false, d)
+	}
+}
+func (f fastpathT) DecMapIntFloat64X(vp *map[int]float64, d *Decoder) {
+	v, changed := f.DecMapIntFloat64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntFloat64V(v map[int]float64, canChange bool,
+	d *Decoder) (_ map[int]float64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[int]float64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk int
+	var mv float64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeFloat64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntBoolR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]bool)
+		v, changed := fastpathTV.DecMapIntBoolV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapIntBoolV(rv2i(rv).(map[int]bool), false, d)
+	}
+}
+func (f fastpathT) DecMapIntBoolX(vp *map[int]bool, d *Decoder) {
+	v, changed := f.DecMapIntBoolV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntBoolV(v map[int]bool, canChange bool,
+	d *Decoder) (_ map[int]bool, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[int]bool, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk int
+	var mv bool
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = false
+			}
+			continue
+		}
+		mv = dd.DecodeBool()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8IntfR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]interface{})
+		v, changed := fastpathTV.DecMapInt8IntfV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt8IntfV(rv2i(rv).(map[int8]interface{}), false, d)
+	}
+}
+func (f fastpathT) DecMapInt8IntfX(vp *map[int8]interface{}, d *Decoder) {
+	v, changed := f.DecMapInt8IntfV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8IntfV(v map[int8]interface{}, canChange bool,
+	d *Decoder) (_ map[int8]interface{}, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
+		v = make(map[int8]interface{}, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk int8
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = nil
+			}
+			continue
+		}
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8StringR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]string)
+		v, changed := fastpathTV.DecMapInt8StringV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt8StringV(rv2i(rv).(map[int8]string), false, d)
+	}
+}
+func (f fastpathT) DecMapInt8StringX(vp *map[int8]string, d *Decoder) {
+	v, changed := f.DecMapInt8StringV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8StringV(v map[int8]string, canChange bool,
+	d *Decoder) (_ map[int8]string, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
+		v = make(map[int8]string, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk int8
+	var mv string
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = ""
+			}
+			continue
+		}
+		mv = dd.DecodeString()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8UintR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]uint)
+		v, changed := fastpathTV.DecMapInt8UintV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt8UintV(rv2i(rv).(map[int8]uint), false, d)
+	}
+}
+func (f fastpathT) DecMapInt8UintX(vp *map[int8]uint, d *Decoder) {
+	v, changed := f.DecMapInt8UintV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8UintV(v map[int8]uint, canChange bool,
+	d *Decoder) (_ map[int8]uint, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[int8]uint, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk int8
+	var mv uint
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8Uint8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]uint8)
+		v, changed := fastpathTV.DecMapInt8Uint8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt8Uint8V(rv2i(rv).(map[int8]uint8), false, d)
+	}
+}
+func (f fastpathT) DecMapInt8Uint8X(vp *map[int8]uint8, d *Decoder) {
+	v, changed := f.DecMapInt8Uint8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8Uint8V(v map[int8]uint8, canChange bool,
+	d *Decoder) (_ map[int8]uint8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
+		v = make(map[int8]uint8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk int8
+	var mv uint8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8Uint16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]uint16)
+		v, changed := fastpathTV.DecMapInt8Uint16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt8Uint16V(rv2i(rv).(map[int8]uint16), false, d)
+	}
+}
+func (f fastpathT) DecMapInt8Uint16X(vp *map[int8]uint16, d *Decoder) {
+	v, changed := f.DecMapInt8Uint16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8Uint16V(v map[int8]uint16, canChange bool,
+	d *Decoder) (_ map[int8]uint16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+		v = make(map[int8]uint16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk int8
+	var mv uint16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8Uint32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]uint32)
+		v, changed := fastpathTV.DecMapInt8Uint32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt8Uint32V(rv2i(rv).(map[int8]uint32), false, d)
+	}
+}
+func (f fastpathT) DecMapInt8Uint32X(vp *map[int8]uint32, d *Decoder) {
+	v, changed := f.DecMapInt8Uint32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8Uint32V(v map[int8]uint32, canChange bool,
+	d *Decoder) (_ map[int8]uint32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+		v = make(map[int8]uint32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk int8
+	var mv uint32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8Uint64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]uint64)
+		v, changed := fastpathTV.DecMapInt8Uint64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt8Uint64V(rv2i(rv).(map[int8]uint64), false, d)
+	}
+}
+func (f fastpathT) DecMapInt8Uint64X(vp *map[int8]uint64, d *Decoder) {
+	v, changed := f.DecMapInt8Uint64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8Uint64V(v map[int8]uint64, canChange bool,
+	d *Decoder) (_ map[int8]uint64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[int8]uint64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk int8
+	var mv uint64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeUint64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8UintptrR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]uintptr)
+		v, changed := fastpathTV.DecMapInt8UintptrV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt8UintptrV(rv2i(rv).(map[int8]uintptr), false, d)
+	}
+}
+func (f fastpathT) DecMapInt8UintptrX(vp *map[int8]uintptr, d *Decoder) {
+	v, changed := f.DecMapInt8UintptrV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8UintptrV(v map[int8]uintptr, canChange bool,
+	d *Decoder) (_ map[int8]uintptr, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[int8]uintptr, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk int8
+	var mv uintptr
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8IntR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]int)
+		v, changed := fastpathTV.DecMapInt8IntV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt8IntV(rv2i(rv).(map[int8]int), false, d)
+	}
+}
+func (f fastpathT) DecMapInt8IntX(vp *map[int8]int, d *Decoder) {
+	v, changed := f.DecMapInt8IntV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8IntV(v map[int8]int, canChange bool,
+	d *Decoder) (_ map[int8]int, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[int8]int, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk int8
+	var mv int
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8Int8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]int8)
+		v, changed := fastpathTV.DecMapInt8Int8V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt8Int8V(rv2i(rv).(map[int8]int8), false, d)
+	}
+}
+func (f fastpathT) DecMapInt8Int8X(vp *map[int8]int8, d *Decoder) {
+	v, changed := f.DecMapInt8Int8V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8Int8V(v map[int8]int8, canChange bool,
+	d *Decoder) (_ map[int8]int8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
+		v = make(map[int8]int8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk int8
+	var mv int8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8Int16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]int16)
+		v, changed := fastpathTV.DecMapInt8Int16V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt8Int16V(rv2i(rv).(map[int8]int16), false, d)
+	}
+}
+func (f fastpathT) DecMapInt8Int16X(vp *map[int8]int16, d *Decoder) {
+	v, changed := f.DecMapInt8Int16V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8Int16V(v map[int8]int16, canChange bool,
+	d *Decoder) (_ map[int8]int16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+		v = make(map[int8]int16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk int8
+	var mv int16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8Int32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]int32)
+		v, changed := fastpathTV.DecMapInt8Int32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt8Int32V(rv2i(rv).(map[int8]int32), false, d)
+	}
+}
+func (f fastpathT) DecMapInt8Int32X(vp *map[int8]int32, d *Decoder) {
+	v, changed := f.DecMapInt8Int32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8Int32V(v map[int8]int32, canChange bool,
+	d *Decoder) (_ map[int8]int32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+		v = make(map[int8]int32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk int8
+	var mv int32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8Int64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]int64)
+		v, changed := fastpathTV.DecMapInt8Int64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt8Int64V(rv2i(rv).(map[int8]int64), false, d)
+	}
+}
+func (f fastpathT) DecMapInt8Int64X(vp *map[int8]int64, d *Decoder) {
+	v, changed := f.DecMapInt8Int64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8Int64V(v map[int8]int64, canChange bool,
+	d *Decoder) (_ map[int8]int64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[int8]int64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk int8
+	var mv int64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeInt64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8Float32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]float32)
+		v, changed := fastpathTV.DecMapInt8Float32V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt8Float32V(rv2i(rv).(map[int8]float32), false, d)
+	}
+}
+func (f fastpathT) DecMapInt8Float32X(vp *map[int8]float32, d *Decoder) {
+	v, changed := f.DecMapInt8Float32V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8Float32V(v map[int8]float32, canChange bool,
+	d *Decoder) (_ map[int8]float32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+		v = make(map[int8]float32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk int8
+	var mv float32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8Float64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]float64)
+		v, changed := fastpathTV.DecMapInt8Float64V(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt8Float64V(rv2i(rv).(map[int8]float64), false, d)
+	}
+}
+func (f fastpathT) DecMapInt8Float64X(vp *map[int8]float64, d *Decoder) {
+	v, changed := f.DecMapInt8Float64V(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8Float64V(v map[int8]float64, canChange bool,
+	d *Decoder) (_ map[int8]float64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[int8]float64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk int8
+	var mv float64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeFloat64()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8BoolR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]bool)
+		v, changed := fastpathTV.DecMapInt8BoolV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt8BoolV(rv2i(rv).(map[int8]bool), false, d)
+	}
+}
+func (f fastpathT) DecMapInt8BoolX(vp *map[int8]bool, d *Decoder) {
+	v, changed := f.DecMapInt8BoolV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8BoolV(v map[int8]bool, canChange bool,
+	d *Decoder) (_ map[int8]bool, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
+		v = make(map[int8]bool, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	var mk int8
+	var mv bool
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = false
+			}
+			continue
+		}
+		mv = dd.DecodeBool()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	d.depthDecr()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt16IntfR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int16]interface{})
+		v, changed := fastpathTV.DecMapInt16IntfV(*vp, true, d)
+		if changed {
+			*vp = v
+		}
+	} else {
+		fastpathTV.DecMapInt16IntfV(rv2i(rv).(map[int16]interface{}), false, d)
+	}
+}
+func (f fastpathT) DecMapInt16IntfX(vp *map[int16]interface{}, d *Decoder) {
+	v, changed := f.DecMapInt16IntfV(*vp, true, d)
+	if changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt16IntfV(v map[int16]interface{}, canChange bool,
+	d *Decoder) (_ map[int16]interface{}, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 18)
+		v = make(map[int16]interface{}, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	d.depthIncr()
+	mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk int16
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if v == nil {
+			} else if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = nil
+			}
+			continue
+		}
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		if
v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]string) + v, changed := fastpathTV.DecMapInt16StringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16StringV(rv2i(rv).(map[int16]string), false, d) + } +} +func (f fastpathT) DecMapInt16StringX(vp *map[int16]string, d *Decoder) { + v, changed := f.DecMapInt16StringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16StringV(v map[int16]string, canChange bool, + d *Decoder) (_ map[int16]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[int16]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int16 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uint) + v, changed := fastpathTV.DecMapInt16UintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16UintV(rv2i(rv).(map[int16]uint), false, d) + } +} +func (f fastpathT) DecMapInt16UintX(vp *map[int16]uint, d *Decoder) { + v, changed := f.DecMapInt16UintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16UintV(v map[int16]uint, canChange bool, + d *Decoder) (_ map[int16]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int16 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uint8) + v, changed := fastpathTV.DecMapInt16Uint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16Uint8V(rv2i(rv).(map[int16]uint8), false, d) + } +} +func (f fastpathT) DecMapInt16Uint8X(vp *map[int16]uint8, d *Decoder) { + v, changed := f.DecMapInt16Uint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Uint8V(v map[int16]uint8, canChange bool, + d *Decoder) 
(_ map[int16]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int16]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int16 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uint16) + v, changed := fastpathTV.DecMapInt16Uint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16Uint16V(rv2i(rv).(map[int16]uint16), false, d) + } +} +func (f fastpathT) DecMapInt16Uint16X(vp *map[int16]uint16, d *Decoder) { + v, changed := f.DecMapInt16Uint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Uint16V(v map[int16]uint16, canChange bool, + d *Decoder) (_ map[int16]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 4) + v = make(map[int16]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int16 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uint32) + v, changed := fastpathTV.DecMapInt16Uint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16Uint32V(rv2i(rv).(map[int16]uint32), false, d) + } +} +func (f fastpathT) DecMapInt16Uint32X(vp *map[int16]uint32, d *Decoder) { + v, changed := f.DecMapInt16Uint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Uint32V(v map[int16]uint32, canChange bool, + d *Decoder) (_ map[int16]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int16]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int16 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if 
dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uint64) + v, changed := fastpathTV.DecMapInt16Uint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16Uint64V(rv2i(rv).(map[int16]uint64), false, d) + } +} +func (f fastpathT) DecMapInt16Uint64X(vp *map[int16]uint64, d *Decoder) { + v, changed := f.DecMapInt16Uint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Uint64V(v map[int16]uint64, canChange bool, + d *Decoder) (_ map[int16]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int16 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uintptr) + v, changed := fastpathTV.DecMapInt16UintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16UintptrV(rv2i(rv).(map[int16]uintptr), false, d) + } +} +func (f fastpathT) DecMapInt16UintptrX(vp *map[int16]uintptr, d *Decoder) { + v, changed := f.DecMapInt16UintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16UintptrV(v map[int16]uintptr, canChange bool, + d *Decoder) (_ map[int16]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int16 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]int) + v, changed := fastpathTV.DecMapInt16IntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16IntV(rv2i(rv).(map[int16]int), false, d) + } +} +func (f fastpathT) 
DecMapInt16IntX(vp *map[int16]int, d *Decoder) { + v, changed := f.DecMapInt16IntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16IntV(v map[int16]int, canChange bool, + d *Decoder) (_ map[int16]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int16 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]int8) + v, changed := fastpathTV.DecMapInt16Int8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16Int8V(rv2i(rv).(map[int16]int8), false, d) + } +} +func (f fastpathT) DecMapInt16Int8X(vp *map[int16]int8, d *Decoder) { + v, changed := f.DecMapInt16Int8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Int8V(v map[int16]int8, canChange bool, + d *Decoder) (_ map[int16]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int16]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int16 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]int16) + v, changed := fastpathTV.DecMapInt16Int16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16Int16V(rv2i(rv).(map[int16]int16), false, d) + } +} +func (f fastpathT) DecMapInt16Int16X(vp *map[int16]int16, d *Decoder) { + v, changed := f.DecMapInt16Int16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Int16V(v map[int16]int16, canChange bool, + d *Decoder) (_ map[int16]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 4) + v = make(map[int16]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int16 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || 
!(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]int32) + v, changed := fastpathTV.DecMapInt16Int32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16Int32V(rv2i(rv).(map[int16]int32), false, d) + } +} +func (f fastpathT) DecMapInt16Int32X(vp *map[int16]int32, d *Decoder) { + v, changed := f.DecMapInt16Int32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Int32V(v map[int16]int32, canChange bool, + d *Decoder) (_ map[int16]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int16]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int16 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]int64) + v, changed := fastpathTV.DecMapInt16Int64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16Int64V(rv2i(rv).(map[int16]int64), false, d) + } +} +func (f fastpathT) DecMapInt16Int64X(vp *map[int16]int64, d *Decoder) { + v, changed := f.DecMapInt16Int64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Int64V(v map[int16]int64, canChange bool, + d *Decoder) (_ map[int16]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int16 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]float32) + v, changed := fastpathTV.DecMapInt16Float32V(*vp, true, d) + if 
changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16Float32V(rv2i(rv).(map[int16]float32), false, d) + } +} +func (f fastpathT) DecMapInt16Float32X(vp *map[int16]float32, d *Decoder) { + v, changed := f.DecMapInt16Float32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Float32V(v map[int16]float32, canChange bool, + d *Decoder) (_ map[int16]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int16]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int16 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]float64) + v, changed := fastpathTV.DecMapInt16Float64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16Float64V(rv2i(rv).(map[int16]float64), false, d) + } +} +func (f fastpathT) DecMapInt16Float64X(vp *map[int16]float64, d *Decoder) { + v, changed := f.DecMapInt16Float64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Float64V(v map[int16]float64, canChange bool, + d *Decoder) (_ map[int16]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int16 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]bool) + v, changed := fastpathTV.DecMapInt16BoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16BoolV(rv2i(rv).(map[int16]bool), false, d) + } +} +func (f fastpathT) DecMapInt16BoolX(vp *map[int16]bool, d *Decoder) { + v, changed := f.DecMapInt16BoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16BoolV(v map[int16]bool, canChange bool, + d *Decoder) (_ map[int16]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int16]bool, xlen) + changed = true + } + if containerLen == 
0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int16 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]interface{}) + v, changed := fastpathTV.DecMapInt32IntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32IntfV(rv2i(rv).(map[int32]interface{}), false, d) + } +} +func (f fastpathT) DecMapInt32IntfX(vp *map[int32]interface{}, d *Decoder) { + v, changed := f.DecMapInt32IntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32IntfV(v map[int32]interface{}, canChange bool, + d *Decoder) (_ map[int32]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[int32]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk int32 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]string) + v, changed := fastpathTV.DecMapInt32StringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32StringV(rv2i(rv).(map[int32]string), false, d) + } +} +func (f fastpathT) DecMapInt32StringX(vp *map[int32]string, d *Decoder) { + v, changed := f.DecMapInt32StringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32StringV(v map[int32]string, canChange bool, + d *Decoder) (_ map[int32]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[int32]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int32 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + 
} + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uint) + v, changed := fastpathTV.DecMapInt32UintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32UintV(rv2i(rv).(map[int32]uint), false, d) + } +} +func (f fastpathT) DecMapInt32UintX(vp *map[int32]uint, d *Decoder) { + v, changed := f.DecMapInt32UintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32UintV(v map[int32]uint, canChange bool, + d *Decoder) (_ map[int32]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int32 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uint8) + v, changed := fastpathTV.DecMapInt32Uint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32Uint8V(rv2i(rv).(map[int32]uint8), false, d) + } +} +func (f fastpathT) DecMapInt32Uint8X(vp *map[int32]uint8, d *Decoder) { + v, changed := f.DecMapInt32Uint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Uint8V(v map[int32]uint8, canChange bool, + d *Decoder) (_ map[int32]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int32]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int32 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uint16) + v, changed := fastpathTV.DecMapInt32Uint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32Uint16V(rv2i(rv).(map[int32]uint16), false, d) + } +} +func (f fastpathT) DecMapInt32Uint16X(vp *map[int32]uint16, d *Decoder) { + v, changed := f.DecMapInt32Uint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Uint16V(v map[int32]uint16, canChange bool, + d *Decoder) (_ 
map[int32]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int32]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int32 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uint32) + v, changed := fastpathTV.DecMapInt32Uint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32Uint32V(rv2i(rv).(map[int32]uint32), false, d) + } +} +func (f fastpathT) DecMapInt32Uint32X(vp *map[int32]uint32, d *Decoder) { + v, changed := f.DecMapInt32Uint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Uint32V(v map[int32]uint32, canChange bool, + d *Decoder) (_ map[int32]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[int32]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int32 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uint64) + v, changed := fastpathTV.DecMapInt32Uint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32Uint64V(rv2i(rv).(map[int32]uint64), false, d) + } +} +func (f fastpathT) DecMapInt32Uint64X(vp *map[int32]uint64, d *Decoder) { + v, changed := f.DecMapInt32Uint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Uint64V(v map[int32]uint64, canChange bool, + d *Decoder) (_ map[int32]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int32 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if 
dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uintptr) + v, changed := fastpathTV.DecMapInt32UintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32UintptrV(rv2i(rv).(map[int32]uintptr), false, d) + } +} +func (f fastpathT) DecMapInt32UintptrX(vp *map[int32]uintptr, d *Decoder) { + v, changed := f.DecMapInt32UintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32UintptrV(v map[int32]uintptr, canChange bool, + d *Decoder) (_ map[int32]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int32 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]int) + v, changed := fastpathTV.DecMapInt32IntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32IntV(rv2i(rv).(map[int32]int), false, d) + } +} +func (f fastpathT) DecMapInt32IntX(vp *map[int32]int, d *Decoder) { + v, changed := f.DecMapInt32IntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32IntV(v map[int32]int, canChange bool, + d *Decoder) (_ map[int32]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int32 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]int8) + v, changed := fastpathTV.DecMapInt32Int8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32Int8V(rv2i(rv).(map[int32]int8), false, d) + } +} +func (f fastpathT) DecMapInt32Int8X(vp 
*map[int32]int8, d *Decoder) { + v, changed := f.DecMapInt32Int8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Int8V(v map[int32]int8, canChange bool, + d *Decoder) (_ map[int32]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int32]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int32 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]int16) + v, changed := fastpathTV.DecMapInt32Int16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32Int16V(rv2i(rv).(map[int32]int16), false, d) + } +} +func (f fastpathT) DecMapInt32Int16X(vp *map[int32]int16, d *Decoder) { + v, changed := f.DecMapInt32Int16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Int16V(v map[int32]int16, canChange bool, + d *Decoder) (_ map[int32]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int32]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int32 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]int32) + v, changed := fastpathTV.DecMapInt32Int32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32Int32V(rv2i(rv).(map[int32]int32), false, d) + } +} +func (f fastpathT) DecMapInt32Int32X(vp *map[int32]int32, d *Decoder) { + v, changed := f.DecMapInt32Int32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Int32V(v map[int32]int32, canChange bool, + d *Decoder) (_ map[int32]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[int32]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int32 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || 
dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]int64) + v, changed := fastpathTV.DecMapInt32Int64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32Int64V(rv2i(rv).(map[int32]int64), false, d) + } +} +func (f fastpathT) DecMapInt32Int64X(vp *map[int32]int64, d *Decoder) { + v, changed := f.DecMapInt32Int64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Int64V(v map[int32]int64, canChange bool, + d *Decoder) (_ map[int32]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int32 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]float32) + v, changed := fastpathTV.DecMapInt32Float32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32Float32V(rv2i(rv).(map[int32]float32), false, d) + } +} +func (f fastpathT) DecMapInt32Float32X(vp *map[int32]float32, d *Decoder) { + v, changed := f.DecMapInt32Float32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Float32V(v map[int32]float32, canChange bool, + d *Decoder) (_ map[int32]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[int32]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int32 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]float64) + v, changed := fastpathTV.DecMapInt32Float64V(*vp, 
true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32Float64V(rv2i(rv).(map[int32]float64), false, d) + } +} +func (f fastpathT) DecMapInt32Float64X(vp *map[int32]float64, d *Decoder) { + v, changed := f.DecMapInt32Float64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Float64V(v map[int32]float64, canChange bool, + d *Decoder) (_ map[int32]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int32 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]bool) + v, changed := fastpathTV.DecMapInt32BoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32BoolV(rv2i(rv).(map[int32]bool), false, d) + } +} +func (f fastpathT) DecMapInt32BoolX(vp *map[int32]bool, d *Decoder) { + v, changed := f.DecMapInt32BoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32BoolV(v map[int32]bool, canChange bool, + d *Decoder) (_ map[int32]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int32]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int32 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]interface{}) + v, changed := fastpathTV.DecMapInt64IntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64IntfV(rv2i(rv).(map[int64]interface{}), false, d) + } +} +func (f fastpathT) DecMapInt64IntfX(vp *map[int64]interface{}, d *Decoder) { + v, changed := f.DecMapInt64IntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64IntfV(v map[int64]interface{}, canChange bool, + d *Decoder) (_ map[int64]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[int64]interface{}, xlen) + changed = true + } + if containerLen == 0 { + 
dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk int64 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]string) + v, changed := fastpathTV.DecMapInt64StringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64StringV(rv2i(rv).(map[int64]string), false, d) + } +} +func (f fastpathT) DecMapInt64StringX(vp *map[int64]string, d *Decoder) { + v, changed := f.DecMapInt64StringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64StringV(v map[int64]string, canChange bool, + d *Decoder) (_ map[int64]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[int64]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int64 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uint) + v, changed := fastpathTV.DecMapInt64UintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64UintV(rv2i(rv).(map[int64]uint), false, d) + } +} +func (f fastpathT) DecMapInt64UintX(vp *map[int64]uint, d *Decoder) { + v, changed := f.DecMapInt64UintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64UintV(v map[int64]uint, canChange bool, + d *Decoder) (_ map[int64]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int64 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) 
fastpathDecMapInt64Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uint8) + v, changed := fastpathTV.DecMapInt64Uint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64Uint8V(rv2i(rv).(map[int64]uint8), false, d) + } +} +func (f fastpathT) DecMapInt64Uint8X(vp *map[int64]uint8, d *Decoder) { + v, changed := f.DecMapInt64Uint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Uint8V(v map[int64]uint8, canChange bool, + d *Decoder) (_ map[int64]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int64]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int64 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uint16) + v, changed := fastpathTV.DecMapInt64Uint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64Uint16V(rv2i(rv).(map[int64]uint16), false, d) + } +} +func (f fastpathT) DecMapInt64Uint16X(vp *map[int64]uint16, d *Decoder) { + v, changed := f.DecMapInt64Uint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Uint16V(v map[int64]uint16, canChange bool, + d *Decoder) (_ map[int64]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int64]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int64 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uint32) + v, changed := fastpathTV.DecMapInt64Uint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64Uint32V(rv2i(rv).(map[int64]uint32), false, d) + } +} +func (f fastpathT) DecMapInt64Uint32X(vp *map[int64]uint32, d *Decoder) { + v, changed := f.DecMapInt64Uint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Uint32V(v map[int64]uint32, canChange bool, + d *Decoder) (_ map[int64]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + 
if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int64]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int64 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uint64) + v, changed := fastpathTV.DecMapInt64Uint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64Uint64V(rv2i(rv).(map[int64]uint64), false, d) + } +} +func (f fastpathT) DecMapInt64Uint64X(vp *map[int64]uint64, d *Decoder) { + v, changed := f.DecMapInt64Uint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Uint64V(v map[int64]uint64, canChange bool, + d *Decoder) (_ map[int64]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int64 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uintptr) + v, changed := fastpathTV.DecMapInt64UintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64UintptrV(rv2i(rv).(map[int64]uintptr), false, d) + } +} +func (f fastpathT) DecMapInt64UintptrX(vp *map[int64]uintptr, d *Decoder) { + v, changed := f.DecMapInt64UintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64UintptrV(v map[int64]uintptr, canChange bool, + d *Decoder) (_ map[int64]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int64 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v 
!= nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]int) + v, changed := fastpathTV.DecMapInt64IntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64IntV(rv2i(rv).(map[int64]int), false, d) + } +} +func (f fastpathT) DecMapInt64IntX(vp *map[int64]int, d *Decoder) { + v, changed := f.DecMapInt64IntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64IntV(v map[int64]int, canChange bool, + d *Decoder) (_ map[int64]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int64 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]int8) + v, changed := fastpathTV.DecMapInt64Int8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64Int8V(rv2i(rv).(map[int64]int8), false, d) + } +} +func (f fastpathT) DecMapInt64Int8X(vp *map[int64]int8, d *Decoder) { + v, changed := f.DecMapInt64Int8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Int8V(v map[int64]int8, canChange bool, + d *Decoder) (_ map[int64]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int64]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int64 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]int16) + v, changed := fastpathTV.DecMapInt64Int16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64Int16V(rv2i(rv).(map[int64]int16), false, d) + } +} +func (f fastpathT) DecMapInt64Int16X(vp *map[int64]int16, d *Decoder) { + v, changed := f.DecMapInt64Int16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Int16V(v map[int64]int16, canChange bool, + d *Decoder) (_ map[int64]int16, changed bool) { + dd, esep := d.d, 
d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int64]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int64 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]int32) + v, changed := fastpathTV.DecMapInt64Int32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64Int32V(rv2i(rv).(map[int64]int32), false, d) + } +} +func (f fastpathT) DecMapInt64Int32X(vp *map[int64]int32, d *Decoder) { + v, changed := f.DecMapInt64Int32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Int32V(v map[int64]int32, canChange bool, + d *Decoder) (_ map[int64]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int64]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int64 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]int64) + v, changed := fastpathTV.DecMapInt64Int64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64Int64V(rv2i(rv).(map[int64]int64), false, d) + } +} +func (f fastpathT) DecMapInt64Int64X(vp *map[int64]int64, d *Decoder) { + v, changed := f.DecMapInt64Int64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Int64V(v map[int64]int64, canChange bool, + d *Decoder) (_ map[int64]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int64 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + 
if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]float32) + v, changed := fastpathTV.DecMapInt64Float32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64Float32V(rv2i(rv).(map[int64]float32), false, d) + } +} +func (f fastpathT) DecMapInt64Float32X(vp *map[int64]float32, d *Decoder) { + v, changed := f.DecMapInt64Float32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Float32V(v map[int64]float32, canChange bool, + d *Decoder) (_ map[int64]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int64]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int64 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]float64) + v, changed := fastpathTV.DecMapInt64Float64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64Float64V(rv2i(rv).(map[int64]float64), false, d) + } +} +func (f fastpathT) DecMapInt64Float64X(vp *map[int64]float64, d *Decoder) { + v, changed := f.DecMapInt64Float64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Float64V(v map[int64]float64, canChange bool, + d *Decoder) (_ map[int64]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int64 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]bool) + v, changed := fastpathTV.DecMapInt64BoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64BoolV(rv2i(rv).(map[int64]bool), false, d) + } +} +func (f fastpathT) DecMapInt64BoolX(vp *map[int64]bool, d *Decoder) { + v, changed := f.DecMapInt64BoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64BoolV(v map[int64]bool, canChange bool, + d *Decoder) (_ 
map[int64]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int64]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk int64 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolIntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]interface{}) + v, changed := fastpathTV.DecMapBoolIntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolIntfV(rv2i(rv).(map[bool]interface{}), false, d) + } +} +func (f fastpathT) DecMapBoolIntfX(vp *map[bool]interface{}, d *Decoder) { + v, changed := f.DecMapBoolIntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolIntfV(v map[bool]interface{}, canChange bool, + d *Decoder) (_ map[bool]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[bool]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk bool + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolStringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]string) + v, changed := fastpathTV.DecMapBoolStringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolStringV(rv2i(rv).(map[bool]string), false, d) + } +} +func (f fastpathT) DecMapBoolStringX(vp *map[bool]string, d *Decoder) { + v, changed := f.DecMapBoolStringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolStringV(v map[bool]string, canChange bool, + d *Decoder) (_ map[bool]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[bool]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk bool + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + 
if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolUintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uint) + v, changed := fastpathTV.DecMapBoolUintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolUintV(rv2i(rv).(map[bool]uint), false, d) + } +} +func (f fastpathT) DecMapBoolUintX(vp *map[bool]uint, d *Decoder) { + v, changed := f.DecMapBoolUintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUintV(v map[bool]uint, canChange bool, + d *Decoder) (_ map[bool]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk bool + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolUint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uint8) + v, changed := fastpathTV.DecMapBoolUint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolUint8V(rv2i(rv).(map[bool]uint8), false, d) + } +} +func (f fastpathT) DecMapBoolUint8X(vp *map[bool]uint8, d *Decoder) { + v, changed := f.DecMapBoolUint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUint8V(v map[bool]uint8, canChange bool, + d *Decoder) (_ map[bool]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[bool]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk bool + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolUint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uint16) + v, changed := fastpathTV.DecMapBoolUint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolUint16V(rv2i(rv).(map[bool]uint16), false, d) + } +} +func (f fastpathT) DecMapBoolUint16X(vp *map[bool]uint16, d *Decoder) { + v, changed := f.DecMapBoolUint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ 
fastpathT) DecMapBoolUint16V(v map[bool]uint16, canChange bool, + d *Decoder) (_ map[bool]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[bool]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk bool + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolUint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uint32) + v, changed := fastpathTV.DecMapBoolUint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolUint32V(rv2i(rv).(map[bool]uint32), false, d) + } +} +func (f fastpathT) DecMapBoolUint32X(vp *map[bool]uint32, d *Decoder) { + v, changed := f.DecMapBoolUint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUint32V(v map[bool]uint32, canChange bool, + d *Decoder) (_ map[bool]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[bool]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk bool + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolUint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uint64) + v, changed := fastpathTV.DecMapBoolUint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolUint64V(rv2i(rv).(map[bool]uint64), false, d) + } +} +func (f fastpathT) DecMapBoolUint64X(vp *map[bool]uint64, d *Decoder) { + v, changed := f.DecMapBoolUint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUint64V(v map[bool]uint64, canChange bool, + d *Decoder) (_ map[bool]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk bool + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if 
v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolUintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uintptr) + v, changed := fastpathTV.DecMapBoolUintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolUintptrV(rv2i(rv).(map[bool]uintptr), false, d) + } +} +func (f fastpathT) DecMapBoolUintptrX(vp *map[bool]uintptr, d *Decoder) { + v, changed := f.DecMapBoolUintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUintptrV(v map[bool]uintptr, canChange bool, + d *Decoder) (_ map[bool]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk bool + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolIntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]int) + v, changed := fastpathTV.DecMapBoolIntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolIntV(rv2i(rv).(map[bool]int), false, d) + } +} +func (f fastpathT) DecMapBoolIntX(vp *map[bool]int, d *Decoder) { + v, changed := f.DecMapBoolIntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolIntV(v map[bool]int, canChange bool, + d *Decoder) (_ map[bool]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk bool + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolInt8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]int8) + v, changed := fastpathTV.DecMapBoolInt8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolInt8V(rv2i(rv).(map[bool]int8), false, d) + } +} +func (f fastpathT) DecMapBoolInt8X(vp *map[bool]int8, d *Decoder) { + v, changed := f.DecMapBoolInt8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ 
fastpathT) DecMapBoolInt8V(v map[bool]int8, canChange bool, + d *Decoder) (_ map[bool]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[bool]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk bool + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolInt16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]int16) + v, changed := fastpathTV.DecMapBoolInt16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolInt16V(rv2i(rv).(map[bool]int16), false, d) + } +} +func (f fastpathT) DecMapBoolInt16X(vp *map[bool]int16, d *Decoder) { + v, changed := f.DecMapBoolInt16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolInt16V(v map[bool]int16, canChange bool, + d *Decoder) (_ map[bool]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[bool]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk bool + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolInt32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]int32) + v, changed := fastpathTV.DecMapBoolInt32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolInt32V(rv2i(rv).(map[bool]int32), false, d) + } +} +func (f fastpathT) DecMapBoolInt32X(vp *map[bool]int32, d *Decoder) { + v, changed := f.DecMapBoolInt32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolInt32V(v map[bool]int32, canChange bool, + d *Decoder) (_ map[bool]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[bool]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk bool + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if 
d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolInt64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]int64) + v, changed := fastpathTV.DecMapBoolInt64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolInt64V(rv2i(rv).(map[bool]int64), false, d) + } +} +func (f fastpathT) DecMapBoolInt64X(vp *map[bool]int64, d *Decoder) { + v, changed := f.DecMapBoolInt64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolInt64V(v map[bool]int64, canChange bool, + d *Decoder) (_ map[bool]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk bool + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolFloat32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]float32) + v, changed := fastpathTV.DecMapBoolFloat32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolFloat32V(rv2i(rv).(map[bool]float32), false, d) + } +} +func (f fastpathT) DecMapBoolFloat32X(vp *map[bool]float32, d *Decoder) { + v, changed := f.DecMapBoolFloat32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolFloat32V(v map[bool]float32, canChange bool, + d *Decoder) (_ map[bool]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[bool]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk bool + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolFloat64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]float64) + v, changed := fastpathTV.DecMapBoolFloat64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolFloat64V(rv2i(rv).(map[bool]float64), false, d) + } +} +func (f fastpathT) DecMapBoolFloat64X(vp *map[bool]float64, d *Decoder) { + v, changed := f.DecMapBoolFloat64V(*vp, true, d) + if changed { + *vp = v + } +} 
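Reviewer note: every generated map decoder in this vendored file follows the same R/X/V triple seen above. The ...R function is the reflection entry point (it unwraps a pointer so a nil map can be replaced), ...X is the pointer-based helper, and ...V does the actual decode and reports via `changed` whether the caller must store a new map. Callers never invoke these directly; they are reached through the decoder's fast-path type switch. A minimal caller-side sketch, using only the package's public API (NewEncoderBytes, NewDecoderBytes, MsgpackHandle) and not part of the vendored diff:

package main

import (
	"fmt"

	"github.com/hashicorp/go-msgpack/codec"
)

func main() {
	var h codec.MsgpackHandle

	// Encoding a concrete map type goes through the matching generated
	// encode fast path instead of generic reflection.
	src := map[int64]uint8{1: 10, 2: 20}
	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &h).Encode(src); err != nil {
		panic(err)
	}

	// Decoding through a map pointer exercises the canChange branch above:
	// the fast path allocates the map (sized via decInferLen) and the
	// changed result propagates the new map back through the pointer.
	var dst map[int64]uint8
	if err := codec.NewDecoderBytes(buf, &h).Decode(&dst); err != nil {
		panic(err)
	}
	fmt.Println(dst) // map[1:10 2:20]
}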
+func (_ fastpathT) DecMapBoolFloat64V(v map[bool]float64, canChange bool, + d *Decoder) (_ map[bool]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk bool + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolBoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]bool) + v, changed := fastpathTV.DecMapBoolBoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolBoolV(rv2i(rv).(map[bool]bool), false, d) + } +} +func (f fastpathT) DecMapBoolBoolX(vp *map[bool]bool, d *Decoder) { + v, changed := f.DecMapBoolBoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolBoolV(v map[bool]bool, canChange bool, + d *Decoder) (_ map[bool]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[bool]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + var mk bool + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/fast-path.go.tmpl b/vendor/github.com/hashicorp/go-msgpack/codec/fast-path.go.tmpl new file mode 100644 index 0000000000000..7617c43506423 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/fast-path.go.tmpl @@ -0,0 +1,491 @@ +// +build !notfastpath + +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// Code generated from fast-path.go.tmpl - DO NOT EDIT. + +package codec + +// Fast path functions try to create a fast path encode or decode implementation +// for common maps and slices. +// +// We define the functions and register them in this single file +// so as not to pollute the encode.go and decode.go, and create a dependency in there. +// This file can be omitted without causing a build failure. +// +// The advantage of fast paths is: +// - Many calls bypass reflection altogether +// +// Currently supported: +// - slice of all builtin types, +// - map of all builtin types to string or interface value +// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8) +// This should provide adequate "typical" implementations.
+// +// Note that fast track decode functions must handle values for which an address cannot be obtained. +// For example: +// m2 := map[string]int{} +// p2 := []interface{}{m2} +// // decoding into p2 will bomb if fast track functions do not treat it as unaddressable. +// + +import ( + "reflect" + "sort" +) + +const fastpathEnabled = true + +const fastpathMapBySliceErrMsg = "mapBySlice requires even slice length, but got %v" + +type fastpathT struct {} + +var fastpathTV fastpathT + +type fastpathE struct { + rtid uintptr + rt reflect.Type + encfn func(*Encoder, *codecFnInfo, reflect.Value) + decfn func(*Decoder, *codecFnInfo, reflect.Value) +} + +type fastpathA [{{ .FastpathLen }}]fastpathE + +func (x *fastpathA) index(rtid uintptr) int { + // use binary search to grab the index (adapted from sort/search.go) + // Note: we use goto (instead of for loop) so this can be inlined. + // h, i, j := 0, 0, len(x) + var h, i uint + var j = uint(len(x)) +LOOP: + if i < j { + h = i + (j-i)/2 + if x[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + goto LOOP + } + if i < uint(len(x)) && x[i].rtid == rtid { + return int(i) + } + return -1 +} + +type fastpathAslice []fastpathE + +func (x fastpathAslice) Len() int { return len(x) } +func (x fastpathAslice) Less(i, j int) bool { return x[uint(i)].rtid < x[uint(j)].rtid } +func (x fastpathAslice) Swap(i, j int) { x[uint(i)], x[uint(j)] = x[uint(j)], x[uint(i)] } + +var fastpathAV fastpathA + +// due to possible initialization loop error, make fastpath in an init() +func init() { + var i uint = 0 + fn := func(v interface{}, + fe func(*Encoder, *codecFnInfo, reflect.Value), + fd func(*Decoder, *codecFnInfo, reflect.Value)) { + xrt := reflect.TypeOf(v) + xptr := rt2id(xrt) + fastpathAV[i] = fastpathE{xptr, xrt, fe, fd} + i++ + } + {{/* do not register []uint8 in fast-path */}} + {{range .Values}}{{if not .Primitive}}{{if not .MapKey }}{{if ne .Elem "uint8"}} + fn([]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}}{{end}} + + {{range .Values}}{{if not .Primitive}}{{if .MapKey }} + fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}} + + sort.Sort(fastpathAslice(fastpathAV[:])) +} + +// -- encode + +// -- -- fast path type switch +func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { + switch v := iv.(type) { + +{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}{{if ne .Elem "uint8"}} + case []{{ .Elem }}: + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e) + case *[]{{ .Elem }}: + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e){{/* +*/}}{{end}}{{end}}{{end}}{{end}} + +{{range .Values}}{{if not .Primitive}}{{if .MapKey }} + case map[{{ .MapKey }}]{{ .Elem }}: + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e) + case *map[{{ .MapKey }}]{{ .Elem }}: + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e){{/* +*/}}{{end}}{{end}}{{end}} + + default: + _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 + return false + } + return true +} + +{{/* +**** removing this block, as they are never called directly **** + + + +**** removing this block, as they are never called directly **** + + + +func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { + switch v := iv.(type) { +{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} + case []{{ .Elem }}: + fastpathTV.{{ .MethodNamePfx "Enc" false
}}V(v, e) + case *[]{{ .Elem }}: + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e) +{{end}}{{end}}{{end}} + default: + _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 + return false + } + return true +} + +func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { + switch v := iv.(type) { +{{range .Values}}{{if not .Primitive}}{{if .MapKey }} + case map[{{ .MapKey }}]{{ .Elem }}: + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e) + case *map[{{ .MapKey }}]{{ .Elem }}: + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e) +{{end}}{{end}}{{end}} + default: + _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 + return false + } + return true +} + + + +**** removing this block, as they are never called directly **** + + + +**** removing this block, as they are never called directly **** +*/}} + +// -- -- fast path functions +{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} +func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" false }}R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.{{ .MethodNamePfx "EncAsMap" false }}V(rv2i(rv).([]{{ .Elem }}), e) + } else { + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).([]{{ .Elem }}), e) + } +} +func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, e *Encoder) { + if v == nil { e.e.EncodeNil(); return } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { ee.WriteArrayElem() } + {{ encmd .Elem "v2"}} + } + ee.WriteArrayEnd() +} +func (_ fastpathT) {{ .MethodNamePfx "EncAsMap" false }}V(v []{{ .Elem }}, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf(fastpathMapBySliceErrMsg, len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + {{ encmd .Elem "v2"}} + } + ee.WriteMapEnd() +} +{{end}}{{end}}{{end}} + +{{range .Values}}{{if not .Primitive}}{{if .MapKey }} +func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" false }}R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), e) +} +func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, e *Encoder) { + if v == nil { e.e.EncodeNil(); return } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + {{if eq .MapKey "interface{}"}}{{/* out of band + */}}var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l uint + var vp *bytesI {{/* put loop variables outside. 
seems currently needed for better perf */}} + for k2 := range v { + l = uint(len(mksv)) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { ee.WriteMapElemKey() } + e.asis(v2[j].v) + if esep { ee.WriteMapElemValue() } + e.encode(v[v2[j].i]) + } {{else}}{{ $x := sorttype .MapKey true}}v2 := make([]{{ $x }}, len(v)) + var i uint + for k := range v { + v2[i] = {{ $x }}(k) + i++ + } + sort.Sort({{ sorttype .MapKey false}}(v2)) + for _, k2 := range v2 { + if esep { ee.WriteMapElemKey() } + {{if eq .MapKey "string"}} if e.h.StringToRaw {ee.EncodeStringBytesRaw(bytesView(k2))} else {ee.EncodeStringEnc(cUTF8, k2)} {{else}}{{ $y := printf "%s(k2)" .MapKey }}{{ encmd .MapKey $y }}{{end}} + if esep { ee.WriteMapElemValue() } + {{ $y := printf "v[%s(k2)]" .MapKey }}{{ encmd .Elem $y }} + } {{end}} + } else { + for k2, v2 := range v { + if esep { ee.WriteMapElemKey() } + {{if eq .MapKey "string"}} if e.h.StringToRaw {ee.EncodeStringBytesRaw(bytesView(k2))} else {ee.EncodeStringEnc(cUTF8, k2)} {{else}}{{ encmd .MapKey "k2"}}{{end}} + if esep { ee.WriteMapElemValue() } + {{ encmd .Elem "v2"}} + } + } + ee.WriteMapEnd() +} +{{end}}{{end}}{{end}} + +// -- decode + +// -- -- fast path type switch +func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { + var changed bool + switch v := iv.(type) { +{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}{{if ne .Elem "uint8"}} + case []{{ .Elem }}: + var v2 []{{ .Elem }} + v2, changed = fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, false, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + case *[]{{ .Elem }}: + var v2 []{{ .Elem }} + v2, changed = fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*v, true, d) + if changed { + *v = v2 + }{{/* +*/}}{{end}}{{end}}{{end}}{{end}} +{{range .Values}}{{if not .Primitive}}{{if .MapKey }}{{/* +// maps only change if nil, and in that case, there's no point copying +*/}} + case map[{{ .MapKey }}]{{ .Elem }}: + fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, false, d) + case *map[{{ .MapKey }}]{{ .Elem }}: + var v2 map[{{ .MapKey }}]{{ .Elem }} + v2, changed = fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*v, true, d) + if changed { + *v = v2 + }{{/* +*/}}{{end}}{{end}}{{end}} + default: + _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 + return false + } + return true +} + +func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool { + switch v := iv.(type) { +{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} + case *[]{{ .Elem }}: + *v = nil {{/* +*/}}{{end}}{{end}}{{end}} +{{range .Values}}{{if not .Primitive}}{{if .MapKey }} + case *map[{{ .MapKey }}]{{ .Elem }}: + *v = nil {{/* +*/}}{{end}}{{end}}{{end}} + default: + _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 + return false + } + return true +} + +// -- -- fast path functions +{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} +{{/* +Slices can change if they +- did not come from an array +- are addressable (from a ptr) +- are settable (e.g. 
contained in an interface{}) +*/}} +func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*[]{{ .Elem }}) + v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, !array, d) + if changed { *vp = v } + } else { + v := rv2i(rv).([]{{ .Elem }}) + v2, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, !array, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, d *Decoder) { + v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d) + if changed { *vp = v } +} +func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, canChange bool, d *Decoder) (_ []{{ .Elem }}, changed bool) { + dd := d.d{{/* + // if dd.isContainerType(valueTypeNil) { dd.TryDecodeAsNil() + */}} + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { v = []{{ .Elem }}{} } else if len(v) != 0 { v = v[:0] } + changed = true + } + slh.End() + return v, changed + } + d.depthIncr() + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }}) + if xlen <= cap(v) { + v = v[:uint(xlen)] + } else { + v = make([]{{ .Elem }}, uint(xlen)) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + var j int + for j = 0; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }}) + } else { + xlen = 8 + } + v = make([]{{ .Elem }}, uint(xlen)) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, {{ zerocmd .Elem }}) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else if dd.TryDecodeAsNil() { + v[uint(j)] = {{ zerocmd .Elem }} + } else { + {{ if eq .Elem "interface{}" }}d.decode(&v[uint(j)]){{ else }}v[uint(j)] = {{ decmd .Elem }}{{ end }} + } + } + if canChange { + if j < len(v) { + v = v[:uint(j)] + changed = true + } else if j == 0 && v == nil { + v = make([]{{ .Elem }}, 0) + changed = true + } + } + slh.End() + d.depthDecr() + return v, changed +} +{{end}}{{end}}{{end}} + +{{range .Values}}{{if not .Primitive}}{{if .MapKey }} +{{/* +Maps can change if they are +- addressable (from a ptr) +- settable (e.g. 
contained in an interface{}) +*/}} +func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[{{ .MapKey }}]{{ .Elem }}) + v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d); + if changed { *vp = v } + } else { + fastpathTV.{{ .MethodNamePfx "Dec" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), false, d) + } +} +func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .Elem }}, d *Decoder) { + v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d) + if changed { *vp = v } +} +func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, canChange bool, + d *Decoder) (_ map[{{ .MapKey }}]{{ .Elem }}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators(){{/* + // if dd.isContainerType(valueTypeNil) {dd.TryDecodeAsNil() + */}} + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }}) + v = make(map[{{ .MapKey }}]{{ .Elem }}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + d.depthIncr() + {{ if eq .Elem "interface{}" }}mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + {{end}}var mk {{ .MapKey }} + var mv {{ .Elem }} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { dd.ReadMapElemKey() } + {{ if eq .MapKey "interface{}" }}mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. */}} + }{{ else }}mk = {{ decmd .MapKey }}{{ end }} + if esep { dd.ReadMapElemValue() } + if dd.TryDecodeAsNil() { + if v == nil {} else if d.h.DeleteOnNilMapValue { delete(v, mk) } else { v[mk] = {{ zerocmd .Elem }} } + continue + } + {{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil } + d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }} + if v != nil { v[mk] = mv } + } + dd.ReadMapEnd() + d.depthDecr() + return v, changed +} +{{end}}{{end}}{{end}} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/fast-path.not.go b/vendor/github.com/hashicorp/go-msgpack/codec/fast-path.not.go new file mode 100644 index 0000000000000..cf97db0f27c1e --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/fast-path.not.go @@ -0,0 +1,47 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build notfastpath + +package codec + +import "reflect" + +const fastpathEnabled = false + +// The generated fast-path code is very large, and adds a few seconds to the build time. +// This causes test execution, execution of small tools which use codec, etc +// to take a long time. +// +// To mitigate, we now support the notfastpath tag. +// This tag disables fastpath during build, allowing for faster build, test execution, +// short-program runs, etc. 
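Reviewer note: the notfastpath tag described above is applied at build time (for example `go build -tags notfastpath ./...`), and the stub functions that follow simply report a type-switch miss so the generic reflection path is taken instead. Below is a small in-package sketch of how one might confirm which mode a build compiled in; the test file is hypothetical and not part of the vendored diff:

// fastpath_mode_test.go (hypothetical, inside package codec)
package codec

import "testing"

// fastpathEnabled is true when the generated fast paths are compiled in,
// and false when built with -tags notfastpath (see fast-path.not.go).
func TestFastpathMode(t *testing.T) {
	t.Logf("fastpathEnabled = %v", fastpathEnabled)
}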
+ +func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false } +func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false } +func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false } +func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false } +func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool { return false } + +type fastpathT struct{} +type fastpathE struct { + rtid uintptr + rt reflect.Type + encfn func(*Encoder, *codecFnInfo, reflect.Value) + decfn func(*Decoder, *codecFnInfo, reflect.Value) +} +type fastpathA [0]fastpathE + +func (x fastpathA) index(rtid uintptr) int { return -1 } + +func (_ fastpathT) DecSliceUint8V(v []uint8, canChange bool, d *Decoder) (_ []uint8, changed bool) { + fn := d.h.fn(uint8SliceTyp, true, true) + d.kSlice(&fn.i, reflect.ValueOf(&v).Elem()) + return v, true +} + +var fastpathAV fastpathA +var fastpathTV fastpathT + +// ---- +type TestMammoth2Wrapper struct{} // to allow testMammoth work in notfastpath mode diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/gen-dec-array.go.tmpl b/vendor/github.com/hashicorp/go-msgpack/codec/gen-dec-array.go.tmpl new file mode 100644 index 0000000000000..790e914e13cb3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/gen-dec-array.go.tmpl @@ -0,0 +1,78 @@ +{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} +{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}} +var {{var "c"}} bool {{/* // changed */}} +_ = {{var "c"}}{{end}} +if {{var "l"}} == 0 { + {{if isSlice }}if {{var "v"}} == nil { + {{var "v"}} = []{{ .Typ }}{} + {{var "c"}} = true + } else if len({{var "v"}}) != 0 { + {{var "v"}} = {{var "v"}}[:0] + {{var "c"}} = true + } {{else if isChan }}if {{var "v"}} == nil { + {{var "v"}} = make({{ .CTyp }}, 0) + {{var "c"}} = true + } {{end}} +} else { + {{var "hl"}} := {{var "l"}} > 0 + var {{var "rl"}} int + _ = {{var "rl"}} + {{if isSlice }} if {{var "hl"}} { + if {{var "l"}} > cap({{var "v"}}) { + {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + if {{var "rl"}} <= cap({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "rl"}}] + } else { + {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) + } + {{var "c"}} = true + } else if {{var "l"}} != len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "l"}}] + {{var "c"}} = true + } + } {{end}} + var {{var "j"}} int + // var {{var "dn"}} bool + for {{var "j"}} = 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { // bounds-check-elimination + {{if not isArray}} if {{var "j"}} == 0 && {{var "v"}} == nil { + if {{var "hl"}} { + {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + } else { + {{var "rl"}} = {{if isSlice}}8{{else if isChan}}64{{end}} + } + {{var "v"}} = make({{if isSlice}}[]{{ .Typ }}{{else if isChan}}{{.CTyp}}{{end}}, {{var "rl"}}) + {{var "c"}} = true + }{{end}} + {{var "h"}}.ElemContainerState({{var "j"}}) + {{/* {{var "dn"}} = r.TryDecodeAsNil() */}}{{/* commented out, as decLineVar handles this already each time */}} + {{if isChan}}{{ $x := printf "%[1]vvcx%[2]v" .TempVar .Rand }}var {{$x}} {{ .Typ }} + {{ decLineVar $x }} + {{var "v"}} <- {{ $x }} + // println(">>>> sending ", {{ $x }}, " into ", {{var "v"}}) // TODO: remove this + {{else}}{{/* // if indefinite, etc, then expand the slice if necessary */}} + var {{var "db"}} bool + if {{var "j"}} >= len({{var "v"}}) { + 
{{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }}) + {{var "c"}} = true + {{else}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true + {{end}} + } + if {{var "db"}} { + z.DecSwallow() + } else { + {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} + } + {{end}} + } + {{if isSlice}} if {{var "j"}} < len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "j"}}] + {{var "c"}} = true + } else if {{var "j"}} == 0 && {{var "v"}} == nil { + {{var "v"}} = make([]{{ .Typ }}, 0) + {{var "c"}} = true + } {{end}} +} +{{var "h"}}.End() +{{if not isArray }}if {{var "c"}} { + *{{ .Varname }} = {{var "v"}} +}{{end}} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/gen-dec-map.go.tmpl b/vendor/github.com/hashicorp/go-msgpack/codec/gen-dec-map.go.tmpl new file mode 100644 index 0000000000000..8323b54940d04 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/gen-dec-map.go.tmpl @@ -0,0 +1,42 @@ +{{var "v"}} := *{{ .Varname }} +{{var "l"}} := r.ReadMapStart() +{{var "bh"}} := z.DecBasicHandle() +if {{var "v"}} == nil { + {{var "rl"}} := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) + {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) + *{{ .Varname }} = {{var "v"}} +} +var {{var "mk"}} {{ .KTyp }} +var {{var "mv"}} {{ .Typ }} +var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool +if {{var "bh"}}.MapValueReset { + {{if decElemKindPtr}}{{var "mg"}} = true + {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true } + {{else if not decElemKindImmutable}}{{var "mg"}} = true + {{end}} } +if {{var "l"}} != 0 { +{{var "hl"}} := {{var "l"}} > 0 + for {{var "j"}} := 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { + r.ReadMapElemKey() {{/* z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) */}} + {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} +{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { + {{var "mk"}} = string({{var "bv"}}) + }{{ end }}{{if decElemKindPtr}} + {{var "ms"}} = true{{end}} + if {{var "mg"}} { + {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] + if {{var "mok"}} { + {{var "ms"}} = false + } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} + } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} + r.ReadMapElemValue() {{/* z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) */}} + {{var "mdn"}} = false + {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y }} + if {{var "mdn"}} { + if {{ var "bh" }}.DeleteOnNilMapValue { delete({{var "v"}}, {{var "mk"}}) } else { {{var "v"}}[{{var "mk"}}] = {{decElemZero}} } + } else if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { + {{var "v"}}[{{var "mk"}}] = {{var "mv"}} + } +} +} // else len==0: TODO: Should we clear map entries? 
+r.ReadMapEnd() {{/* z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) */}}
diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/gen-enc-chan.go.tmpl b/vendor/github.com/hashicorp/go-msgpack/codec/gen-enc-chan.go.tmpl
new file mode 100644
index 0000000000000..4249588a3cf8d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-msgpack/codec/gen-enc-chan.go.tmpl
@@ -0,0 +1,27 @@
+{{.Label}}:
+switch timeout{{.Sfx}} := z.EncBasicHandle().ChanRecvTimeout; {
+case timeout{{.Sfx}} == 0: // only consume available
+ for {
+ select {
+ case b{{.Sfx}} := <-{{.Chan}}:
+ {{ .Slice }} = append({{.Slice}}, b{{.Sfx}})
+ default:
+ break {{.Label}}
+ }
+ }
+case timeout{{.Sfx}} > 0: // consume until timeout
+ tt{{.Sfx}} := time.NewTimer(timeout{{.Sfx}})
+ for {
+ select {
+ case b{{.Sfx}} := <-{{.Chan}}:
+ {{.Slice}} = append({{.Slice}}, b{{.Sfx}})
+ case <-tt{{.Sfx}}.C:
+ // close(tt.C)
+ break {{.Label}}
+ }
+ }
+default: // consume until close
+ for b{{.Sfx}} := range {{.Chan}} {
+ {{.Slice}} = append({{.Slice}}, b{{.Sfx}})
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/gen-helper.generated.go b/vendor/github.com/hashicorp/go-msgpack/codec/gen-helper.generated.go
new file mode 100644
index 0000000000000..2a7d1aab70b3a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-msgpack/codec/gen-helper.generated.go
@@ -0,0 +1,343 @@
+// comment this out // + build ignore
+
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// Code generated from gen-helper.go.tmpl - DO NOT EDIT.
+
+package codec
+
+import (
+ "encoding"
+ "reflect"
+)
+
+// GenVersion is the current version of codecgen.
+const GenVersion = 10
+
+// This file is used to generate helper code for codecgen.
+// The values here i.e. genHelper(En|De)coder are not to be used directly by
+// library users. They WILL change continuously and without notice.
+//
+// To help enforce this, we create an unexported type with exported members.
+// The only way to get the type is via the one exported type that we control (somewhat).
+//
+// When static codecs are created for types, they will use this value
+// to perform encoding or decoding of primitives or known slice or map types.
+
+// GenHelperEncoder is exported so that it can be used externally by codecgen.
+//
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func GenHelperEncoder(e *Encoder) (ge genHelperEncoder, ee genHelperEncDriver) {
+ ge = genHelperEncoder{e: e}
+ ee = genHelperEncDriver{encDriver: e.e}
+ return
+}
+
+// GenHelperDecoder is exported so that it can be used externally by codecgen.
+//
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func GenHelperDecoder(d *Decoder) (gd genHelperDecoder, dd genHelperDecDriver) { + gd = genHelperDecoder{d: d} + dd = genHelperDecDriver{decDriver: d.d} + return +} + +type genHelperEncDriver struct { + encDriver +} + +func (x genHelperEncDriver) EncodeBuiltin(rt uintptr, v interface{}) {} +func (x genHelperEncDriver) EncStructFieldKey(keyType valueType, s string) { + encStructFieldKey(s, x.encDriver, nil, keyType, false, false) +} +func (x genHelperEncDriver) EncodeSymbol(s string) { + x.encDriver.EncodeStringEnc(cUTF8, s) +} + +type genHelperDecDriver struct { + decDriver + C checkOverflow +} + +func (x genHelperDecDriver) DecodeBuiltin(rt uintptr, v interface{}) {} +func (x genHelperDecDriver) DecStructFieldKey(keyType valueType, buf *[decScratchByteArrayLen]byte) []byte { + return decStructFieldKey(x.decDriver, keyType, buf) +} +func (x genHelperDecDriver) DecodeInt(bitsize uint8) (i int64) { + return x.C.IntV(x.decDriver.DecodeInt64(), bitsize) +} +func (x genHelperDecDriver) DecodeUint(bitsize uint8) (ui uint64) { + return x.C.UintV(x.decDriver.DecodeUint64(), bitsize) +} +func (x genHelperDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { + f = x.DecodeFloat64() + if chkOverflow32 && chkOvf.Float32(f) { + panicv.errorf("float32 overflow: %v", f) + } + return +} +func (x genHelperDecDriver) DecodeFloat32As64() (f float64) { + f = x.DecodeFloat64() + if chkOvf.Float32(f) { + panicv.errorf("float32 overflow: %v", f) + } + return +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +type genHelperEncoder struct { + M must + e *Encoder + F fastpathT +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +type genHelperDecoder struct { + C checkOverflow + d *Decoder + F fastpathT +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBasicHandle() *BasicHandle { + return f.e.h +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBinary() bool { + return f.e.be // f.e.hh.isBinaryEncoding() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) IsJSONHandle() bool { + return f.e.js +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncFallback(iv interface{}) { + // println(">>>>>>>>> EncFallback") + // f.e.encodeI(iv, false, false) + f.e.encodeValue(reflect.ValueOf(iv), nil, false) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) { + bs, fnerr := iv.MarshalText() + f.e.marshalUtf8(bs, fnerr) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) { + bs, fnerr := iv.MarshalJSON() + f.e.marshalAsis(bs, fnerr) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { + bs, fnerr := iv.MarshalBinary() + f.e.marshalRaw(bs, fnerr) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncRaw(iv Raw) { f.e.rawBytes(iv) } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: builtin no longer supported - so we make this method a no-op, +// but leave in-place so that old generated files continue to work without regeneration. 
+func (f genHelperEncoder) TimeRtidIfBinc() (v uintptr) { return } + +// func (f genHelperEncoder) TimeRtidIfBinc() uintptr { +// if _, ok := f.e.hh.(*BincHandle); ok { +// return timeTypId +// } +// } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) I2Rtid(v interface{}) uintptr { + return i2rtid(v) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) Extension(rtid uintptr) (xfn *extTypeTagFn) { + return f.e.h.getExt(rtid) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncExtension(v interface{}, xfFn *extTypeTagFn) { + f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) WriteStr(s string) { + f.e.w.writestr(s) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) BytesView(v string) []byte { return bytesView(v) } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperEncoder) HasExtensions() bool { + return len(f.e.h.extHandle) != 0 +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperEncoder) EncExt(v interface{}) (r bool) { + if xfFn := f.e.h.getExt(i2rtid(v)); xfFn != nil { + f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) + return true + } + return false +} + +// ---------------- DECODER FOLLOWS ----------------- + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBasicHandle() *BasicHandle { + return f.d.h +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBinary() bool { + return f.d.be // f.d.hh.isBinaryEncoding() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSwallow() { f.d.swallow() } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecScratchBuffer() []byte { + return f.d.b[:] +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecScratchArrayBuffer() *[decScratchByteArrayLen]byte { + return &f.d.b +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { + // println(">>>>>>>>> DecFallback") + rv := reflect.ValueOf(iv) + if chkPtr { + rv = f.d.ensureDecodeable(rv) + } + f.d.decodeValue(rv, nil, false) + // f.d.decodeValueFallback(rv) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) { + return f.d.decSliceHelperStart() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) { + f.d.structFieldNotFound(index, name) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) { + f.d.arrayCannotExpand(sliceLen, streamLen) +} + +// FOR USE BY CODECGEN ONLY. 
IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { + fnerr := tm.UnmarshalText(f.d.d.DecodeStringAsBytes()) + if fnerr != nil { + panic(fnerr) + } +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { + // bs := f.dd.DecodeStringAsBytes() + // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. + fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) + if fnerr != nil { + panic(fnerr) + } +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { + fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, true)) + if fnerr != nil { + panic(fnerr) + } +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecRaw() []byte { return f.d.rawBytes() } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: builtin no longer supported - so we make this method a no-op, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) TimeRtidIfBinc() (v uintptr) { return } + +// func (f genHelperDecoder) TimeRtidIfBinc() uintptr { +// // Note: builtin is no longer supported - so make this a no-op +// if _, ok := f.d.hh.(*BincHandle); ok { +// return timeTypId +// } +// return 0 +// } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) IsJSONHandle() bool { + return f.d.js +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) I2Rtid(v interface{}) uintptr { + return i2rtid(v) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) Extension(rtid uintptr) (xfn *extTypeTagFn) { + return f.d.h.getExt(rtid) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecExtension(v interface{}, xfFn *extTypeTagFn) { + f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) HasExtensions() bool { + return len(f.d.h.extHandle) != 0 +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) DecExt(v interface{}) (r bool) { + if xfFn := f.d.h.getExt(i2rtid(v)); xfFn != nil { + f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) + return true + } + return false +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) { + return decInferLen(clen, maxlen, unit) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: no longer used, +// but leave in-place so that old generated files continue to work without regeneration. 
+func (f genHelperDecoder) StringView(v []byte) string { return stringView(v) }
diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/gen-helper.go.tmpl b/vendor/github.com/hashicorp/go-msgpack/codec/gen-helper.go.tmpl
new file mode 100644
index 0000000000000..f5d0634e6a1c4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-msgpack/codec/gen-helper.go.tmpl
@@ -0,0 +1,308 @@
+// comment this out // + build ignore
+
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// Code generated from gen-helper.go.tmpl - DO NOT EDIT.
+
+package codec
+
+import (
+ "encoding"
+ "reflect"
+)
+
+// GenVersion is the current version of codecgen.
+const GenVersion = {{ .Version }}
+
+// This file is used to generate helper code for codecgen.
+// The values here i.e. genHelper(En|De)coder are not to be used directly by
+// library users. They WILL change continuously and without notice.
+//
+// To help enforce this, we create an unexported type with exported members.
+// The only way to get the type is via the one exported type that we control (somewhat).
+//
+// When static codecs are created for types, they will use this value
+// to perform encoding or decoding of primitives or known slice or map types.
+
+// GenHelperEncoder is exported so that it can be used externally by codecgen.
+//
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func GenHelperEncoder(e *Encoder) (ge genHelperEncoder, ee genHelperEncDriver) {
+ ge = genHelperEncoder{e: e}
+ ee = genHelperEncDriver{encDriver: e.e}
+ return
+}
+
+// GenHelperDecoder is exported so that it can be used externally by codecgen.
+//
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func GenHelperDecoder(d *Decoder) (gd genHelperDecoder, dd genHelperDecDriver) {
+ gd = genHelperDecoder{d: d}
+ dd = genHelperDecDriver{decDriver: d.d}
+ return
+}
+
+type genHelperEncDriver struct {
+ encDriver
+}
+
+func (x genHelperEncDriver) EncodeBuiltin(rt uintptr, v interface{}) {}
+func (x genHelperEncDriver) EncStructFieldKey(keyType valueType, s string) {
+ encStructFieldKey(s, x.encDriver, nil, keyType, false, false)
+}
+func (x genHelperEncDriver) EncodeSymbol(s string) {
+ x.encDriver.EncodeStringEnc(cUTF8, s)
+}
+
+type genHelperDecDriver struct {
+ decDriver
+ C checkOverflow
+}
+
+func (x genHelperDecDriver) DecodeBuiltin(rt uintptr, v interface{}) {}
+func (x genHelperDecDriver) DecStructFieldKey(keyType valueType, buf *[decScratchByteArrayLen]byte) []byte {
+ return decStructFieldKey(x.decDriver, keyType, buf)
+}
+func (x genHelperDecDriver) DecodeInt(bitsize uint8) (i int64) {
+ return x.C.IntV(x.decDriver.DecodeInt64(), bitsize)
+}
+func (x genHelperDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
+ return x.C.UintV(x.decDriver.DecodeUint64(), bitsize)
+}
+func (x genHelperDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
+ f = x.DecodeFloat64()
+ if chkOverflow32 && chkOvf.Float32(f) {
+ panicv.errorf("float32 overflow: %v", f)
+ }
+ return
+}
+func (x genHelperDecDriver) DecodeFloat32As64() (f float64) {
+ f = x.DecodeFloat64()
+ if chkOvf.Float32(f) {
+ panicv.errorf("float32 overflow: %v", f)
+ }
+ return
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+type genHelperEncoder struct {
+ M must
+ e *Encoder
+ F fastpathT
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE.
*DO NOT USE* +type genHelperDecoder struct { + C checkOverflow + d *Decoder + F fastpathT +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBasicHandle() *BasicHandle { + return f.e.h +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBinary() bool { + return f.e.be // f.e.hh.isBinaryEncoding() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) IsJSONHandle() bool { + return f.e.js +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncFallback(iv interface{}) { + // println(">>>>>>>>> EncFallback") + // f.e.encodeI(iv, false, false) + f.e.encodeValue(reflect.ValueOf(iv), nil, false) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) { + bs, fnerr := iv.MarshalText() + f.e.marshalUtf8(bs, fnerr) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) { + bs, fnerr := iv.MarshalJSON() + f.e.marshalAsis(bs, fnerr) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { + bs, fnerr := iv.MarshalBinary() + f.e.marshalRaw(bs, fnerr) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncRaw(iv Raw) { f.e.rawBytes(iv) } +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: builtin no longer supported - so we make this method a no-op, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperEncoder) TimeRtidIfBinc() (v uintptr) { return } +// func (f genHelperEncoder) TimeRtidIfBinc() uintptr { +// if _, ok := f.e.hh.(*BincHandle); ok { +// return timeTypId +// } +// } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) I2Rtid(v interface{}) uintptr { + return i2rtid(v) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) Extension(rtid uintptr) (xfn *extTypeTagFn) { + return f.e.h.getExt(rtid) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncExtension(v interface{}, xfFn *extTypeTagFn) { + f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) WriteStr(s string) { + f.e.w.writestr(s) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) BytesView(v string) []byte { return bytesView(v) } +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperEncoder) HasExtensions() bool { + return len(f.e.h.extHandle) != 0 +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. 
+func (f genHelperEncoder) EncExt(v interface{}) (r bool) { + if xfFn := f.e.h.getExt(i2rtid(v)); xfFn != nil { + f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) + return true + } + return false +} + +// ---------------- DECODER FOLLOWS ----------------- + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBasicHandle() *BasicHandle { + return f.d.h +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBinary() bool { + return f.d.be // f.d.hh.isBinaryEncoding() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSwallow() { f.d.swallow() } +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecScratchBuffer() []byte { + return f.d.b[:] +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecScratchArrayBuffer() *[decScratchByteArrayLen]byte { + return &f.d.b +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { + // println(">>>>>>>>> DecFallback") + rv := reflect.ValueOf(iv) + if chkPtr { + rv = f.d.ensureDecodeable(rv) + } + f.d.decodeValue(rv, nil, false) + // f.d.decodeValueFallback(rv) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) { + return f.d.decSliceHelperStart() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) { + f.d.structFieldNotFound(index, name) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) { + f.d.arrayCannotExpand(sliceLen, streamLen) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { + fnerr := tm.UnmarshalText(f.d.d.DecodeStringAsBytes()) + if fnerr != nil { + panic(fnerr) + } +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { + // bs := f.dd.DecodeStringAsBytes() + // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. + fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) + if fnerr != nil { + panic(fnerr) + } +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { + fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, true)) + if fnerr != nil { + panic(fnerr) + } +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecRaw() []byte { return f.d.rawBytes() } +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: builtin no longer supported - so we make this method a no-op, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) TimeRtidIfBinc() (v uintptr) { return } +// func (f genHelperDecoder) TimeRtidIfBinc() uintptr { +// // Note: builtin is no longer supported - so make this a no-op +// if _, ok := f.d.hh.(*BincHandle); ok { +// return timeTypId +// } +// return 0 +// } + +// FOR USE BY CODECGEN ONLY. 
IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) IsJSONHandle() bool { + return f.d.js +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) I2Rtid(v interface{}) uintptr { + return i2rtid(v) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) Extension(rtid uintptr) (xfn *extTypeTagFn) { + return f.d.h.getExt(rtid) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecExtension(v interface{}, xfFn *extTypeTagFn) { + f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) HasExtensions() bool { + return len(f.d.h.extHandle) != 0 +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) DecExt(v interface{}) (r bool) { + if xfFn := f.d.h.getExt(i2rtid(v)); xfFn != nil { + f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) + return true + } + return false +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) { + return decInferLen(clen, maxlen, unit) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: no longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) StringView(v []byte) string { return stringView(v) } + diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/gen.generated.go b/vendor/github.com/hashicorp/go-msgpack/codec/gen.generated.go new file mode 100644 index 0000000000000..8b00090a8ff4c --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/gen.generated.go @@ -0,0 +1,164 @@ +// +build codecgen.exec + +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl + +const genDecMapTmpl = ` +{{var "v"}} := *{{ .Varname }} +{{var "l"}} := r.ReadMapStart() +{{var "bh"}} := z.DecBasicHandle() +if {{var "v"}} == nil { + {{var "rl"}} := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) + {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) + *{{ .Varname }} = {{var "v"}} +} +var {{var "mk"}} {{ .KTyp }} +var {{var "mv"}} {{ .Typ }} +var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool +if {{var "bh"}}.MapValueReset { + {{if decElemKindPtr}}{{var "mg"}} = true + {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true } + {{else if not decElemKindImmutable}}{{var "mg"}} = true + {{end}} } +if {{var "l"}} != 0 { +{{var "hl"}} := {{var "l"}} > 0 + for {{var "j"}} := 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { + r.ReadMapElemKey() {{/* z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) */}} + {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} +{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. 
*/}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { + {{var "mk"}} = string({{var "bv"}}) + }{{ end }}{{if decElemKindPtr}} + {{var "ms"}} = true{{end}} + if {{var "mg"}} { + {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] + if {{var "mok"}} { + {{var "ms"}} = false + } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} + } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} + r.ReadMapElemValue() {{/* z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) */}} + {{var "mdn"}} = false + {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y }} + if {{var "mdn"}} { + if {{ var "bh" }}.DeleteOnNilMapValue { delete({{var "v"}}, {{var "mk"}}) } else { {{var "v"}}[{{var "mk"}}] = {{decElemZero}} } + } else if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { + {{var "v"}}[{{var "mk"}}] = {{var "mv"}} + } +} +} // else len==0: TODO: Should we clear map entries? +r.ReadMapEnd() {{/* z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) */}} +` + +const genDecListTmpl = ` +{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} +{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}} +var {{var "c"}} bool {{/* // changed */}} +_ = {{var "c"}}{{end}} +if {{var "l"}} == 0 { + {{if isSlice }}if {{var "v"}} == nil { + {{var "v"}} = []{{ .Typ }}{} + {{var "c"}} = true + } else if len({{var "v"}}) != 0 { + {{var "v"}} = {{var "v"}}[:0] + {{var "c"}} = true + } {{else if isChan }}if {{var "v"}} == nil { + {{var "v"}} = make({{ .CTyp }}, 0) + {{var "c"}} = true + } {{end}} +} else { + {{var "hl"}} := {{var "l"}} > 0 + var {{var "rl"}} int + _ = {{var "rl"}} + {{if isSlice }} if {{var "hl"}} { + if {{var "l"}} > cap({{var "v"}}) { + {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + if {{var "rl"}} <= cap({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "rl"}}] + } else { + {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) + } + {{var "c"}} = true + } else if {{var "l"}} != len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "l"}}] + {{var "c"}} = true + } + } {{end}} + var {{var "j"}} int + // var {{var "dn"}} bool + for {{var "j"}} = 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { // bounds-check-elimination + {{if not isArray}} if {{var "j"}} == 0 && {{var "v"}} == nil { + if {{var "hl"}} { + {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + } else { + {{var "rl"}} = {{if isSlice}}8{{else if isChan}}64{{end}} + } + {{var "v"}} = make({{if isSlice}}[]{{ .Typ }}{{else if isChan}}{{.CTyp}}{{end}}, {{var "rl"}}) + {{var "c"}} = true + }{{end}} + {{var "h"}}.ElemContainerState({{var "j"}}) + {{/* {{var "dn"}} = r.TryDecodeAsNil() */}}{{/* commented out, as decLineVar handles this already each time */}} + {{if isChan}}{{ $x := printf "%[1]vvcx%[2]v" .TempVar .Rand }}var {{$x}} {{ .Typ }} + {{ decLineVar $x }} + {{var "v"}} <- {{ $x }} + // println(">>>> sending ", {{ $x }}, " into ", {{var "v"}}) // TODO: remove this + {{else}}{{/* // if indefinite, etc, then expand the slice if necessary */}} + var {{var "db"}} bool + if {{var "j"}} >= len({{var "v"}}) { + {{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }}) + {{var "c"}} = true + {{else}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true + {{end}} + } + if {{var "db"}} { + z.DecSwallow() + 
} else { + {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} + } + {{end}} + } + {{if isSlice}} if {{var "j"}} < len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "j"}}] + {{var "c"}} = true + } else if {{var "j"}} == 0 && {{var "v"}} == nil { + {{var "v"}} = make([]{{ .Typ }}, 0) + {{var "c"}} = true + } {{end}} +} +{{var "h"}}.End() +{{if not isArray }}if {{var "c"}} { + *{{ .Varname }} = {{var "v"}} +}{{end}} +` + +const genEncChanTmpl = ` +{{.Label}}: +switch timeout{{.Sfx}} := z.EncBasicHandle().ChanRecvTimeout; { +case timeout{{.Sfx}} == 0: // only consume available + for { + select { + case b{{.Sfx}} := <-{{.Chan}}: + {{ .Slice }} = append({{.Slice}}, b{{.Sfx}}) + default: + break {{.Label}} + } + } +case timeout{{.Sfx}} > 0: // consume until timeout + tt{{.Sfx}} := time.NewTimer(timeout{{.Sfx}}) + for { + select { + case b{{.Sfx}} := <-{{.Chan}}: + {{.Slice}} = append({{.Slice}}, b{{.Sfx}}) + case <-tt{{.Sfx}}.C: + // close(tt.C) + break {{.Label}} + } + } +default: // consume until close + for b{{.Sfx}} := range {{.Chan}} { + {{.Slice}} = append({{.Slice}}, b{{.Sfx}}) + } +} +` diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/gen.go b/vendor/github.com/hashicorp/go-msgpack/codec/gen.go new file mode 100644 index 0000000000000..74c4aa86af73a --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/gen.go @@ -0,0 +1,2149 @@ +// +build codecgen.exec + +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "bytes" + "encoding/base64" + "errors" + "fmt" + "go/format" + "io" + "io/ioutil" + "math/rand" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "sync" + "text/template" + "time" + "unicode" + "unicode/utf8" +) + +// --------------------------------------------------- +// codecgen supports the full cycle of reflection-based codec: +// - RawExt +// - Raw +// - Extensions +// - (Binary|Text|JSON)(Unm|M)arshal +// - generic by-kind +// +// This means that, for dynamic things, we MUST use reflection to at least get the reflect.Type. +// In those areas, we try to only do reflection or interface-conversion when NECESSARY: +// - Extensions, only if Extensions are configured. +// +// However, codecgen doesn't support the following: +// - Canonical option. (codecgen IGNORES it currently) +// This is just because it has not been implemented. +// - MissingFielder implementation. +// If a type implements MissingFielder, it is completely ignored by codecgen. +// +// During encode/decode, Selfer takes precedence. +// A type implementing Selfer will know how to encode/decode itself statically. +// +// The following field types are supported: +// array: [n]T +// slice: []T +// map: map[K]V +// primitive: [u]int[n], float(32|64), bool, string +// struct +// +// --------------------------------------------------- +// Note that a Selfer cannot call (e|d).(En|De)code on itself, +// as this will cause a circular reference, as (En|De)code will call Selfer methods. +// Any type that implements Selfer must implement completely and not fallback to (En|De)code. +// +// In addition, code in this file manages the generation of fast-path implementations of +// encode/decode of slices/maps of primitive keys/values. +// +// Users MUST re-generate their implementations whenever the code shape changes. +// The generated code will panic if it was generated with a version older than the supporting library. 
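+//
+// As a sketch, regeneration is typically a single codecgen invocation per package
+// (the filenames here are placeholders; consult the installed codecgen for its
+// exact flags):
+//
+//	codecgen -o mytypes.codecgen.go mytypes.go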
+// ---------------------------------------------------
+//
+// The codec framework is very feature-rich.
+// When encoding or decoding into an interface, it depends on the runtime type of the interface.
+// The type of the interface may be a named type, an extension, etc.
+// Consequently, we fall back to runtime codec for encoding/decoding interfaces.
+// In addition, we fall back for any value which cannot be guaranteed at runtime.
+// This allows us to support ANY value, including any named types, specifically those which
+// do not implement our interfaces (e.g. Selfer).
+//
+// This explains some slowness compared to other code generation codecs (e.g. msgp).
+// This reduction in speed is only seen when your code refers to interfaces,
+// e.g. type T struct { A interface{}; B []interface{}; C map[string]interface{} }
+//
+// codecgen will panic if the file was generated with an old version of the library in use.
+//
+// Note:
+// It was a conscious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil.
+// This way, there isn't a function call overhead just to see that we should not enter a block of code.
+//
+// Note:
+// codecgen-generated code depends on the variables defined by fast-path.generated.go.
+// Consequently, you cannot run with tags "codecgen notfastpath".

+// GenVersion is the current version of codecgen.
+//
+// NOTE: Increment this value each time codecgen changes fundamentally.
+// Fundamental changes are:
+// - helper methods change (signature change, new ones added, some removed, etc)
+// - codecgen command line changes
+//
+// v1: Initial Version
+// v2:
+// v3: Changes for Kubernetes:
+// changes in signature of some unpublished helper methods and codecgen cmdline arguments.
+// v4: Removed separator support from (en|de)cDriver, and refactored codec(gen)
+// v5: changes to support faster json decoding. Let encoder/decoder maintain state of collections.
+// v6: removed unsafe from gen, and now uses codecgen.exec tag
+// v7:
+// v8: current - we now maintain compatibility with old generated code.
+// v9: skipped
+// v10: modified encDriver and decDriver interfaces. Remove deprecated methods after Jan 1, 2019
+const genVersion = 10

+const (
+ genCodecPkg = "codec1978"
+ genTempVarPfx = "yy"
+ genTopLevelVarName = "x"
+
+ // ignore canBeNil parameter, and always set to true.
+ // This is because nil can appear anywhere, so we should always check.
+ genAnythingCanBeNil = true
+
+ // if genUseOneFunctionForDecStructMap, make a single codecDecodeSelferFromMap function;
+ // else make codecDecodeSelferFromMap{LenPrefix,CheckBreak} so that conditionals
+ // are not executed a lot.
+ //
+ // From testing, it didn't make much difference in runtime, so keep as true (one function only)
+ genUseOneFunctionForDecStructMap = true
+)

+type genStructMapStyle uint8

+const (
+ genStructMapStyleConsolidated genStructMapStyle = iota
+ genStructMapStyleLenPrefix
+ genStructMapStyleCheckBreak
+)

+var (
+ errGenAllTypesSamePkg = errors.New("All types must be in the same package")
+ errGenExpectArrayOrMap = errors.New("unexpected type.
Expecting array/map/slice") + + genBase64enc = base64.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789__") + genQNameRegex = regexp.MustCompile(`[A-Za-z_.]+`) +) + +type genBuf struct { + buf []byte +} + +func (x *genBuf) s(s string) *genBuf { x.buf = append(x.buf, s...); return x } +func (x *genBuf) b(s []byte) *genBuf { x.buf = append(x.buf, s...); return x } +func (x *genBuf) v() string { return string(x.buf) } +func (x *genBuf) f(s string, args ...interface{}) { x.s(fmt.Sprintf(s, args...)) } +func (x *genBuf) reset() { + if x.buf != nil { + x.buf = x.buf[:0] + } +} + +// genRunner holds some state used during a Gen run. +type genRunner struct { + w io.Writer // output + c uint64 // counter used for generating varsfx + t []reflect.Type // list of types to run selfer on + + tc reflect.Type // currently running selfer on this type + te map[uintptr]bool // types for which the encoder has been created + td map[uintptr]bool // types for which the decoder has been created + cp string // codec import path + + im map[string]reflect.Type // imports to add + imn map[string]string // package names of imports to add + imc uint64 // counter for import numbers + + is map[reflect.Type]struct{} // types seen during import search + bp string // base PkgPath, for which we are generating for + + cpfx string // codec package prefix + + tm map[reflect.Type]struct{} // types for which enc/dec must be generated + ts []reflect.Type // types for which enc/dec must be generated + + xs string // top level variable/constant suffix + hn string // fn helper type name + + ti *TypeInfos + // rr *rand.Rand // random generator for file-specific types + + nx bool // no extensions +} + +// Gen will write a complete go file containing Selfer implementations for each +// type passed. All the types must be in the same package. +// +// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE. +func Gen(w io.Writer, buildTags, pkgName, uid string, noExtensions bool, + ti *TypeInfos, typ ...reflect.Type) { + // All types passed to this method do not have a codec.Selfer method implemented directly. + // codecgen already checks the AST and skips any types that define the codec.Selfer methods. + // Consequently, there's no need to check and trim them if they implement codec.Selfer + + if len(typ) == 0 { + return + } + x := genRunner{ + w: w, + t: typ, + te: make(map[uintptr]bool), + td: make(map[uintptr]bool), + im: make(map[string]reflect.Type), + imn: make(map[string]string), + is: make(map[reflect.Type]struct{}), + tm: make(map[reflect.Type]struct{}), + ts: []reflect.Type{}, + bp: genImportPath(typ[0]), + xs: uid, + ti: ti, + nx: noExtensions, + } + if x.ti == nil { + x.ti = defTypeInfos + } + if x.xs == "" { + rr := rand.New(rand.NewSource(time.Now().UnixNano())) + x.xs = strconv.FormatInt(rr.Int63n(9999), 10) + } + + // gather imports first: + x.cp = genImportPath(reflect.TypeOf(x)) + x.imn[x.cp] = genCodecPkg + for _, t := range typ { + // fmt.Printf("###########: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name()) + if genImportPath(t) != x.bp { + panic(errGenAllTypesSamePkg) + } + x.genRefPkgs(t) + } + if buildTags != "" { + x.line("// +build " + buildTags) + x.line("") + } + x.line(` + +// Code generated by codecgen - DO NOT EDIT. + +`) + x.line("package " + pkgName) + x.line("") + x.line("import (") + if x.cp != x.bp { + x.cpfx = genCodecPkg + "." 
+ x.linef("%s \"%s\"", genCodecPkg, x.cp) + } + // use a sorted set of im keys, so that we can get consistent output + imKeys := make([]string, 0, len(x.im)) + for k := range x.im { + imKeys = append(imKeys, k) + } + sort.Strings(imKeys) + for _, k := range imKeys { // for k, _ := range x.im { + if k == x.imn[k] { + x.linef("\"%s\"", k) + } else { + x.linef("%s \"%s\"", x.imn[k], k) + } + } + // add required packages + for _, k := range [...]string{"runtime", "errors", "strconv"} { // "reflect", "fmt" + if _, ok := x.im[k]; !ok { + x.line("\"" + k + "\"") + } + } + x.line(")") + x.line("") + + x.line("const (") + x.linef("// ----- content types ----") + x.linef("codecSelferCcUTF8%s = %v", x.xs, int64(cUTF8)) + x.linef("codecSelferCcRAW%s = %v", x.xs, int64(cRAW)) + x.linef("// ----- value types used ----") + for _, vt := range [...]valueType{ + valueTypeArray, valueTypeMap, valueTypeString, + valueTypeInt, valueTypeUint, valueTypeFloat} { + x.linef("codecSelferValueType%s%s = %v", vt.String(), x.xs, int64(vt)) + } + + x.linef("codecSelferBitsize%s = uint8(32 << (^uint(0) >> 63))", x.xs) + x.line(")") + x.line("var (") + x.line("errCodecSelferOnlyMapOrArrayEncodeToStruct" + x.xs + " = errors.New(`only encoded map or array can be decoded into a struct`)") + x.line(")") + x.line("") + + x.hn = "codecSelfer" + x.xs + x.line("type " + x.hn + " struct{}") + x.line("") + + x.varsfxreset() + x.line("func init() {") + x.linef("if %sGenVersion != %v {", x.cpfx, genVersion) + x.line("_, file, _, _ := runtime.Caller(0)") + x.outf(`panic("codecgen version mismatch: current: %v, need " + strconv.FormatInt(int64(%sGenVersion), 10) + ". Re-generate file: " + file)`, genVersion, x.cpfx) + // x.out(`panic(fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", `) + // x.linef(`%v, %sGenVersion, file))`, genVersion, x.cpfx) + x.linef("}") + x.line("if false { var _ byte = 0; // reference the types, but skip this branch at build/run time") + // x.line("_ = strconv.ParseInt") + var n int + // for k, t := range x.im { + for _, k := range imKeys { + t := x.im[k] + x.linef("var v%v %s.%s", n, x.imn[k], t.Name()) + n++ + } + if n > 0 { + x.out("_") + for i := 1; i < n; i++ { + x.out(", _") + } + x.out(" = v0") + for i := 1; i < n; i++ { + x.outf(", v%v", i) + } + } + x.line("} ") // close if false + x.line("}") // close init + x.line("") + + // generate rest of type info + for _, t := range typ { + x.tc = t + x.selfer(true) + x.selfer(false) + } + + for _, t := range x.ts { + rtid := rt2id(t) + // generate enc functions for all these slice/map types. + x.varsfxreset() + x.linef("func (x %s) enc%s(v %s%s, e *%sEncoder) {", x.hn, x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), x.cpfx) + x.genRequiredMethodVars(true) + switch t.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + x.encListFallback("v", t) + case reflect.Map: + x.encMapFallback("v", t) + default: + panic(errGenExpectArrayOrMap) + } + x.line("}") + x.line("") + + // generate dec functions for all these slice/map types. 
+ x.varsfxreset() + x.linef("func (x %s) dec%s(v *%s, d *%sDecoder) {", x.hn, x.genMethodNameT(t), x.genTypeName(t), x.cpfx) + x.genRequiredMethodVars(false) + switch t.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + x.decListFallback("v", rtid, t) + case reflect.Map: + x.decMapFallback("v", rtid, t) + default: + panic(errGenExpectArrayOrMap) + } + x.line("}") + x.line("") + } + + x.line("") +} + +func (x *genRunner) checkForSelfer(t reflect.Type, varname string) bool { + // return varname != genTopLevelVarName && t != x.tc + // the only time we checkForSelfer is if we are not at the TOP of the generated code. + return varname != genTopLevelVarName +} + +func (x *genRunner) arr2str(t reflect.Type, s string) string { + if t.Kind() == reflect.Array { + return s + } + return "" +} + +func (x *genRunner) genRequiredMethodVars(encode bool) { + x.line("var h " + x.hn) + if encode { + x.line("z, r := " + x.cpfx + "GenHelperEncoder(e)") + } else { + x.line("z, r := " + x.cpfx + "GenHelperDecoder(d)") + } + x.line("_, _, _ = h, z, r") +} + +func (x *genRunner) genRefPkgs(t reflect.Type) { + if _, ok := x.is[t]; ok { + return + } + x.is[t] = struct{}{} + tpkg, tname := genImportPath(t), t.Name() + if tpkg != "" && tpkg != x.bp && tpkg != x.cp && tname != "" && tname[0] >= 'A' && tname[0] <= 'Z' { + if _, ok := x.im[tpkg]; !ok { + x.im[tpkg] = t + if idx := strings.LastIndex(tpkg, "/"); idx < 0 { + x.imn[tpkg] = tpkg + } else { + x.imc++ + x.imn[tpkg] = "pkg" + strconv.FormatUint(x.imc, 10) + "_" + genGoIdentifier(tpkg[idx+1:], false) + } + } + } + switch t.Kind() { + case reflect.Array, reflect.Slice, reflect.Ptr, reflect.Chan: + x.genRefPkgs(t.Elem()) + case reflect.Map: + x.genRefPkgs(t.Elem()) + x.genRefPkgs(t.Key()) + case reflect.Struct: + for i := 0; i < t.NumField(); i++ { + if fname := t.Field(i).Name; fname != "" && fname[0] >= 'A' && fname[0] <= 'Z' { + x.genRefPkgs(t.Field(i).Type) + } + } + } +} + +func (x *genRunner) varsfx() string { + x.c++ + return strconv.FormatUint(x.c, 10) +} + +func (x *genRunner) varsfxreset() { + x.c = 0 +} + +func (x *genRunner) out(s string) { + _, err := io.WriteString(x.w, s) + if err != nil { + panic(err) + } +} + +func (x *genRunner) outf(s string, params ...interface{}) { + _, err := fmt.Fprintf(x.w, s, params...) + if err != nil { + panic(err) + } +} + +func (x *genRunner) line(s string) { + x.out(s) + if len(s) == 0 || s[len(s)-1] != '\n' { + x.out("\n") + } +} + +func (x *genRunner) linef(s string, params ...interface{}) { + x.outf(s, params...) + if len(s) == 0 || s[len(s)-1] != '\n' { + x.out("\n") + } +} + +func (x *genRunner) genTypeName(t reflect.Type) (n string) { + // defer func() { fmt.Printf(">>>> ####: genTypeName: t: %v, name: '%s'\n", t, n) }() + + // if the type has a PkgPath, which doesn't match the current package, + // then include it. 
+ // We cannot depend on t.String() because it includes current package, + // or t.PkgPath because it includes full import path, + // + var ptrPfx string + for t.Kind() == reflect.Ptr { + ptrPfx += "*" + t = t.Elem() + } + if tn := t.Name(); tn != "" { + return ptrPfx + x.genTypeNamePrim(t) + } + switch t.Kind() { + case reflect.Map: + return ptrPfx + "map[" + x.genTypeName(t.Key()) + "]" + x.genTypeName(t.Elem()) + case reflect.Slice: + return ptrPfx + "[]" + x.genTypeName(t.Elem()) + case reflect.Array: + return ptrPfx + "[" + strconv.FormatInt(int64(t.Len()), 10) + "]" + x.genTypeName(t.Elem()) + case reflect.Chan: + return ptrPfx + t.ChanDir().String() + " " + x.genTypeName(t.Elem()) + default: + if t == intfTyp { + return ptrPfx + "interface{}" + } else { + return ptrPfx + x.genTypeNamePrim(t) + } + } +} + +func (x *genRunner) genTypeNamePrim(t reflect.Type) (n string) { + if t.Name() == "" { + return t.String() + } else if genImportPath(t) == "" || genImportPath(t) == genImportPath(x.tc) { + return t.Name() + } else { + return x.imn[genImportPath(t)] + "." + t.Name() + // return t.String() // best way to get the package name inclusive + } +} + +func (x *genRunner) genZeroValueR(t reflect.Type) string { + // if t is a named type, w + switch t.Kind() { + case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func, + reflect.Slice, reflect.Map, reflect.Invalid: + return "nil" + case reflect.Bool: + return "false" + case reflect.String: + return `""` + case reflect.Struct, reflect.Array: + return x.genTypeName(t) + "{}" + default: // all numbers + return "0" + } +} + +func (x *genRunner) genMethodNameT(t reflect.Type) (s string) { + return genMethodNameT(t, x.tc) +} + +func (x *genRunner) selfer(encode bool) { + t := x.tc + t0 := t + // always make decode use a pointer receiver, + // and structs/arrays always use a ptr receiver (encode|decode) + isptr := !encode || t.Kind() == reflect.Array || (t.Kind() == reflect.Struct && t != timeTyp) + x.varsfxreset() + + fnSigPfx := "func (" + genTopLevelVarName + " " + if isptr { + fnSigPfx += "*" + } + fnSigPfx += x.genTypeName(t) + x.out(fnSigPfx) + + if isptr { + t = reflect.PtrTo(t) + } + if encode { + x.line(") CodecEncodeSelf(e *" + x.cpfx + "Encoder) {") + x.genRequiredMethodVars(true) + x.encVar(genTopLevelVarName, t) + } else { + x.line(") CodecDecodeSelf(d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + // do not use decVar, as there is no need to check TryDecodeAsNil + // or way to elegantly handle that, and also setting it to a + // non-nil value doesn't affect the pointer passed. 
+ // x.decVar(genTopLevelVarName, t, false) + x.dec(genTopLevelVarName, t0, true) + } + x.line("}") + x.line("") + + if encode || t0.Kind() != reflect.Struct { + return + } + + // write is containerMap + if genUseOneFunctionForDecStructMap { + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromMap(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleConsolidated) + x.line("}") + x.line("") + } else { + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromMapLenPrefix(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleLenPrefix) + x.line("}") + x.line("") + + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromMapCheckBreak(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleCheckBreak) + x.line("}") + x.line("") + } + + // write containerArray + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromArray(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructArray(genTopLevelVarName, "l", "return", rt2id(t0), t0) + x.line("}") + x.line("") + +} + +// used for chan, array, slice, map +func (x *genRunner) xtraSM(varname string, t reflect.Type, encode, isptr bool) { + var ptrPfx, addrPfx string + if isptr { + ptrPfx = "*" + } else { + addrPfx = "&" + } + if encode { + x.linef("h.enc%s((%s%s)(%s), e)", x.genMethodNameT(t), ptrPfx, x.genTypeName(t), varname) + } else { + x.linef("h.dec%s((*%s)(%s%s), d)", x.genMethodNameT(t), x.genTypeName(t), addrPfx, varname) + } + x.registerXtraT(t) +} + +func (x *genRunner) registerXtraT(t reflect.Type) { + // recursively register the types + if _, ok := x.tm[t]; ok { + return + } + var tkey reflect.Type + switch t.Kind() { + case reflect.Chan, reflect.Slice, reflect.Array: + case reflect.Map: + tkey = t.Key() + default: + return + } + x.tm[t] = struct{}{} + x.ts = append(x.ts, t) + // check if this refers to any xtra types eg. a slice of array: add the array + x.registerXtraT(t.Elem()) + if tkey != nil { + x.registerXtraT(tkey) + } +} + +// encVar will encode a variable. +// The parameter, t, is the reflect.Type of the variable itself +func (x *genRunner) encVar(varname string, t reflect.Type) { + // fmt.Printf(">>>>>> varname: %s, t: %v\n", varname, t) + var checkNil bool + switch t.Kind() { + case reflect.Ptr, reflect.Interface, reflect.Slice, reflect.Map, reflect.Chan: + checkNil = true + } + if checkNil { + x.linef("if %s == nil { r.EncodeNil() } else { ", varname) + } + + switch t.Kind() { + case reflect.Ptr: + telem := t.Elem() + tek := telem.Kind() + if tek == reflect.Array || (tek == reflect.Struct && telem != timeTyp) { + x.enc(varname, genNonPtr(t)) + break + } + i := x.varsfx() + x.line(genTempVarPfx + i + " := *" + varname) + x.enc(genTempVarPfx+i, genNonPtr(t)) + case reflect.Struct, reflect.Array: + if t == timeTyp { + x.enc(varname, t) + break + } + i := x.varsfx() + x.line(genTempVarPfx + i + " := &" + varname) + x.enc(genTempVarPfx+i, t) + default: + x.enc(varname, t) + } + + if checkNil { + x.line("}") + } + +} + +// enc will encode a variable (varname) of type t, where t represents T. 
+// if t is !time.Time and t is of kind reflect.Struct or reflect.Array, varname is of type *T +// (to prevent copying), +// else t is of type T +func (x *genRunner) enc(varname string, t reflect.Type) { + rtid := rt2id(t) + ti2 := x.ti.get(rtid, t) + // We call CodecEncodeSelf if one of the following are honored: + // - the type already implements Selfer, call that + // - the type has a Selfer implementation just created, use that + // - the type is in the list of the ones we will generate for, but it is not currently being generated + + mi := x.varsfx() + // tptr := reflect.PtrTo(t) + tk := t.Kind() + if x.checkForSelfer(t, varname) { + if tk == reflect.Array || (tk == reflect.Struct && rtid != timeTypId) { // varname is of type *T + // if tptr.Implements(selferTyp) || t.Implements(selferTyp) { + if ti2.isFlag(typeInfoFlagIsZeroerPtr) || ti2.isFlag(typeInfoFlagIsZeroer) { + x.line(varname + ".CodecEncodeSelf(e)") + return + } + } else { // varname is of type T + if ti2.cs { // t.Implements(selferTyp) { + x.line(varname + ".CodecEncodeSelf(e)") + return + } else if ti2.csp { // tptr.Implements(selferTyp) { + x.linef("%ssf%s := &%s", genTempVarPfx, mi, varname) + x.linef("%ssf%s.CodecEncodeSelf(e)", genTempVarPfx, mi) + return + } + } + + if _, ok := x.te[rtid]; ok { + x.line(varname + ".CodecEncodeSelf(e)") + return + } + } + + inlist := false + for _, t0 := range x.t { + if t == t0 { + inlist = true + if x.checkForSelfer(t, varname) { + x.line(varname + ".CodecEncodeSelf(e)") + return + } + break + } + } + + var rtidAdded bool + if t == x.tc { + x.te[rtid] = true + rtidAdded = true + } + + // check if + // - type is time.Time, RawExt, Raw + // - the type implements (Text|JSON|Binary)(Unm|M)arshal + + x.line("if false {") //start if block + defer func() { x.line("}") }() //end if block + + if t == timeTyp { + x.linef("} else if !z.EncBasicHandle().TimeNotBuiltin { r.EncodeTime(%s)", varname) + // return + } + if t == rawTyp { + x.linef("} else { z.EncRaw(%s)", varname) + return + } + if t == rawExtTyp { + x.linef("} else { r.EncodeRawExt(%s, e)", varname) + return + } + // only check for extensions if the type is named, and has a packagePath. 
+ var arrayOrStruct = tk == reflect.Array || tk == reflect.Struct // meaning varname is of type *T + if !x.nx && genImportPath(t) != "" && t.Name() != "" { + yy := fmt.Sprintf("%sxt%s", genTempVarPfx, mi) + x.linef("} else if %s := z.Extension(z.I2Rtid(%s)); %s != nil { z.EncExtension(%s, %s) ", yy, varname, yy, varname, yy) + } + if arrayOrStruct { // varname is of type *T + if ti2.bm || ti2.bmp { // t.Implements(binaryMarshalerTyp) || tptr.Implements(binaryMarshalerTyp) { + x.linef("} else if z.EncBinary() { z.EncBinaryMarshal(%v) ", varname) + } + if ti2.jm || ti2.jmp { // t.Implements(jsonMarshalerTyp) || tptr.Implements(jsonMarshalerTyp) { + x.linef("} else if !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", varname) + } else if ti2.tm || ti2.tmp { // t.Implements(textMarshalerTyp) || tptr.Implements(textMarshalerTyp) { + x.linef("} else if !z.EncBinary() { z.EncTextMarshal(%v) ", varname) + } + } else { // varname is of type T + if ti2.bm { // t.Implements(binaryMarshalerTyp) { + x.linef("} else if z.EncBinary() { z.EncBinaryMarshal(%v) ", varname) + } else if ti2.bmp { // tptr.Implements(binaryMarshalerTyp) { + x.linef("} else if z.EncBinary() { z.EncBinaryMarshal(&%v) ", varname) + } + if ti2.jm { // t.Implements(jsonMarshalerTyp) { + x.linef("} else if !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", varname) + } else if ti2.jmp { // tptr.Implements(jsonMarshalerTyp) { + x.linef("} else if !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(&%v) ", varname) + } else if ti2.tm { // t.Implements(textMarshalerTyp) { + x.linef("} else if !z.EncBinary() { z.EncTextMarshal(%v) ", varname) + } else if ti2.tmp { // tptr.Implements(textMarshalerTyp) { + x.linef("} else if !z.EncBinary() { z.EncTextMarshal(&%v) ", varname) + } + } + x.line("} else {") + + switch t.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + x.line("r.EncodeInt(int64(" + varname + "))") + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + x.line("r.EncodeUint(uint64(" + varname + "))") + case reflect.Float32: + x.line("r.EncodeFloat32(float32(" + varname + "))") + case reflect.Float64: + x.line("r.EncodeFloat64(float64(" + varname + "))") + case reflect.Bool: + x.line("r.EncodeBool(bool(" + varname + "))") + case reflect.String: + x.linef("if z.EncBasicHandle().StringToRaw { r.EncodeStringBytesRaw(z.BytesView(string(%s))) } else { r.EncodeStringEnc(codecSelferCcUTF8%s, string(%s)) }", varname, x.xs, varname) + case reflect.Chan: + x.xtraSM(varname, t, true, false) + // x.encListFallback(varname, rtid, t) + case reflect.Array: + x.xtraSM(varname, t, true, true) + case reflect.Slice: + // if nil, call dedicated function + // if a []uint8, call dedicated function + // if a known fastpath slice, call dedicated function + // else write encode function in-line. + // - if elements are primitives or Selfers, call dedicated function on each member. + // - else call Encoder.encode(XXX) on it. + if rtid == uint8SliceTypId { + x.line("r.EncodeStringBytesRaw([]byte(" + varname + "))") + } else if fastpathAV.index(rtid) != -1 { + g := x.newGenV(t) + x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", e)") + } else { + x.xtraSM(varname, t, true, false) + // x.encListFallback(varname, rtid, t) + } + case reflect.Map: + // if nil, call dedicated function + // if a known fastpath map, call dedicated function + // else write encode function in-line.
+ // - if elements are primitives or Selfers, call dedicated function on each member. + // - else call Encoder.encode(XXX) on it. + // x.line("if " + varname + " == nil { \nr.EncodeNil()\n } else { ") + if fastpathAV.index(rtid) != -1 { + g := x.newGenV(t) + x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", e)") + } else { + x.xtraSM(varname, t, true, false) + // x.encMapFallback(varname, rtid, t) + } + case reflect.Struct: + if !inlist { + delete(x.te, rtid) + x.line("z.EncFallback(" + varname + ")") + break + } + x.encStruct(varname, rtid, t) + default: + if rtidAdded { + delete(x.te, rtid) + } + x.line("z.EncFallback(" + varname + ")") + } +} + +func (x *genRunner) encZero(t reflect.Type) { + switch t.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + x.line("r.EncodeInt(0)") + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + x.line("r.EncodeUint(0)") + case reflect.Float32: + x.line("r.EncodeFloat32(0)") + case reflect.Float64: + x.line("r.EncodeFloat64(0)") + case reflect.Bool: + x.line("r.EncodeBool(false)") + case reflect.String: + x.linef(`if z.EncBasicHandle().StringToRaw { r.EncodeStringBytesRaw([]byte{}) } else { r.EncodeStringEnc(codecSelferCcUTF8%s, "") }`, x.xs) + default: + x.line("r.EncodeNil()") + } +} + +func (x *genRunner) encOmitEmptyLine(t2 reflect.StructField, varname string, buf *genBuf) { + // smartly check omitEmpty on a struct type, as it may contain uncomparable map/slice/etc. + // also, for maps/slices/arrays, check if len ! 0 (not if == zero value) + varname2 := varname + "." + t2.Name + switch t2.Type.Kind() { + case reflect.Struct: + rtid2 := rt2id(t2.Type) + ti2 := x.ti.get(rtid2, t2.Type) + // fmt.Printf(">>>> structfield: omitempty: type: %s, field: %s\n", t2.Type.Name(), t2.Name) + if ti2.rtid == timeTypId { + buf.s("!(").s(varname2).s(".IsZero())") + break + } + if ti2.isFlag(typeInfoFlagIsZeroerPtr) || ti2.isFlag(typeInfoFlagIsZeroer) { + buf.s("!(").s(varname2).s(".IsZero())") + break + } + if ti2.isFlag(typeInfoFlagComparable) { + buf.s(varname2).s(" != ").s(x.genZeroValueR(t2.Type)) + break + } + // buf.s("(") + buf.s("false") + for i, n := 0, t2.Type.NumField(); i < n; i++ { + f := t2.Type.Field(i) + if f.PkgPath != "" { // unexported + continue + } + buf.s(" || ") + x.encOmitEmptyLine(f, varname2, buf) + } + //buf.s(")") + case reflect.Bool: + buf.s(varname2) + case reflect.Map, reflect.Slice, reflect.Array, reflect.Chan: + buf.s("len(").s(varname2).s(") != 0") + default: + buf.s(varname2).s(" != ").s(x.genZeroValueR(t2.Type)) + } +} + +func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) { + // Use knowledge from structfieldinfo (mbs, encodable fields. Ignore omitempty. ) + // replicate code in kStruct i.e. for each field, deref type to non-pointer, and call x.enc on it + + // if t === type currently running selfer on, do for all + ti := x.ti.get(rtid, t) + i := x.varsfx() + sepVarname := genTempVarPfx + "sep" + i + numfieldsvar := genTempVarPfx + "q" + i + ti2arrayvar := genTempVarPfx + "r" + i + struct2arrvar := genTempVarPfx + "2arr" + i + + x.line(sepVarname + " := !z.EncBinary()") + x.linef("%s := z.EncBasicHandle().StructToArray", struct2arrvar) + x.linef("_, _ = %s, %s", sepVarname, struct2arrvar) + x.linef("const %s bool = %v // struct tag has 'toArray'", ti2arrayvar, ti.toArray) + + tisfi := ti.sfiSrc // always use sequence from file. decStruct expects same thing. 
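+ // A minimal sketch of the "should this field be written?" table built
+ // below for a struct with two omitempty fields and one plain field
+ // (illustrative; assumes genTempVarPfx == "yy"):
+ //
+ //   var yyq2 = [3]bool{ // should field at this index be written?
+ //     x.A != "", // A (omitempty)
+ //     len(x.B) != 0, // B (omitempty)
+ //     true, // C
+ //   }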
+ + // var nn int + // due to omitEmpty, we need to calculate the + // number of non-empty things we write out first. + // This is required as we need to pre-determine the size of the container, + // to support length-prefixing. + if ti.anyOmitEmpty { + x.linef("var %s = [%v]bool{ // should field at this index be written?", numfieldsvar, len(tisfi)) + + for j, si := range tisfi { + _ = j + if !si.omitEmpty() { + // x.linef("%s[%v] = true // %s", numfieldsvar, j, si.fieldName) + x.linef("true, // %s", si.fieldName) + // nn++ + continue + } + var t2 reflect.StructField + var omitline genBuf + { + t2typ := t + varname3 := varname + // go through the loop, record the t2 field explicitly, + // and gather the omit line if embedded in pointers. + for ij, ix := range si.is { + if uint8(ij) == si.nis { + break + } + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(int(ix)) + t2typ = t2.Type + varname3 = varname3 + "." + t2.Name + // do not include actual field in the omit line. + // that is done subsequently (right after - below). + if uint8(ij+1) < si.nis && t2typ.Kind() == reflect.Ptr { + omitline.s(varname3).s(" != nil && ") + } + } + } + x.encOmitEmptyLine(t2, varname, &omitline) + x.linef("%s, // %s", omitline.v(), si.fieldName) + } + x.line("}") + x.linef("_ = %s", numfieldsvar) + } + // x.linef("var %snn%s int", genTempVarPfx, i) + x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray { + x.linef("r.WriteArrayStart(%d)", len(tisfi)) + x.linef("} else {") // if not ti.toArray + if ti.anyOmitEmpty { + // nn = 0 + // x.linef("var %snn%s = %v", genTempVarPfx, i, nn) + x.linef("var %snn%s int", genTempVarPfx, i) + x.linef("for _, b := range %s { if b { %snn%s++ } }", numfieldsvar, genTempVarPfx, i) + x.linef("r.WriteMapStart(%snn%s)", genTempVarPfx, i) + x.linef("%snn%s = %v", genTempVarPfx, i, 0) + } else { + x.linef("r.WriteMapStart(%d)", len(tisfi)) + } + x.line("}") // close if not StructToArray + + for j, si := range tisfi { + i := x.varsfx() + isNilVarName := genTempVarPfx + "n" + i + var labelUsed bool + var t2 reflect.StructField + { + t2typ := t + varname3 := varname + for ij, ix := range si.is { + if uint8(ij) == si.nis { + break + } + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(int(ix)) + t2typ = t2.Type + varname3 = varname3 + "." 
+ t2.Name + if t2typ.Kind() == reflect.Ptr { + if !labelUsed { + x.line("var " + isNilVarName + " bool") + } + x.line("if " + varname3 + " == nil { " + isNilVarName + " = true ") + x.line("goto LABEL" + i) + x.line("}") + labelUsed = true + // "varname3 = new(" + x.genTypeName(t3.Elem()) + ") }") + } + } + // t2 = t.FieldByIndex(si.is) + } + if labelUsed { + x.line("LABEL" + i + ":") + } + // if the type of the field is a Selfer, or one of the ones + + x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray + if labelUsed { + x.linef("if %s { r.WriteArrayElem(); r.EncodeNil() } else { ", isNilVarName) + } + x.line("r.WriteArrayElem()") + if si.omitEmpty() { + x.linef("if %s[%v] {", numfieldsvar, j) + } + x.encVar(varname+"."+t2.Name, t2.Type) + if si.omitEmpty() { + x.linef("} else {") + x.encZero(t2.Type) + x.linef("}") + } + if labelUsed { + x.line("}") + } + + x.linef("} else {") // if not ti.toArray + + if si.omitEmpty() { + x.linef("if %s[%v] {", numfieldsvar, j) + } + x.line("r.WriteMapElemKey()") + + // emulate EncStructFieldKey + switch ti.keyType { + case valueTypeInt: + x.linef("r.EncodeInt(z.M.Int(strconv.ParseInt(`%s`, 10, 64)))", si.encName) + case valueTypeUint: + x.linef("r.EncodeUint(z.M.Uint(strconv.ParseUint(`%s`, 10, 64)))", si.encName) + case valueTypeFloat: + x.linef("r.EncodeFloat64(z.M.Float(strconv.ParseFloat(`%s`, 64)))", si.encName) + default: // string + if si.encNameAsciiAlphaNum { + x.linef(`if z.IsJSONHandle() { z.WriteStr("\"%s\"") } else { `, si.encName) + } + x.linef("r.EncodeStringEnc(codecSelferCcUTF8%s, `%s`)", x.xs, si.encName) + if si.encNameAsciiAlphaNum { + x.linef("}") + } + } + // x.linef("r.EncStructFieldKey(codecSelferValueType%s%s, `%s`)", ti.keyType.String(), x.xs, si.encName) + x.line("r.WriteMapElemValue()") + if labelUsed { + x.line("if " + isNilVarName + " { r.EncodeNil() } else { ") + x.encVar(varname+"."+t2.Name, t2.Type) + x.line("}") + } else { + x.encVar(varname+"."+t2.Name, t2.Type) + } + if si.omitEmpty() { + x.line("}") + } + x.linef("} ") // end if/else ti.toArray + } + x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray { + x.line("r.WriteArrayEnd()") + x.line("} else {") + x.line("r.WriteMapEnd()") + x.line("}") + +} + +func (x *genRunner) encListFallback(varname string, t reflect.Type) { + elemBytes := t.Elem().Kind() == reflect.Uint8 + if t.AssignableTo(uint8SliceTyp) { + x.linef("r.EncodeStringBytesRaw([]byte(%s))", varname) + return + } + if t.Kind() == reflect.Array && elemBytes { + x.linef("r.EncodeStringBytesRaw(((*[%d]byte)(%s))[:])", t.Len(), varname) + return + } + i := x.varsfx() + if t.Kind() == reflect.Chan { + type ts struct { + Label, Chan, Slice, Sfx string + } + tm, err := template.New("").Parse(genEncChanTmpl) + if err != nil { + panic(err) + } + x.linef("if %s == nil { r.EncodeNil() } else { ", varname) + x.linef("var sch%s []%s", i, x.genTypeName(t.Elem())) + err = tm.Execute(x.w, &ts{"Lsch" + i, varname, "sch" + i, i}) + if err != nil { + panic(err) + } + // x.linef("%s = sch%s", varname, i) + if elemBytes { + x.linef("r.EncodeStringBytesRaw([]byte(%s))", "sch"+i) + x.line("}") + return + } + varname = "sch" + i + } + + x.line("r.WriteArrayStart(len(" + varname + "))") + x.linef("for _, %sv%s := range %s {", genTempVarPfx, i, varname) + x.line("r.WriteArrayElem()") + + x.encVar(genTempVarPfx+"v"+i, t.Elem()) + x.line("}") + x.line("r.WriteArrayEnd()") + if t.Kind() == reflect.Chan { + x.line("}") + } +} + +func (x *genRunner) encMapFallback(varname string, t reflect.Type) { + 
// TODO: expand this to handle canonical. + i := x.varsfx() + x.line("r.WriteMapStart(len(" + varname + "))") + x.linef("for %sk%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname) + x.line("r.WriteMapElemKey()") + x.encVar(genTempVarPfx+"k"+i, t.Key()) + x.line("r.WriteMapElemValue()") + x.encVar(genTempVarPfx+"v"+i, t.Elem()) + x.line("}") + x.line("r.WriteMapEnd()") +} + +func (x *genRunner) decVarInitPtr(varname, nilvar string, t reflect.Type, si *structFieldInfo, + newbuf, nilbuf *genBuf) (t2 reflect.StructField) { + //we must accommodate anonymous fields, where the embedded field is a nil pointer in the value. + // t2 = t.FieldByIndex(si.is) + t2typ := t + varname3 := varname + t2kind := t2typ.Kind() + var nilbufed bool + if si != nil { + for ij, ix := range si.is { + if uint8(ij) == si.nis { + break + } + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(int(ix)) + t2typ = t2.Type + varname3 = varname3 + "." + t2.Name + t2kind = t2typ.Kind() + if t2kind != reflect.Ptr { + continue + } + if newbuf != nil { + newbuf.f("if %s == nil { %s = new(%s) }\n", varname3, varname3, x.genTypeName(t2typ.Elem())) + } + if nilbuf != nil { + if !nilbufed { + nilbuf.s("if true") + nilbufed = true + } + nilbuf.s(" && ").s(varname3).s(" != nil") + } + } + } + // if t2typ.Kind() == reflect.Ptr { + // varname3 = varname3 + t2.Name + // } + if nilbuf != nil { + if nilbufed { + nilbuf.s(" { ") + } + if nilvar != "" { + nilbuf.s(nilvar).s(" = true") + } else if tk := t2typ.Kind(); tk == reflect.Ptr { + if strings.IndexByte(varname3, '.') != -1 || strings.IndexByte(varname3, '[') != -1 { + nilbuf.s(varname3).s(" = nil") + } else { + nilbuf.s("*").s(varname3).s(" = ").s(x.genZeroValueR(t2typ.Elem())) + } + } else { + nilbuf.s(varname3).s(" = ").s(x.genZeroValueR(t2typ)) + } + if nilbufed { + nilbuf.s("}") + } + } + return t2 +} + +// decVar takes a variable called varname, of type t +func (x *genRunner) decVarMain(varname, rand string, t reflect.Type, checkNotNil bool) { + // We only encode as nil if a nillable value. + // This removes some of the wasted checks for TryDecodeAsNil. + // We need to think about this more, to see what happens if omitempty, etc + // cause a nil value to be stored when something is expected. + // This could happen when decoding from a struct encoded as an array. + // For that, decVar should be called with canNil=true, to force true as its value. + var varname2 string + if t.Kind() != reflect.Ptr { + if t.PkgPath() != "" || !x.decTryAssignPrimitive(varname, t, false) { + x.dec(varname, t, false) + } + } else { + if checkNotNil { + x.linef("if %s == nil { %s = new(%s) }", varname, varname, x.genTypeName(t.Elem())) + } + // Ensure we set underlying ptr to a non-nil value (so we can deref to it later). + // There's a chance of a **T in here which is nil. + var ptrPfx string + for t = t.Elem(); t.Kind() == reflect.Ptr; t = t.Elem() { + ptrPfx += "*" + if checkNotNil { + x.linef("if %s%s == nil { %s%s = new(%s)}", + ptrPfx, varname, ptrPfx, varname, x.genTypeName(t)) + } + } + // Should we create temp var if a slice/map indexing? No. dec(...) can now handle it. 
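+ // Minimal sketch for a field F of type **T (illustrative; assumes
+ // genTempVarPfx == "yy"): the loop above nil-checks/allocates each
+ // intermediate pointer level, and the else branch below then emits a
+ // temporary so that dec(...) always receives a single-level pointer:
+ //
+ //   yyz1 := *x.F // type *T; then decoded with isptr=true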
+ + if ptrPfx == "" { + x.dec(varname, t, true) + } else { + varname2 = genTempVarPfx + "z" + rand + x.line(varname2 + " := " + ptrPfx + varname) + x.dec(varname2, t, true) + } + } +} + +// decVar takes a variable called varname, of type t +func (x *genRunner) decVar(varname, nilvar string, t reflect.Type, canBeNil, checkNotNil bool) { + i := x.varsfx() + + // We only encode as nil if a nillable value. + // This removes some of the wasted checks for TryDecodeAsNil. + // We need to think about this more, to see what happens if omitempty, etc + // cause a nil value to be stored when something is expected. + // This could happen when decoding from a struct encoded as an array. + // For that, decVar should be called with canNil=true, to force true as its value. + + if !canBeNil { + canBeNil = genAnythingCanBeNil || !genIsImmutable(t) + } + + if canBeNil { + var buf genBuf + x.decVarInitPtr(varname, nilvar, t, nil, nil, &buf) + x.linef("if r.TryDecodeAsNil() { %s } else {", buf.buf) + } else { + x.line("// cannot be nil") + } + + x.decVarMain(varname, i, t, checkNotNil) + + if canBeNil { + x.line("} ") + } +} + +// dec will decode a variable (varname) of type t or ptrTo(t) if isptr==true. +// t is always a basetype (i.e. not of kind reflect.Ptr). +func (x *genRunner) dec(varname string, t reflect.Type, isptr bool) { + // assumptions: + // - the varname is to a pointer already. No need to take address of it + // - t is always a baseType T (not a *T, etc). + rtid := rt2id(t) + ti2 := x.ti.get(rtid, t) + // tptr := reflect.PtrTo(t) + if x.checkForSelfer(t, varname) { + if ti2.cs || ti2.csp { // t.Implements(selferTyp) || tptr.Implements(selferTyp) { + x.line(varname + ".CodecDecodeSelf(d)") + return + } + if _, ok := x.td[rtid]; ok { + x.line(varname + ".CodecDecodeSelf(d)") + return + } + } + + inlist := false + for _, t0 := range x.t { + if t == t0 { + inlist = true + if x.checkForSelfer(t, varname) { + x.line(varname + ".CodecDecodeSelf(d)") + return + } + break + } + } + + var rtidAdded bool + if t == x.tc { + x.td[rtid] = true + rtidAdded = true + } + + // check if + // - type is time.Time, Raw, RawExt + // - the type implements (Text|JSON|Binary)(Unm|M)arshal + + mi := x.varsfx() + // x.linef("%sm%s := z.DecBinary()", genTempVarPfx, mi) + // x.linef("_ = %sm%s", genTempVarPfx, mi) + x.line("if false {") //start if block + defer func() { x.line("}") }() //end if block + + var ptrPfx, addrPfx string + if isptr { + ptrPfx = "*" + } else { + addrPfx = "&" + } + if t == timeTyp { + x.linef("} else if !z.DecBasicHandle().TimeNotBuiltin { %s%v = r.DecodeTime()", ptrPfx, varname) + // return + } + if t == rawTyp { + x.linef("} else { %s%v = z.DecRaw()", ptrPfx, varname) + return + } + + if t == rawExtTyp { + x.linef("} else { r.DecodeExt(%s%v, 0, nil)", addrPfx, varname) + return + } + + // only check for extensions if the type is named, and has a packagePath. 
+ if !x.nx && genImportPath(t) != "" && t.Name() != "" { + // first check if extensions are configured, before doing the interface conversion + // x.linef("} else if z.HasExtensions() && z.DecExt(%s) {", varname) + yy := fmt.Sprintf("%sxt%s", genTempVarPfx, mi) + x.linef("} else if %s := z.Extension(z.I2Rtid(%s)); %s != nil { z.DecExtension(%s, %s) ", yy, varname, yy, varname, yy) + } + + if ti2.bu || ti2.bup { // t.Implements(binaryUnmarshalerTyp) || tptr.Implements(binaryUnmarshalerTyp) { + x.linef("} else if z.DecBinary() { z.DecBinaryUnmarshal(%s%v) ", addrPfx, varname) + } + if ti2.ju || ti2.jup { // t.Implements(jsonUnmarshalerTyp) || tptr.Implements(jsonUnmarshalerTyp) { + x.linef("} else if !z.DecBinary() && z.IsJSONHandle() { z.DecJSONUnmarshal(%s%v)", addrPfx, varname) + } else if ti2.tu || ti2.tup { // t.Implements(textUnmarshalerTyp) || tptr.Implements(textUnmarshalerTyp) { + x.linef("} else if !z.DecBinary() { z.DecTextUnmarshal(%s%v)", addrPfx, varname) + } + + x.line("} else {") + + if x.decTryAssignPrimitive(varname, t, isptr) { + return + } + + switch t.Kind() { + case reflect.Array, reflect.Chan: + x.xtraSM(varname, t, false, isptr) + case reflect.Slice: + // if a []uint8, call dedicated function + // if a known fastpath slice, call dedicated function + // else write encode function in-line. + // - if elements are primitives or Selfers, call dedicated function on each member. + // - else call Encoder.encode(XXX) on it. + if rtid == uint8SliceTypId { + x.linef("%s%s = r.DecodeBytes(%s(%s[]byte)(%s), false)", + ptrPfx, varname, ptrPfx, ptrPfx, varname) + } else if fastpathAV.index(rtid) != -1 { + g := x.newGenV(t) + x.linef("z.F.%sX(%s%s, d)", g.MethodNamePfx("Dec", false), addrPfx, varname) + } else { + x.xtraSM(varname, t, false, isptr) + // x.decListFallback(varname, rtid, false, t) + } + case reflect.Map: + // if a known fastpath map, call dedicated function + // else write encode function in-line. + // - if elements are primitives or Selfers, call dedicated function on each member. + // - else call Encoder.encode(XXX) on it. + if fastpathAV.index(rtid) != -1 { + g := x.newGenV(t) + x.linef("z.F.%sX(%s%s, d)", g.MethodNamePfx("Dec", false), addrPfx, varname) + } else { + x.xtraSM(varname, t, false, isptr) + // x.decMapFallback(varname, rtid, t) + } + case reflect.Struct: + if inlist { + // no need to create temp variable if isptr, or x.F or x[F] + if isptr || strings.IndexByte(varname, '.') != -1 || strings.IndexByte(varname, '[') != -1 { + x.decStruct(varname, rtid, t) + } else { + varname2 := genTempVarPfx + "j" + mi + x.line(varname2 + " := &" + varname) + x.decStruct(varname2, rtid, t) + } + } else { + // delete(x.td, rtid) + x.line("z.DecFallback(" + addrPfx + varname + ", false)") + } + default: + if rtidAdded { + delete(x.te, rtid) + } + x.line("z.DecFallback(" + addrPfx + varname + ", true)") + } +} + +func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type, isptr bool) (done bool) { + // This should only be used for exact primitives (ie un-named types). + // Named types may be implementations of Selfer, Unmarshaler, etc. + // They should be handled by dec(...)
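+ // For example (illustrative), a plain (un-named) int32 target produces
+ // a generated line like:
+ //
+ //   x = (int32)(z.C.IntV(r.DecodeInt64(), 32))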
+ + var ptr string + if isptr { + ptr = "*" + } + switch t.Kind() { + case reflect.Int: + x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), codecSelferBitsize%s))", ptr, varname, x.genTypeName(t), x.xs) + case reflect.Int8: + x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), 8))", ptr, varname, x.genTypeName(t)) + case reflect.Int16: + x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), 16))", ptr, varname, x.genTypeName(t)) + case reflect.Int32: + x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), 32))", ptr, varname, x.genTypeName(t)) + case reflect.Int64: + x.linef("%s%s = (%s)(r.DecodeInt64())", ptr, varname, x.genTypeName(t)) + + case reflect.Uint: + x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), codecSelferBitsize%s))", ptr, varname, x.genTypeName(t), x.xs) + case reflect.Uint8: + x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), 8))", ptr, varname, x.genTypeName(t)) + case reflect.Uint16: + x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), 16))", ptr, varname, x.genTypeName(t)) + case reflect.Uint32: + x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), 32))", ptr, varname, x.genTypeName(t)) + case reflect.Uint64: + x.linef("%s%s = (%s)(r.DecodeUint64())", ptr, varname, x.genTypeName(t)) + case reflect.Uintptr: + x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), codecSelferBitsize%s))", ptr, varname, x.genTypeName(t), x.xs) + + case reflect.Float32: + x.linef("%s%s = (%s)(r.DecodeFloat32As64())", ptr, varname, x.genTypeName(t)) + case reflect.Float64: + x.linef("%s%s = (%s)(r.DecodeFloat64())", ptr, varname, x.genTypeName(t)) + + case reflect.Bool: + x.linef("%s%s = (%s)(r.DecodeBool())", ptr, varname, x.genTypeName(t)) + case reflect.String: + x.linef("%s%s = (%s)(r.DecodeString())", ptr, varname, x.genTypeName(t)) + default: + return false + } + return true +} + +func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type) { + if t.AssignableTo(uint8SliceTyp) { + x.line("*" + varname + " = r.DecodeBytes(*((*[]byte)(" + varname + ")), false)") + return + } + if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 { + x.linef("r.DecodeBytes( ((*[%d]byte)(%s))[:], true)", t.Len(), varname) + return + } + type tstruc struct { + TempVar string + Rand string + Varname string + CTyp string + Typ string + Immutable bool + Size int + } + telem := t.Elem() + ts := tstruc{genTempVarPfx, x.varsfx(), varname, x.genTypeName(t), x.genTypeName(telem), genIsImmutable(telem), int(telem.Size())} + + funcs := make(template.FuncMap) + + funcs["decLineVar"] = func(varname string) string { + x.decVar(varname, "", telem, false, true) + return "" + } + funcs["var"] = func(s string) string { + return ts.TempVar + s + ts.Rand + } + funcs["zero"] = func() string { + return x.genZeroValueR(telem) + } + funcs["isArray"] = func() bool { + return t.Kind() == reflect.Array + } + funcs["isSlice"] = func() bool { + return t.Kind() == reflect.Slice + } + funcs["isChan"] = func() bool { + return t.Kind() == reflect.Chan + } + tm, err := template.New("").Funcs(funcs).Parse(genDecListTmpl) + if err != nil { + panic(err) + } + if err = tm.Execute(x.w, &ts); err != nil { + panic(err) + } +} + +func (x *genRunner) decMapFallback(varname string, rtid uintptr, t reflect.Type) { + type tstruc struct { + TempVar string + Sfx string + Rand string + Varname string + KTyp string + Typ string + Size int + } + telem := t.Elem() + tkey := t.Key() + ts := tstruc{ + genTempVarPfx, x.xs, x.varsfx(), varname, x.genTypeName(tkey), + x.genTypeName(telem), int(telem.Size() + tkey.Size()), + } + + funcs := make(template.FuncMap) 
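+ // The helpers registered below are what genDecMapTmpl can invoke; e.g.
+ // {{var "v"}} expands to a generated temporary such as yyv1 (assuming
+ // genTempVarPfx == "yy").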
+ funcs["decElemZero"] = func() string { + return x.genZeroValueR(telem) + } + funcs["decElemKindImmutable"] = func() bool { + return genIsImmutable(telem) + } + funcs["decElemKindPtr"] = func() bool { + return telem.Kind() == reflect.Ptr + } + funcs["decElemKindIntf"] = func() bool { + return telem.Kind() == reflect.Interface + } + funcs["decLineVarK"] = func(varname string) string { + x.decVar(varname, "", tkey, false, true) + return "" + } + funcs["decLineVar"] = func(varname, decodedNilVarname string) string { + x.decVar(varname, decodedNilVarname, telem, false, true) + return "" + } + funcs["var"] = func(s string) string { + return ts.TempVar + s + ts.Rand + } + + tm, err := template.New("").Funcs(funcs).Parse(genDecMapTmpl) + if err != nil { + panic(err) + } + if err = tm.Execute(x.w, &ts); err != nil { + panic(err) + } +} + +func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintptr, t reflect.Type) { + ti := x.ti.get(rtid, t) + tisfi := ti.sfiSrc // always use sequence from file. decStruct expects same thing. + x.line("switch (" + kName + ") {") + var newbuf, nilbuf genBuf + for _, si := range tisfi { + x.line("case \"" + si.encName + "\":") + newbuf.reset() + nilbuf.reset() + t2 := x.decVarInitPtr(varname, "", t, si, &newbuf, &nilbuf) + x.linef("if r.TryDecodeAsNil() { %s } else { %s", nilbuf.buf, newbuf.buf) + x.decVarMain(varname+"."+t2.Name, x.varsfx(), t2.Type, false) + x.line("}") + } + x.line("default:") + // pass the slice here, so that the string will not escape, and maybe save allocation + x.line("z.DecStructFieldNotFound(-1, " + kName + ")") + x.line("} // end switch " + kName) +} + +func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t reflect.Type, style genStructMapStyle) { + tpfx := genTempVarPfx + ti := x.ti.get(rtid, t) + i := x.varsfx() + kName := tpfx + "s" + i + + switch style { + case genStructMapStyleLenPrefix: + x.linef("for %sj%s := 0; %sj%s < %s; %sj%s++ {", tpfx, i, tpfx, i, lenvarname, tpfx, i) + case genStructMapStyleCheckBreak: + x.linef("for %sj%s := 0; !r.CheckBreak(); %sj%s++ {", tpfx, i, tpfx, i) + default: // 0, otherwise. + x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length + x.linef("for %sj%s := 0; ; %sj%s++ {", tpfx, i, tpfx, i) + x.linef("if %shl%s { if %sj%s >= %s { break }", tpfx, i, tpfx, i, lenvarname) + x.line("} else { if r.CheckBreak() { break }; }") + } + x.line("r.ReadMapElemKey()") + + // emulate decstructfieldkey + switch ti.keyType { + case valueTypeInt: + x.linef("%s := z.StringView(strconv.AppendInt(z.DecScratchArrayBuffer()[:0], r.DecodeInt64(), 10))", kName) + case valueTypeUint: + x.linef("%s := z.StringView(strconv.AppendUint(z.DecScratchArrayBuffer()[:0], r.DecodeUint64(), 10))", kName) + case valueTypeFloat: + x.linef("%s := z.StringView(strconv.AppendFloat(z.DecScratchArrayBuffer()[:0], r.DecodeFloat64(), 'f', -1, 64))", kName) + default: // string + x.linef("%s := z.StringView(r.DecodeStringAsBytes())", kName) + } + // x.linef("%s := z.StringView(r.DecStructFieldKey(codecSelferValueType%s%s, z.DecScratchArrayBuffer()))", kName, ti.keyType.String(), x.xs) + + x.line("r.ReadMapElemValue()") + x.decStructMapSwitch(kName, varname, rtid, t) + + x.line("} // end for " + tpfx + "j" + i) + x.line("r.ReadMapEnd()") +} + +func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid uintptr, t reflect.Type) { + tpfx := genTempVarPfx + i := x.varsfx() + ti := x.ti.get(rtid, t) + tisfi := ti.sfiSrc // always use sequence from file. 
decStruct expects same thing. + x.linef("var %sj%s int", tpfx, i) + x.linef("var %sb%s bool", tpfx, i) // break + x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length + var newbuf, nilbuf genBuf + for _, si := range tisfi { + x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }", + tpfx, i, tpfx, i, tpfx, i, + tpfx, i, lenvarname, tpfx, i) + x.linef("if %sb%s { r.ReadArrayEnd(); %s }", tpfx, i, breakString) + x.line("r.ReadArrayElem()") + newbuf.reset() + nilbuf.reset() + t2 := x.decVarInitPtr(varname, "", t, si, &newbuf, &nilbuf) + x.linef("if r.TryDecodeAsNil() { %s } else { %s", nilbuf.buf, newbuf.buf) + x.decVarMain(varname+"."+t2.Name, x.varsfx(), t2.Type, false) + x.line("}") + } + // read remaining values and throw away. + x.line("for {") + x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }", + tpfx, i, tpfx, i, tpfx, i, + tpfx, i, lenvarname, tpfx, i) + x.linef("if %sb%s { break }", tpfx, i) + x.line("r.ReadArrayElem()") + x.linef(`z.DecStructFieldNotFound(%sj%s - 1, "")`, tpfx, i) + x.line("}") + x.line("r.ReadArrayEnd()") +} + +func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) { + // varname MUST be a ptr, or a struct field or a slice element. + i := x.varsfx() + x.linef("%sct%s := r.ContainerType()", genTempVarPfx, i) + x.linef("if %sct%s == codecSelferValueTypeMap%s {", genTempVarPfx, i, x.xs) + x.line(genTempVarPfx + "l" + i + " := r.ReadMapStart()") + x.linef("if %sl%s == 0 {", genTempVarPfx, i) + x.line("r.ReadMapEnd()") + if genUseOneFunctionForDecStructMap { + x.line("} else { ") + x.linef("%s.codecDecodeSelfFromMap(%sl%s, d)", varname, genTempVarPfx, i) + } else { + x.line("} else if " + genTempVarPfx + "l" + i + " > 0 { ") + x.line(varname + ".codecDecodeSelfFromMapLenPrefix(" + genTempVarPfx + "l" + i + ", d)") + x.line("} else {") + x.line(varname + ".codecDecodeSelfFromMapCheckBreak(" + genTempVarPfx + "l" + i + ", d)") + } + x.line("}") + + // else if container is array + x.linef("} else if %sct%s == codecSelferValueTypeArray%s {", genTempVarPfx, i, x.xs) + x.line(genTempVarPfx + "l" + i + " := r.ReadArrayStart()") + x.linef("if %sl%s == 0 {", genTempVarPfx, i) + x.line("r.ReadArrayEnd()") + x.line("} else { ") + x.linef("%s.codecDecodeSelfFromArray(%sl%s, d)", varname, genTempVarPfx, i) + x.line("}") + // else panic + x.line("} else { ") + x.line("panic(errCodecSelferOnlyMapOrArrayEncodeToStruct" + x.xs + ")") + x.line("} ") +} + +// -------- + +type genV struct { + // genV is either a primitive (Primitive != "") or a map (MapKey != "") or a slice + MapKey string + Elem string + Primitive string + Size int +} + +func (x *genRunner) newGenV(t reflect.Type) (v genV) { + switch t.Kind() { + case reflect.Slice, reflect.Array: + te := t.Elem() + v.Elem = x.genTypeName(te) + v.Size = int(te.Size()) + case reflect.Map: + te, tk := t.Elem(), t.Key() + v.Elem = x.genTypeName(te) + v.MapKey = x.genTypeName(tk) + v.Size = int(te.Size() + tk.Size()) + default: + panic("unexpected type for newGenV. Requires map or slice type") + } + return +} + +func (x *genV) MethodNamePfx(prefix string, prim bool) string { + var name []byte + if prefix != "" { + name = append(name, prefix...) + } + if prim { + name = append(name, genTitleCaseName(x.Primitive)...) + } else { + if x.MapKey == "" { + name = append(name, "Slice"...) + } else { + name = append(name, "Map"...) + name = append(name, genTitleCaseName(x.MapKey)...) + } + name = append(name, genTitleCaseName(x.Elem)...) 
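+ // e.g. (illustrative): prefix "Enc" with map[string]interface{} yields
+ // "EncMapStringIntf", and prefix "Dec" with []uint64 yields
+ // "DecSliceUint64" (genTitleCaseName below maps "interface{}" to "Intf").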
+ } + return string(name) + +} + +// genImportPath returns the import path of a non-predeclared named type, or an empty string otherwise. +// +// This handles the misbehaviour that occurs when 1.5-style vendoring is enabled, +// where PkgPath returns the full path, including the vendoring prefix that should have been stripped. +// We strip it here. +func genImportPath(t reflect.Type) (s string) { + s = t.PkgPath() + if genCheckVendor { + // HACK: always handle vendoring. It should be typically on in go 1.6, 1.7 + s = genStripVendor(s) + } + return +} + +// A go identifier is (letter|_)[letter|number|_]* +func genGoIdentifier(s string, checkFirstChar bool) string { + b := make([]byte, 0, len(s)) + t := make([]byte, 4) + var n int + for i, r := range s { + if checkFirstChar && i == 0 && !unicode.IsLetter(r) { + b = append(b, '_') + } + // r must be unicode_letter, unicode_digit or _ + if unicode.IsLetter(r) || unicode.IsDigit(r) { + n = utf8.EncodeRune(t, r) + b = append(b, t[:n]...) + } else { + b = append(b, '_') + } + } + return string(b) +} + +func genNonPtr(t reflect.Type) reflect.Type { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + return t +} + +func genTitleCaseName(s string) string { + switch s { + case "interface{}", "interface {}": + return "Intf" + default: + return strings.ToUpper(s[0:1]) + s[1:] + } +} + +func genMethodNameT(t reflect.Type, tRef reflect.Type) (n string) { + var ptrPfx string + for t.Kind() == reflect.Ptr { + ptrPfx += "Ptrto" + t = t.Elem() + } + tstr := t.String() + if tn := t.Name(); tn != "" { + if tRef != nil && genImportPath(t) == genImportPath(tRef) { + return ptrPfx + tn + } else { + if genQNameRegex.MatchString(tstr) { + return ptrPfx + strings.Replace(tstr, ".", "_", 1000) + } else { + return ptrPfx + genCustomTypeName(tstr) + } + } + } + switch t.Kind() { + case reflect.Map: + return ptrPfx + "Map" + genMethodNameT(t.Key(), tRef) + genMethodNameT(t.Elem(), tRef) + case reflect.Slice: + return ptrPfx + "Slice" + genMethodNameT(t.Elem(), tRef) + case reflect.Array: + return ptrPfx + "Array" + strconv.FormatInt(int64(t.Len()), 10) + genMethodNameT(t.Elem(), tRef) + case reflect.Chan: + var cx string + switch t.ChanDir() { + case reflect.SendDir: + cx = "ChanSend" + case reflect.RecvDir: + cx = "ChanRecv" + default: + cx = "Chan" + } + return ptrPfx + cx + genMethodNameT(t.Elem(), tRef) + default: + if t == intfTyp { + return ptrPfx + "Interface" + } else { + if tRef != nil && genImportPath(t) == genImportPath(tRef) { + if t.Name() != "" { + return ptrPfx + t.Name() + } else { + return ptrPfx + genCustomTypeName(tstr) + } + } else { + // best way to get the package name inclusive + // return ptrPfx + strings.Replace(tstr, ".", "_", 1000) + // return ptrPfx + genBase64enc.EncodeToString([]byte(tstr)) + if t.Name() != "" && genQNameRegex.MatchString(tstr) { + return ptrPfx + strings.Replace(tstr, ".", "_", 1000) + } else { + return ptrPfx + genCustomTypeName(tstr) + } + } + } + } +} + +// genCustomTypeName base64-encodes the t.String() value in such a way +// that it can be used within a function name.
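+// For example (illustrative): a type string such as "pkg.T" is encoded
+// with genBase64enc, and any trailing '=' padding is dropped because '='
+// cannot appear in a Go identifier.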
+func genCustomTypeName(tstr string) string { + len2 := genBase64enc.EncodedLen(len(tstr)) + bufx := make([]byte, len2) + genBase64enc.Encode(bufx, []byte(tstr)) + for i := len2 - 1; i >= 0; i-- { + if bufx[i] == '=' { + len2-- + } else { + break + } + } + return string(bufx[:len2]) +} + +func genIsImmutable(t reflect.Type) (v bool) { + return isImmutableKind(t.Kind()) +} + +type genInternal struct { + Version int + Values []genV +} + +func (x genInternal) FastpathLen() (l int) { + for _, v := range x.Values { + if v.Primitive == "" && !(v.MapKey == "" && v.Elem == "uint8") { + l++ + } + } + return +} + +func genInternalZeroValue(s string) string { + switch s { + case "interface{}", "interface {}": + return "nil" + case "bool": + return "false" + case "string": + return `""` + default: + return "0" + } +} + +var genInternalNonZeroValueIdx [5]uint64 +var genInternalNonZeroValueStrs = [2][5]string{ + {`"string-is-an-interface"`, "true", `"some-string"`, "11.1", "33"}, + {`"string-is-an-interface-2"`, "true", `"some-string-2"`, "22.2", "44"}, +} + +func genInternalNonZeroValue(s string) string { + switch s { + case "interface{}", "interface {}": + genInternalNonZeroValueIdx[0]++ + return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[0]%2][0] // return string, to remove ambiguity + case "bool": + genInternalNonZeroValueIdx[1]++ + return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[1]%2][1] + case "string": + genInternalNonZeroValueIdx[2]++ + return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[2]%2][2] + case "float32", "float64", "float", "double": + genInternalNonZeroValueIdx[3]++ + return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[3]%2][3] + default: + genInternalNonZeroValueIdx[4]++ + return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[4]%2][4] + } +} + +func genInternalEncCommandAsString(s string, vname string) string { + switch s { + case "uint", "uint8", "uint16", "uint32", "uint64": + return "ee.EncodeUint(uint64(" + vname + "))" + case "int", "int8", "int16", "int32", "int64": + return "ee.EncodeInt(int64(" + vname + "))" + case "string": + return "if e.h.StringToRaw { ee.EncodeStringBytesRaw(bytesView(" + vname + ")) " + + "} else { ee.EncodeStringEnc(cUTF8, " + vname + ") }" + case "float32": + return "ee.EncodeFloat32(" + vname + ")" + case "float64": + return "ee.EncodeFloat64(" + vname + ")" + case "bool": + return "ee.EncodeBool(" + vname + ")" + // case "symbol": + // return "ee.EncodeSymbol(" + vname + ")" + default: + return "e.encode(" + vname + ")" + } +} + +func genInternalDecCommandAsString(s string) string { + switch s { + case "uint": + return "uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))" + case "uint8": + return "uint8(chkOvf.UintV(dd.DecodeUint64(), 8))" + case "uint16": + return "uint16(chkOvf.UintV(dd.DecodeUint64(), 16))" + case "uint32": + return "uint32(chkOvf.UintV(dd.DecodeUint64(), 32))" + case "uint64": + return "dd.DecodeUint64()" + case "uintptr": + return "uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))" + case "int": + return "int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))" + case "int8": + return "int8(chkOvf.IntV(dd.DecodeInt64(), 8))" + case "int16": + return "int16(chkOvf.IntV(dd.DecodeInt64(), 16))" + case "int32": + return "int32(chkOvf.IntV(dd.DecodeInt64(), 32))" + case "int64": + return "dd.DecodeInt64()" + + case "string": + return "dd.DecodeString()" + case "float32": + return "float32(chkOvf.Float32V(dd.DecodeFloat64()))" + case "float64": + return "dd.DecodeFloat64()" + case 
"bool": + return "dd.DecodeBool()" + default: + panic(errors.New("gen internal: unknown type for decode: " + s)) + } +} + +func genInternalSortType(s string, elem bool) string { + for _, v := range [...]string{"int", "uint", "float", "bool", "string"} { + if strings.HasPrefix(s, v) { + if elem { + if v == "int" || v == "uint" || v == "float" { + return v + "64" + } else { + return v + } + } + return v + "Slice" + } + } + panic("sorttype: unexpected type: " + s) +} + +func genStripVendor(s string) string { + // HACK: Misbehaviour occurs in go 1.5. May have to re-visit this later. + // if s contains /vendor/ OR startsWith vendor/, then return everything after it. + const vendorStart = "vendor/" + const vendorInline = "/vendor/" + if i := strings.LastIndex(s, vendorInline); i >= 0 { + s = s[i+len(vendorInline):] + } else if strings.HasPrefix(s, vendorStart) { + s = s[len(vendorStart):] + } + return s +} + +// var genInternalMu sync.Mutex +var genInternalV = genInternal{Version: genVersion} +var genInternalTmplFuncs template.FuncMap +var genInternalOnce sync.Once + +func genInternalInit() { + types := [...]string{ + "interface{}", + "string", + "float32", + "float64", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintptr", + "int", + "int8", + "int16", + "int32", + "int64", + "bool", + } + // keep as slice, so it is in specific iteration order. + // Initial order was uint64, string, interface{}, int, int64 + mapvaltypes := [...]string{ + "interface{}", + "string", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintptr", + "int", + "int8", + "int16", + "int32", + "int64", + "float32", + "float64", + "bool", + } + wordSizeBytes := int(intBitsize) / 8 + + mapvaltypes2 := map[string]int{ + "interface{}": 2 * wordSizeBytes, + "string": 2 * wordSizeBytes, + "uint": 1 * wordSizeBytes, + "uint8": 1, + "uint16": 2, + "uint32": 4, + "uint64": 8, + "uintptr": 1 * wordSizeBytes, + "int": 1 * wordSizeBytes, + "int8": 1, + "int16": 2, + "int32": 4, + "int64": 8, + "float32": 4, + "float64": 8, + "bool": 1, + } + var gt = genInternal{Version: genVersion} + + // For each slice or map type, there must be a (symmetrical) Encode and Decode fast-path function + for _, s := range types { + gt.Values = append(gt.Values, genV{Primitive: s, Size: mapvaltypes2[s]}) + // if s != "uint8" { // do not generate fast path for slice of bytes. Treat specially already. + // gt.Values = append(gt.Values, genV{Elem: s, Size: mapvaltypes2[s]}) + // } + gt.Values = append(gt.Values, genV{Elem: s, Size: mapvaltypes2[s]}) + if _, ok := mapvaltypes2[s]; !ok { + gt.Values = append(gt.Values, genV{MapKey: s, Elem: s, Size: 2 * mapvaltypes2[s]}) + } + for _, ms := range mapvaltypes { + gt.Values = append(gt.Values, genV{MapKey: s, Elem: ms, Size: mapvaltypes2[s] + mapvaltypes2[ms]}) + } + } + + funcs := make(template.FuncMap) + // funcs["haspfx"] = strings.HasPrefix + funcs["encmd"] = genInternalEncCommandAsString + funcs["decmd"] = genInternalDecCommandAsString + funcs["zerocmd"] = genInternalZeroValue + funcs["nonzerocmd"] = genInternalNonZeroValue + funcs["hasprefix"] = strings.HasPrefix + funcs["sorttype"] = genInternalSortType + + genInternalV = gt + genInternalTmplFuncs = funcs +} + +// genInternalGoFile is used to generate source files from templates. +// It is run by the program author alone. +// Unfortunately, it has to be exported so that it can be called from a command line tool. 
+// *** DO NOT USE *** +func genInternalGoFile(r io.Reader, w io.Writer) (err error) { + genInternalOnce.Do(genInternalInit) + + gt := genInternalV + + t := template.New("").Funcs(genInternalTmplFuncs) + + tmplstr, err := ioutil.ReadAll(r) + if err != nil { + return + } + + if t, err = t.Parse(string(tmplstr)); err != nil { + return + } + + var out bytes.Buffer + err = t.Execute(&out, gt) + if err != nil { + return + } + + bout, err := format.Source(out.Bytes()) + if err != nil { + w.Write(out.Bytes()) // write out if error, so we can still see. + // w.Write(bout) // write out if error, as much as possible, so we can still see. + return + } + w.Write(bout) + return +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/goversion_arrayof_gte_go15.go b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_arrayof_gte_go15.go new file mode 100644 index 0000000000000..9ddbe2059336f --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_arrayof_gte_go15.go @@ -0,0 +1,14 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.5 + +package codec + +import "reflect" + +const reflectArrayOfSupported = true + +func reflectArrayOf(count int, elem reflect.Type) reflect.Type { + return reflect.ArrayOf(count, elem) +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/goversion_arrayof_lt_go15.go b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_arrayof_lt_go15.go new file mode 100644 index 0000000000000..c5fcd6697ef6d --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_arrayof_lt_go15.go @@ -0,0 +1,14 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build !go1.5 + +package codec + +import "reflect" + +const reflectArrayOfSupported = false + +func reflectArrayOf(count int, elem reflect.Type) reflect.Type { + panic("codec: reflect.ArrayOf unsupported in this go version") +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/goversion_makemap_gte_go19.go b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_makemap_gte_go19.go new file mode 100644 index 0000000000000..bc39d6b719f34 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_makemap_gte_go19.go @@ -0,0 +1,15 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.9 + +package codec + +import "reflect" + +func makeMapReflect(t reflect.Type, size int) reflect.Value { + if size < 0 { + return reflect.MakeMapWithSize(t, 4) + } + return reflect.MakeMapWithSize(t, size) +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/goversion_makemap_lt_go19.go b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_makemap_lt_go19.go new file mode 100644 index 0000000000000..cde4cd372514d --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_makemap_lt_go19.go @@ -0,0 +1,12 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
+ +// +build !go1.9 + +package codec + +import "reflect" + +func makeMapReflect(t reflect.Type, size int) reflect.Value { + return reflect.MakeMap(t) +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/goversion_unexportedembeddedptr_gte_go110.go b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_unexportedembeddedptr_gte_go110.go new file mode 100644 index 0000000000000..794133a3cbb79 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_unexportedembeddedptr_gte_go110.go @@ -0,0 +1,8 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.10 + +package codec + +const allowSetUnexportedEmbeddedPtr = false diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/goversion_unexportedembeddedptr_lt_go110.go b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_unexportedembeddedptr_lt_go110.go new file mode 100644 index 0000000000000..fd92ede3558ec --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_unexportedembeddedptr_lt_go110.go @@ -0,0 +1,8 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build !go1.10 + +package codec + +const allowSetUnexportedEmbeddedPtr = true diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/goversion_unsupported_lt_go14.go b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_unsupported_lt_go14.go new file mode 100644 index 0000000000000..8debfa6137131 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_unsupported_lt_go14.go @@ -0,0 +1,17 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build !go1.4 + +package codec + +// This codec package will only work for go1.4 and above. +// This is for the following reasons: +// - go 1.4 was released in 2014 +// - go runtime is written fully in go +// - interface only holds pointers +// - reflect.Value is stabilized as 3 words + +func init() { + panic("codec: go 1.3 and below are not supported") +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_eq_go15.go b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_eq_go15.go new file mode 100644 index 0000000000000..0f1bb01e5a134 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_eq_go15.go @@ -0,0 +1,10 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.5,!go1.6 + +package codec + +import "os" + +var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1" diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_eq_go16.go b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_eq_go16.go new file mode 100644 index 0000000000000..2fb4b057ddc0b --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_eq_go16.go @@ -0,0 +1,10 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
+ +// +build go1.6,!go1.7 + +package codec + +import "os" + +var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") != "0" diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_gte_go17.go b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_gte_go17.go new file mode 100644 index 0000000000000..c5b8155053948 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_gte_go17.go @@ -0,0 +1,8 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.7 + +package codec + +const genCheckVendor = true diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_lt_go15.go b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_lt_go15.go new file mode 100644 index 0000000000000..837cf240bae3a --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/goversion_vendor_lt_go15.go @@ -0,0 +1,8 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build !go1.5 + +package codec + +var genCheckVendor = false diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/helper.go b/vendor/github.com/hashicorp/go-msgpack/codec/helper.go index 7da3955edc99f..228ad93d1594f 100644 --- a/vendor/github.com/hashicorp/go-msgpack/codec/helper.go +++ b/vendor/github.com/hashicorp/go-msgpack/codec/helper.go @@ -1,68 +1,200 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. package codec // Contains code shared by both encode and decode. +// Some shared ideas around encoding/decoding +// ------------------------------------------ +// +// If an interface{} is passed, we first do a type assertion to see if it is +// a primitive type or a map/slice of primitive types, and use a fastpath to handle it. +// +// If we start with a reflect.Value, we are already in reflect.Value land and +// will try to grab the function for the underlying Type and directly call that function. +// This is more performant than calling reflect.Value.Interface(). +// +// This still helps us bypass many layers of reflection, and give best performance. +// +// Containers +// ------------ +// Containers in the stream are either associative arrays (key-value pairs) or +// regular arrays (indexed by incrementing integers). +// +// Some streams support indefinite-length containers, and use a breaking +// byte-sequence to denote that the container has come to an end. +// +// Some streams also are text-based, and use explicit separators to denote the +// end/beginning of different values. +// +// During encode, we use a high-level condition to determine how to iterate through +// the container. That decision is based on whether the container is text-based (with +// separators) or binary (without separators). If binary, we do not even call the +// encoding of separators. +// +// During decode, we use a different high-level condition to determine how to iterate +// through the containers. That decision is based on whether the stream contained +// a length prefix, or if it used explicit breaks. If length-prefixed, we assume that +// it has to be binary, and we do not even try to read separators. 
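+//
+// (For example: msgpack writes length-prefixed containers; cbor also
+// supports indefinite-length containers terminated by a "break"; json is
+// text-based, with explicit separators between values.)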
+// +// Philosophy +// ------------ +// On decode, this codec will update containers appropriately: +// - If struct, update fields from stream into fields of struct. +// If field in stream not found in struct, handle appropriately (based on option). +// If a struct field has no corresponding value in the stream, leave it AS IS. +// If nil in stream, set value to nil/zero value. +// - If map, update map from stream. +// If the stream value is NIL, set the map to nil. +// - if slice, try to update up to length of array in stream. +// if container len is less than stream array length, +// and container cannot be expanded, handled (based on option). +// This means you can decode 4-element stream array into 1-element array. +// +// ------------------------------------ +// On encode, user can specify omitEmpty. This means that the value will be omitted +// if the zero value. The problem may occur during decode, where omitted values do not affect +// the value being decoded into. This means that if decoding into a struct with an +// int field with current value=5, and the field is omitted in the stream, then after +// decoding, the value will still be 5 (not 0). +// omitEmpty only works if you guarantee that you always decode into zero-values. +// +// ------------------------------------ +// We could have truncated a map to remove keys not available in the stream, +// or set values in the struct which are not in the stream to their zero values. +// We decided against it because there is no efficient way to do it. +// We may introduce it as an option later. +// However, that will require enabling it for both runtime and code generation modes. +// +// To support truncate, we need to do 2 passes over the container: +// map +// - first collect all keys (e.g. in k1) +// - for each key in stream, mark k1 that the key should not be removed +// - after updating map, do second pass and call delete for all keys in k1 which are not marked +// struct: +// - for each field, track the *typeInfo s1 +// - iterate through all s1, and for each one not marked, set value to zero +// - this involves checking the possible anonymous fields which are nil ptrs. +// too much work. +// +// ------------------------------------------ +// Error Handling is done within the library using panic. +// +// This way, the code doesn't have to keep checking if an error has happened, +// and we don't have to keep sending the error value along with each call +// or storing it in the En|Decoder and checking it constantly along the way. +// +// The disadvantage is that small functions which use panics cannot be inlined. +// The code accounts for that by only using panics behind an interface; +// since interface calls cannot be inlined, this is irrelevant. +// +// We considered storing the error is En|Decoder. +// - once it has its err field set, it cannot be used again. +// - panicing will be optional, controlled by const flag. +// - code should always check error first and return early. +// We eventually decided against it as it makes the code clumsier to always +// check for these error conditions. + import ( + "bytes" + "encoding" "encoding/binary" + "errors" "fmt" + "io" "math" "reflect" "sort" + "strconv" "strings" "sync" + "sync/atomic" "time" - "unicode" - "unicode/utf8" ) const ( - structTagName = "codec" + scratchByteArrayLen = 32 + // initCollectionCap = 16 // 32 is defensive. 16 is preferred. 
- // Support - // encoding.BinaryMarshaler: MarshalBinary() (data []byte, err error) - // encoding.BinaryUnmarshaler: UnmarshalBinary(data []byte) error + // Support encoding.(Binary|Text)(Unm|M)arshaler. // This constant flag will enable or disable it. - supportBinaryMarshal = true - - // Each Encoder or Decoder uses a cache of functions based on conditionals, - // so that the conditionals are not run every time. - // - // Either a map or a slice is used to keep track of the functions. - // The map is more natural, but has a higher cost than a slice/array. - // This flag (useMapForCodecCache) controls which is used. - useMapForCodecCache = false - - // For some common container types, we can short-circuit an elaborate - // reflection dance and call encode/decode directly. - // The currently supported types are: - // - slices of strings, or id's (int64,uint64) or interfaces. - // - maps of str->str, str->intf, id(int64,uint64)->intf, intf->intf - shortCircuitReflectToFastPath = true + supportMarshalInterfaces = true // for debugging, set this to false, to catch panic traces. // Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic. recoverPanicToErr = true - // if checkStructForEmptyValue, check structs fields to see if an empty value. - // This could be an expensive call, so possibly disable it. - checkStructForEmptyValue = false + // arrayCacheLen is the length of the cache used in encoder or decoder for + // allowing zero-alloc initialization. + // arrayCacheLen = 8 + + // size of the cacheline: defaulting to value for archs: amd64, arm64, 386 + // should use "runtime/internal/sys".CacheLineSize, but that is not exposed. + cacheLineSize = 64 - // if derefForIsEmptyValue, deref pointers and interfaces when checking isEmptyValue - derefForIsEmptyValue = false + wordSizeBits = 32 << (^uint(0) >> 63) // strconv.IntSize + wordSize = wordSizeBits / 8 + + // so structFieldInfo fits into 8 bytes + maxLevelsEmbedding = 14 + + // useFinalizers=true configures finalizers to release pool'ed resources + // acquired by Encoder/Decoder during their GC. + // + // Note that calling SetFinalizer is always expensive, + // as code must be run on the systemstack even for SetFinalizer(t, nil). + // + // We document that folks SHOULD call Release() when done, or they can + // explicitly call SetFinalizer themselves e.g. + // runtime.SetFinalizer(e, (*Encoder).Release) + // runtime.SetFinalizer(d, (*Decoder).Release) + useFinalizers = false ) +var oneByteArr [1]byte +var zeroByteSlice = oneByteArr[:0:0] + +var codecgen bool + +var refBitset bitset256 +var pool pooler +var panicv panicHdl + +func init() { + pool.init() + + refBitset.set(byte(reflect.Map)) + refBitset.set(byte(reflect.Ptr)) + refBitset.set(byte(reflect.Func)) + refBitset.set(byte(reflect.Chan)) +} + +type clsErr struct { + closed bool // is it closed? 
+	errClosed error // error on closing
+}
+
+// type entryType uint8
+
+// const (
+// 	entryTypeBytes entryType = iota // make this 0, so a comparison is cheap
+// 	entryTypeIo
+// 	entryTypeBufio
+// 	entryTypeUnset = 255
+// )
+
 type charEncoding uint8
 
 const (
-	c_RAW charEncoding = iota
-	c_UTF8
-	c_UTF16LE
-	c_UTF16BE
-	c_UTF32LE
-	c_UTF32BE
+	_ charEncoding = iota // make 0 unset
+	cUTF8
+	cUTF16LE
+	cUTF16BE
+	cUTF32LE
+	cUTF32BE
+	// Deprecated: not a true char encoding value
+	cRAW charEncoding = 255
 )
 
 // valueType is the stream type
@@ -80,517 +212,2508 @@ const (
 	valueTypeBytes
 	valueTypeMap
 	valueTypeArray
-	valueTypeTimestamp
+	valueTypeTime
 	valueTypeExt
 
-	valueTypeInvalid = 0xff
+	// valueTypeInvalid = 0xff
+)
+
+var valueTypeStrings = [...]string{
+	"Unset",
+	"Nil",
+	"Int",
+	"Uint",
+	"Float",
+	"Bool",
+	"String",
+	"Symbol",
+	"Bytes",
+	"Map",
+	"Array",
+	"Timestamp",
+	"Ext",
+}
+
+func (x valueType) String() string {
+	if int(x) < len(valueTypeStrings) {
+		return valueTypeStrings[x]
+	}
+	return strconv.FormatInt(int64(x), 10)
+}
+
+type seqType uint8
+
+const (
+	_ seqType = iota
+	seqTypeArray
+	seqTypeSlice
+	seqTypeChan
+)
+
+// Note that containerMapStart and containerArrayStart are not sent.
+// This is because the ReadXXXStart and EncodeXXXStart methods already do these.
+type containerState uint8
+
+const (
+	_ containerState = iota
+
+	containerMapStart // slot left open, since Driver method already covers it
+	containerMapKey
+	containerMapValue
+	containerMapEnd
+	containerArrayStart // slot left open, since Driver methods already cover it
+	containerArrayElem
+	containerArrayEnd
+)
+
+// // sfiIdx used for tracking where a (field/enc)Name is seen in a []*structFieldInfo
+// type sfiIdx struct {
+// 	name  string
+// 	index int
+// }
+
+// Do not recurse if a containing type refers to an embedded type
+// which refers back to its containing type (via a pointer).
+// The second time this back-reference happens, break out,
+// so as not to cause an infinite loop.
+const rgetMaxRecursion = 2
+
+// Anecdotally, we believe most types have <= 12 fields.
+// - even Java's PMD rules set the TooManyFields threshold to 15.
+// However, Go has embedded fields, which should be regarded as
+// top level, allowing the effective field count to possibly double or triple.
+// In addition, we don't want to keep creating transient arrays,
+// especially for the sfi index tracking, and the etypes tracking.
+// +// So - try to keep typeInfoLoadArray within 2K bytes +const ( + typeInfoLoadArraySfisLen = 16 + typeInfoLoadArraySfiidxLen = 8 * 112 + typeInfoLoadArrayEtypesLen = 12 + typeInfoLoadArrayBLen = 8 * 4 ) +type typeInfoLoad struct { + // fNames []string + // encNames []string + etypes []uintptr + sfis []structFieldInfo +} + +type typeInfoLoadArray struct { + // fNames [typeInfoLoadArrayLen]string + // encNames [typeInfoLoadArrayLen]string + sfis [typeInfoLoadArraySfisLen]structFieldInfo + sfiidx [typeInfoLoadArraySfiidxLen]byte + etypes [typeInfoLoadArrayEtypesLen]uintptr + b [typeInfoLoadArrayBLen]byte // scratch - used for struct field names +} + +// mirror json.Marshaler and json.Unmarshaler here, +// so we don't import the encoding/json package + +type jsonMarshaler interface { + MarshalJSON() ([]byte, error) +} +type jsonUnmarshaler interface { + UnmarshalJSON([]byte) error +} + +type isZeroer interface { + IsZero() bool +} + +type codecError struct { + name string + err interface{} +} + +func (e codecError) Cause() error { + switch xerr := e.err.(type) { + case nil: + return nil + case error: + return xerr + case string: + return errors.New(xerr) + case fmt.Stringer: + return errors.New(xerr.String()) + default: + return fmt.Errorf("%v", e.err) + } +} + +func (e codecError) Error() string { + return fmt.Sprintf("%s error: %v", e.name, e.err) +} + +// type byteAccepter func(byte) bool + var ( bigen = binary.BigEndian structInfoFieldName = "_struct" - cachedTypeInfo = make(map[uintptr]*typeInfo, 4) - cachedTypeInfoMutex sync.RWMutex - - intfSliceTyp = reflect.TypeOf([]interface{}(nil)) - intfTyp = intfSliceTyp.Elem() - - strSliceTyp = reflect.TypeOf([]string(nil)) - boolSliceTyp = reflect.TypeOf([]bool(nil)) - uintSliceTyp = reflect.TypeOf([]uint(nil)) - uint8SliceTyp = reflect.TypeOf([]uint8(nil)) - uint16SliceTyp = reflect.TypeOf([]uint16(nil)) - uint32SliceTyp = reflect.TypeOf([]uint32(nil)) - uint64SliceTyp = reflect.TypeOf([]uint64(nil)) - intSliceTyp = reflect.TypeOf([]int(nil)) - int8SliceTyp = reflect.TypeOf([]int8(nil)) - int16SliceTyp = reflect.TypeOf([]int16(nil)) - int32SliceTyp = reflect.TypeOf([]int32(nil)) - int64SliceTyp = reflect.TypeOf([]int64(nil)) - float32SliceTyp = reflect.TypeOf([]float32(nil)) - float64SliceTyp = reflect.TypeOf([]float64(nil)) - - mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil)) mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil)) - mapStrStrTyp = reflect.TypeOf(map[string]string(nil)) - - mapIntIntfTyp = reflect.TypeOf(map[int]interface{}(nil)) - mapInt64IntfTyp = reflect.TypeOf(map[int64]interface{}(nil)) - mapUintIntfTyp = reflect.TypeOf(map[uint]interface{}(nil)) - mapUint64IntfTyp = reflect.TypeOf(map[uint64]interface{}(nil)) - - stringTyp = reflect.TypeOf("") - timeTyp = reflect.TypeOf(time.Time{}) - rawExtTyp = reflect.TypeOf(RawExt{}) - - mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem() - binaryMarshalerTyp = reflect.TypeOf((*binaryMarshaler)(nil)).Elem() - binaryUnmarshalerTyp = reflect.TypeOf((*binaryUnmarshaler)(nil)).Elem() - - rawExtTypId = reflect.ValueOf(rawExtTyp).Pointer() - intfTypId = reflect.ValueOf(intfTyp).Pointer() - timeTypId = reflect.ValueOf(timeTyp).Pointer() - - intfSliceTypId = reflect.ValueOf(intfSliceTyp).Pointer() - strSliceTypId = reflect.ValueOf(strSliceTyp).Pointer() - - boolSliceTypId = reflect.ValueOf(boolSliceTyp).Pointer() - uintSliceTypId = reflect.ValueOf(uintSliceTyp).Pointer() - uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer() - uint16SliceTypId = 
reflect.ValueOf(uint16SliceTyp).Pointer() - uint32SliceTypId = reflect.ValueOf(uint32SliceTyp).Pointer() - uint64SliceTypId = reflect.ValueOf(uint64SliceTyp).Pointer() - intSliceTypId = reflect.ValueOf(intSliceTyp).Pointer() - int8SliceTypId = reflect.ValueOf(int8SliceTyp).Pointer() - int16SliceTypId = reflect.ValueOf(int16SliceTyp).Pointer() - int32SliceTypId = reflect.ValueOf(int32SliceTyp).Pointer() - int64SliceTypId = reflect.ValueOf(int64SliceTyp).Pointer() - float32SliceTypId = reflect.ValueOf(float32SliceTyp).Pointer() - float64SliceTypId = reflect.ValueOf(float64SliceTyp).Pointer() - - mapStrStrTypId = reflect.ValueOf(mapStrStrTyp).Pointer() - mapIntfIntfTypId = reflect.ValueOf(mapIntfIntfTyp).Pointer() - mapStrIntfTypId = reflect.ValueOf(mapStrIntfTyp).Pointer() - mapIntIntfTypId = reflect.ValueOf(mapIntIntfTyp).Pointer() - mapInt64IntfTypId = reflect.ValueOf(mapInt64IntfTyp).Pointer() - mapUintIntfTypId = reflect.ValueOf(mapUintIntfTyp).Pointer() - mapUint64IntfTypId = reflect.ValueOf(mapUint64IntfTyp).Pointer() - // Id = reflect.ValueOf().Pointer() - // mapBySliceTypId = reflect.ValueOf(mapBySliceTyp).Pointer() - - binaryMarshalerTypId = reflect.ValueOf(binaryMarshalerTyp).Pointer() - binaryUnmarshalerTypId = reflect.ValueOf(binaryUnmarshalerTyp).Pointer() - - intBitsize uint8 = uint8(reflect.TypeOf(int(0)).Bits()) - uintBitsize uint8 = uint8(reflect.TypeOf(uint(0)).Bits()) - - bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0} + mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil)) + intfSliceTyp = reflect.TypeOf([]interface{}(nil)) + intfTyp = intfSliceTyp.Elem() + + reflectValTyp = reflect.TypeOf((*reflect.Value)(nil)).Elem() + + stringTyp = reflect.TypeOf("") + timeTyp = reflect.TypeOf(time.Time{}) + rawExtTyp = reflect.TypeOf(RawExt{}) + rawTyp = reflect.TypeOf(Raw{}) + uintptrTyp = reflect.TypeOf(uintptr(0)) + uint8Typ = reflect.TypeOf(uint8(0)) + uint8SliceTyp = reflect.TypeOf([]uint8(nil)) + uintTyp = reflect.TypeOf(uint(0)) + intTyp = reflect.TypeOf(int(0)) + + mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem() + + binaryMarshalerTyp = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem() + binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem() + + textMarshalerTyp = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + + jsonMarshalerTyp = reflect.TypeOf((*jsonMarshaler)(nil)).Elem() + jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem() + + selferTyp = reflect.TypeOf((*Selfer)(nil)).Elem() + missingFielderTyp = reflect.TypeOf((*MissingFielder)(nil)).Elem() + iszeroTyp = reflect.TypeOf((*isZeroer)(nil)).Elem() + + uint8TypId = rt2id(uint8Typ) + uint8SliceTypId = rt2id(uint8SliceTyp) + rawExtTypId = rt2id(rawExtTyp) + rawTypId = rt2id(rawTyp) + intfTypId = rt2id(intfTyp) + timeTypId = rt2id(timeTyp) + stringTypId = rt2id(stringTyp) + + mapStrIntfTypId = rt2id(mapStrIntfTyp) + mapIntfIntfTypId = rt2id(mapIntfIntfTyp) + intfSliceTypId = rt2id(intfSliceTyp) + // mapBySliceTypId = rt2id(mapBySliceTyp) + + intBitsize = uint8(intTyp.Bits()) + uintBitsize = uint8(uintTyp.Bits()) + + // bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0} bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} + + chkOvf checkOverflow + + errNoFieldNameToStructFieldInfo = errors.New("no field name passed to parseStructFieldInfo") ) -type binaryUnmarshaler interface { - UnmarshalBinary(data []byte) error +var defTypeInfos = NewTypeInfos([]string{"codec", 
"json"})
+
+var immutableKindsSet = [32]bool{
+	// reflect.Invalid: ,
+	reflect.Bool:       true,
+	reflect.Int:        true,
+	reflect.Int8:       true,
+	reflect.Int16:      true,
+	reflect.Int32:      true,
+	reflect.Int64:      true,
+	reflect.Uint:       true,
+	reflect.Uint8:      true,
+	reflect.Uint16:     true,
+	reflect.Uint32:     true,
+	reflect.Uint64:     true,
+	reflect.Uintptr:    true,
+	reflect.Float32:    true,
+	reflect.Float64:    true,
+	reflect.Complex64:  true,
+	reflect.Complex128: true,
+	// reflect.Array
+	// reflect.Chan
+	// reflect.Func: true,
+	// reflect.Interface
+	// reflect.Map
+	// reflect.Ptr
+	// reflect.Slice
+	reflect.String: true,
+	// reflect.Struct
+	// reflect.UnsafePointer
+}
+
+// Selfer defines methods by which a value can encode or decode itself.
+//
+// Any type which implements Selfer will be able to encode or decode itself.
+// Consequently, during (en|de)code, this takes precedence over
+// (text|binary)(M|Unm)arshal or extension support.
+//
+// By definition, it is not allowed for a Selfer to directly call Encode or Decode on itself.
+// If that is done, Encode/Decode will rightfully fail with a Stack Overflow style error.
+// For example, the snippet below will cause such an error.
+//	type testSelferRecur struct{}
+//	func (s *testSelferRecur) CodecEncodeSelf(e *Encoder) { e.MustEncode(s) }
+//	func (s *testSelferRecur) CodecDecodeSelf(d *Decoder) { d.MustDecode(s) }
+//
+// Note: *the first set of bytes of any value MUST NOT represent nil in the format*.
+// This is because, during each decode, we first check whether the next set of bytes
+// represents nil, and if so, we just set the value to nil.
+type Selfer interface {
+	CodecEncodeSelf(*Encoder)
+	CodecDecodeSelf(*Decoder)
 }
-type binaryMarshaler interface {
-	MarshalBinary() (data []byte, err error)
+
+// MissingFielder defines the interface allowing structs to internally decode or encode
+// values which do not map to struct fields.
+//
+// We expect that this interface is bound to a pointer type (so the mutation function works).
+//
+// A use-case is if a version of a type unexports a field, but you want compatibility between
+// both versions during encoding and decoding.
+//
+// Note that the interface is completely ignored during codecgen.
+type MissingFielder interface {
+	// CodecMissingField is called to set a missing field and value pair.
+	//
+	// It returns true if the missing field was set on the struct.
+	CodecMissingField(field []byte, value interface{}) bool
+
+	// CodecMissingFields returns the set of fields which are not struct fields.
+	CodecMissingFields() map[string]interface{}
 }
-// MapBySlice represents a slice which should be encoded as a map in the stream.
+
+// MapBySlice is a tag interface that denotes that a wrapped slice should encode
+// as a map in the stream.
 // The slice contains a sequence of key-value pairs.
+// This affords storing a map in a specific sequence in the stream.
+//
+// Example usage:
+//	type T1 []string // or []int or []Point or any other "slice" type
+//	func (_ T1) MapBySlice() {} // T1 now implements MapBySlice, and will be encoded as a map
+//	type T2 struct{ KeyValues T1 }
+//
+//	var kvs = []string{"one", "1", "two", "2", "three", "3"}
+//	var v2 = T2{KeyValues: T1(kvs)}
+//	// v2 will be encoded like the map: {"KeyValues": {"one": "1", "two": "2", "three": "3"}}
+//
+// The support of MapBySlice affords the following:
+//   - A slice type which implements MapBySlice will be encoded as a map
+//   - A slice can be decoded from a map in the stream
+//   - It MUST be a slice type (not a pointer receiver) that implements MapBySlice
 type MapBySlice interface {
 	MapBySlice()
 }
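+
+// As a concrete sketch of MissingFielder (the legacyUser type and its stash map
+// are illustrative, not part of this package): unknown stream fields are stashed
+// on decode, and handed back on encode.
+//
+//	type legacyUser struct {
+//		Name  string
+//		extra map[string]interface{} // unexported: not encoded as a regular field
+//	}
+//
+//	func (u *legacyUser) CodecMissingField(field []byte, value interface{}) bool {
+//		if u.extra == nil {
+//			u.extra = make(map[string]interface{})
+//		}
+//		u.extra[string(field)] = value
+//		return true // the missing field was set on the struct
+//	}
+//
+//	func (u *legacyUser) CodecMissingFields() map[string]interface{} { return u.extra }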
-// WARNING: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED.
-//
 // BasicHandle encapsulates the common options and extension functions.
+//
+// Deprecated: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED.
 type BasicHandle struct {
+	// BasicHandle is always a part of a different type.
+	// It doesn't have to fit into its own cache lines.
+
+	// TypeInfos is used to get the type info for any type.
+	//
+	// If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json
+	TypeInfos *TypeInfos
+
+	// Note: BasicHandle is not comparable, due to these slices here (extHandle, intf2impls).
+	// If *[]T is used instead, this becomes comparable, at the cost of extra indirection.
+	// These slices are used all the time, so keep as slices (not pointers).
+
 	extHandle
-	EncodeOptions
-	DecodeOptions
-}
 
-// Handle is the interface for a specific encoding format.
-//
-// Typically, a Handle is pre-configured before first time use,
-// and not modified while in use. Such a pre-configured Handle
-// is safe for concurrent access.
-type Handle interface {
-	writeExt() bool
-	getBasicHandle() *BasicHandle
-	newEncDriver(w encWriter) encDriver
-	newDecDriver(r decReader) decDriver
-}
+	intf2impls
 
-// RawExt represents raw unprocessed extension data.
-type RawExt struct {
-	Tag  byte
-	Data []byte
-}
+	inited uint32
+	_      uint32 // padding
 
-type extTypeTagFn struct {
-	rtid  uintptr
-	rt    reflect.Type
-	tag   byte
-	encFn func(reflect.Value) ([]byte, error)
-	decFn func(reflect.Value, []byte) error
-}
+	// ---- cache line
 
-type extHandle []*extTypeTagFn
+	RPCOptions
 
-// AddExt registers an encode and decode function for a reflect.Type.
-// Note that the type must be a named type, and specifically not
-// a pointer or Interface. An error is returned if that is not honored.
-//
-// To Deregister an ext, call AddExt with 0 tag, nil encfn and nil decfn.
-func (o *extHandle) AddExt(
-	rt reflect.Type,
-	tag byte,
-	encfn func(reflect.Value) ([]byte, error),
-	decfn func(reflect.Value, []byte) error,
-) (err error) {
-	// o is a pointer, because we may need to initialize it
-	if rt.PkgPath() == "" || rt.Kind() == reflect.Interface {
-		err = fmt.Errorf("codec.Handle.AddExt: Takes named type, especially not a pointer or interface: %T",
-			reflect.Zero(rt).Interface())
-		return
-	}
+	// TimeNotBuiltin configures whether time.Time should be treated as a builtin type.
+	//
+	// All Handlers should know how to encode/decode time.Time as part of the core
+	// format specification, or as a standard extension defined by the format.
+	//
+	// However, users can elect to handle time.Time as a custom extension, or via the
+	// standard library's encoding.Binary(M|Unm)arshaler or Text(M|Unm)arshaler interface.
+ // To elect this behavior, users can set TimeNotBuiltin=true. + // Note: Setting TimeNotBuiltin=true can be used to enable the legacy behavior + // (for Cbor and Msgpack), where time.Time was not a builtin supported type. + TimeNotBuiltin bool + + // ExplicitRelease configures whether Release() is implicitly called after an encode or + // decode call. + // + // If you will hold onto an Encoder or Decoder for re-use, by calling Reset(...) + // on it or calling (Must)Encode repeatedly into a given []byte or io.Writer, + // then you do not want it to be implicitly closed after each Encode/Decode call. + // Doing so will unnecessarily return resources to the shared pool, only for you to + // grab them right after again to do another Encode/Decode call. + // + // Instead, you configure ExplicitRelease=true, and you explicitly call Release() when + // you are truly done. + // + // As an alternative, you can explicitly set a finalizer - so its resources + // are returned to the shared pool before it is garbage-collected. Do it as below: + // runtime.SetFinalizer(e, (*Encoder).Release) + // runtime.SetFinalizer(d, (*Decoder).Release) + ExplicitRelease bool - // o cannot be nil, since it is always embedded in a Handle. - // if nil, let it panic. - // if o == nil { - // err = errors.New("codec.Handle.AddExt: extHandle cannot be a nil pointer.") - // return - // } + be bool // is handle a binary encoding? + js bool // is handle javascript handler? + n byte // first letter of handle name + _ uint16 // padding - rtid := reflect.ValueOf(rt).Pointer() - for _, v := range *o { - if v.rtid == rtid { - v.tag, v.encFn, v.decFn = tag, encfn, decfn - return - } - } + // ---- cache line - *o = append(*o, &extTypeTagFn{rtid, rt, tag, encfn, decfn}) - return -} + DecodeOptions -func (o extHandle) getExt(rtid uintptr) *extTypeTagFn { - for _, v := range o { - if v.rtid == rtid { - return v - } - } - return nil -} + // ---- cache line -func (o extHandle) getExtForTag(tag byte) *extTypeTagFn { - for _, v := range o { - if v.tag == tag { - return v - } - } - return nil -} + EncodeOptions -func (o extHandle) getDecodeExtForTag(tag byte) ( - rv reflect.Value, fn func(reflect.Value, []byte) error) { - if x := o.getExtForTag(tag); x != nil { - // ext is only registered for base - rv = reflect.New(x.rt).Elem() - fn = x.decFn - } - return + // noBuiltInTypeChecker + + rtidFns atomicRtidFnSlice + mu sync.Mutex + // r []uintptr // rtids mapped to s above } -func (o extHandle) getDecodeExt(rtid uintptr) (tag byte, fn func(reflect.Value, []byte) error) { - if x := o.getExt(rtid); x != nil { - tag = x.tag - fn = x.decFn +// basicHandle returns an initialized BasicHandle from the Handle. +func basicHandle(hh Handle) (x *BasicHandle) { + x = hh.getBasicHandle() + // ** We need to simulate once.Do, to ensure no data race within the block. + // ** Consequently, below would not work. + // if atomic.CompareAndSwapUint32(&x.inited, 0, 1) { + // x.be = hh.isBinary() + // _, x.js = hh.(*JsonHandle) + // x.n = hh.Name()[0] + // } + + // simulate once.Do using our own stored flag and mutex as a CompareAndSwap + // is not sufficient, since a race condition can occur within init(Handle) function. + // init is made noinline, so that this function can be inlined by its caller. 
+ if atomic.LoadUint32(&x.inited) == 0 { + x.init(hh) } return } -func (o extHandle) getEncodeExt(rtid uintptr) (tag byte, fn func(reflect.Value) ([]byte, error)) { - if x := o.getExt(rtid); x != nil { - tag = x.tag - fn = x.encFn +//go:noinline +func (x *BasicHandle) init(hh Handle) { + // make it uninlineable, as it is called at most once + x.mu.Lock() + if x.inited == 0 { + x.be = hh.isBinary() + _, x.js = hh.(*JsonHandle) + x.n = hh.Name()[0] + atomic.StoreUint32(&x.inited, 1) } - return + x.mu.Unlock() } -type structFieldInfo struct { - encName string // encode name - - // only one of 'i' or 'is' can be set. If 'i' is -1, then 'is' has been set. - - is []int // (recursive/embedded) field index in struct - i int16 // field index in struct - omitEmpty bool - toArray bool // if field is _struct, is the toArray set? +func (x *BasicHandle) getBasicHandle() *BasicHandle { + return x +} - // tag string // tag - // name string // field name - // encNameBs []byte // encoded name as byte stream - // ikind int // kind of the field as an int i.e. int(reflect.Kind) +func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) { + if x.TypeInfos == nil { + return defTypeInfos.get(rtid, rt) + } + return x.TypeInfos.get(rtid, rt) } -func parseStructFieldInfo(fname string, stag string) *structFieldInfo { - if fname == "" { - panic("parseStructFieldInfo: No Field Name") +func findFn(s []codecRtidFn, rtid uintptr) (i uint, fn *codecFn) { + // binary search. adapted from sort/search.go. + // Note: we use goto (instead of for loop) so this can be inlined. + + // h, i, j := 0, 0, len(s) + var h uint // var h, i uint + var j = uint(len(s)) +LOOP: + if i < j { + h = i + (j-i)/2 + if s[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + goto LOOP } - si := structFieldInfo{ - // name: fname, - encName: fname, - // tag: stag, + if i < uint(len(s)) && s[i].rtid == rtid { + fn = s[i].fn } + return +} - if stag != "" { - for i, s := range strings.Split(stag, ",") { - if i == 0 { - if s != "" { - si.encName = s +func (x *BasicHandle) fn(rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *codecFn) { + rtid := rt2id(rt) + sp := x.rtidFns.load() + if sp != nil { + if _, fn = findFn(sp, rtid); fn != nil { + // xdebugf("<<<< %c: found fn for %v in rtidfns of size: %v", c.n, rt, len(sp)) + return + } + } + c := x + // xdebugf("#### for %c: load fn for %v in rtidfns of size: %v", c.n, rt, len(sp)) + fn = new(codecFn) + fi := &(fn.i) + ti := c.getTypeInfo(rtid, rt) + fi.ti = ti + + rk := reflect.Kind(ti.kind) + + if checkCodecSelfer && (ti.cs || ti.csp) { + fn.fe = (*Encoder).selferMarshal + fn.fd = (*Decoder).selferUnmarshal + fi.addrF = true + fi.addrD = ti.csp + fi.addrE = ti.csp + } else if rtid == timeTypId && !c.TimeNotBuiltin { + fn.fe = (*Encoder).kTime + fn.fd = (*Decoder).kTime + } else if rtid == rawTypId { + fn.fe = (*Encoder).raw + fn.fd = (*Decoder).raw + } else if rtid == rawExtTypId { + fn.fe = (*Encoder).rawExt + fn.fd = (*Decoder).rawExt + fi.addrF = true + fi.addrD = true + fi.addrE = true + } else if xfFn := c.getExt(rtid); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.fe = (*Encoder).ext + fn.fd = (*Decoder).ext + fi.addrF = true + fi.addrD = true + if rk == reflect.Struct || rk == reflect.Array { + fi.addrE = true + } + } else if supportMarshalInterfaces && c.be && (ti.bm || ti.bmp) && (ti.bu || ti.bup) { + fn.fe = (*Encoder).binaryMarshal + fn.fd = (*Decoder).binaryUnmarshal + fi.addrF = true + fi.addrD = ti.bup + fi.addrE = ti.bmp + } else if 
supportMarshalInterfaces && !c.be && c.js && (ti.jm || ti.jmp) && (ti.ju || ti.jup) { + //If JSON, we should check JSONMarshal before textMarshal + fn.fe = (*Encoder).jsonMarshal + fn.fd = (*Decoder).jsonUnmarshal + fi.addrF = true + fi.addrD = ti.jup + fi.addrE = ti.jmp + } else if supportMarshalInterfaces && !c.be && (ti.tm || ti.tmp) && (ti.tu || ti.tup) { + fn.fe = (*Encoder).textMarshal + fn.fd = (*Decoder).textUnmarshal + fi.addrF = true + fi.addrD = ti.tup + fi.addrE = ti.tmp + } else { + if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) { + if ti.pkgpath == "" { // un-named slice or map + if idx := fastpathAV.index(rtid); idx != -1 { + fn.fe = fastpathAV[idx].encfn + fn.fd = fastpathAV[idx].decfn + fi.addrD = true + fi.addrF = false } } else { - switch s { - case "omitempty": - si.omitEmpty = true - case "toarray": - si.toArray = true + // use mapping for underlying type if there + var rtu reflect.Type + if rk == reflect.Map { + rtu = reflect.MapOf(ti.key, ti.elem) + } else { + rtu = reflect.SliceOf(ti.elem) + } + rtuid := rt2id(rtu) + if idx := fastpathAV.index(rtuid); idx != -1 { + xfnf := fastpathAV[idx].encfn + xrt := fastpathAV[idx].rt + fn.fe = func(e *Encoder, xf *codecFnInfo, xrv reflect.Value) { + xfnf(e, xf, xrv.Convert(xrt)) + } + fi.addrD = true + fi.addrF = false // meaning it can be an address(ptr) or a value + xfnf2 := fastpathAV[idx].decfn + fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) { + if xrv.Kind() == reflect.Ptr { + xfnf2(d, xf, xrv.Convert(reflect.PtrTo(xrt))) + } else { + xfnf2(d, xf, xrv.Convert(xrt)) + } + } + } + } + } + if fn.fe == nil && fn.fd == nil { + switch rk { + case reflect.Bool: + fn.fe = (*Encoder).kBool + fn.fd = (*Decoder).kBool + case reflect.String: + fn.fe = (*Encoder).kString + fn.fd = (*Decoder).kString + case reflect.Int: + fn.fd = (*Decoder).kInt + fn.fe = (*Encoder).kInt + case reflect.Int8: + fn.fe = (*Encoder).kInt8 + fn.fd = (*Decoder).kInt8 + case reflect.Int16: + fn.fe = (*Encoder).kInt16 + fn.fd = (*Decoder).kInt16 + case reflect.Int32: + fn.fe = (*Encoder).kInt32 + fn.fd = (*Decoder).kInt32 + case reflect.Int64: + fn.fe = (*Encoder).kInt64 + fn.fd = (*Decoder).kInt64 + case reflect.Uint: + fn.fd = (*Decoder).kUint + fn.fe = (*Encoder).kUint + case reflect.Uint8: + fn.fe = (*Encoder).kUint8 + fn.fd = (*Decoder).kUint8 + case reflect.Uint16: + fn.fe = (*Encoder).kUint16 + fn.fd = (*Decoder).kUint16 + case reflect.Uint32: + fn.fe = (*Encoder).kUint32 + fn.fd = (*Decoder).kUint32 + case reflect.Uint64: + fn.fe = (*Encoder).kUint64 + fn.fd = (*Decoder).kUint64 + case reflect.Uintptr: + fn.fe = (*Encoder).kUintptr + fn.fd = (*Decoder).kUintptr + case reflect.Float32: + fn.fe = (*Encoder).kFloat32 + fn.fd = (*Decoder).kFloat32 + case reflect.Float64: + fn.fe = (*Encoder).kFloat64 + fn.fd = (*Decoder).kFloat64 + case reflect.Invalid: + fn.fe = (*Encoder).kInvalid + fn.fd = (*Decoder).kErr + case reflect.Chan: + fi.seq = seqTypeChan + fn.fe = (*Encoder).kSlice + fn.fd = (*Decoder).kSlice + case reflect.Slice: + fi.seq = seqTypeSlice + fn.fe = (*Encoder).kSlice + fn.fd = (*Decoder).kSlice + case reflect.Array: + fi.seq = seqTypeArray + fn.fe = (*Encoder).kSlice + fi.addrF = false + fi.addrD = false + rt2 := reflect.SliceOf(ti.elem) + fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) { + d.h.fn(rt2, true, false).fd(d, xf, xrv.Slice(0, xrv.Len())) + } + // fn.fd = (*Decoder).kArray + case reflect.Struct: + if ti.anyOmitEmpty || ti.mf || ti.mfp { + fn.fe = (*Encoder).kStruct + } 
else { + fn.fe = (*Encoder).kStructNoOmitempty } + fn.fd = (*Decoder).kStruct + case reflect.Map: + fn.fe = (*Encoder).kMap + fn.fd = (*Decoder).kMap + case reflect.Interface: + // encode: reflect.Interface are handled already by preEncodeValue + fn.fd = (*Decoder).kInterface + fn.fe = (*Encoder).kErr + default: + // reflect.Ptr and reflect.Interface are handled already by preEncodeValue + fn.fe = (*Encoder).kErr + fn.fd = (*Decoder).kErr } } } - // si.encNameBs = []byte(si.encName) - return &si -} -type sfiSortedByEncName []*structFieldInfo + c.mu.Lock() + var sp2 []codecRtidFn + sp = c.rtidFns.load() + if sp == nil { + sp2 = []codecRtidFn{{rtid, fn}} + c.rtidFns.store(sp2) + // xdebugf(">>>> adding rt: %v to rtidfns of size: %v", rt, len(sp2)) + // xdebugf(">>>> loading stored rtidfns of size: %v", len(c.rtidFns.load())) + } else { + idx, fn2 := findFn(sp, rtid) + if fn2 == nil { + sp2 = make([]codecRtidFn, len(sp)+1) + copy(sp2, sp[:idx]) + copy(sp2[idx+1:], sp[idx:]) + sp2[idx] = codecRtidFn{rtid, fn} + c.rtidFns.store(sp2) + // xdebugf(">>>> adding rt: %v to rtidfns of size: %v", rt, len(sp2)) -func (p sfiSortedByEncName) Len() int { - return len(p) + } + } + c.mu.Unlock() + return } -func (p sfiSortedByEncName) Less(i, j int) bool { - return p[i].encName < p[j].encName +// Handle defines a specific encoding format. It also stores any runtime state +// used during an Encoding or Decoding session e.g. stored state about Types, etc. +// +// Once a handle is configured, it can be shared across multiple Encoders and Decoders. +// +// Note that a Handle is NOT safe for concurrent modification. +// Consequently, do not modify it after it is configured if shared among +// multiple Encoders and Decoders in different goroutines. +// +// Consequently, the typical usage model is that a Handle is pre-configured +// before first time use, and not modified while in use. +// Such a pre-configured Handle is safe for concurrent access. +type Handle interface { + Name() string + // return the basic handle. It may not have been inited. + // Prefer to use basicHandle() helper function that ensures it has been inited. + getBasicHandle() *BasicHandle + recreateEncDriver(encDriver) bool + newEncDriver(w *Encoder) encDriver + newDecDriver(r *Decoder) decDriver + isBinary() bool + hasElemSeparators() bool + // IsBuiltinType(rtid uintptr) bool } -func (p sfiSortedByEncName) Swap(i, j int) { - p[i], p[j] = p[j], p[i] -} +// Raw represents raw formatted bytes. +// We "blindly" store it during encode and retrieve the raw bytes during decode. +// Note: it is dangerous during encode, so we may gate the behaviour +// behind an Encode flag which must be explicitly set. +type Raw []byte -// typeInfo keeps information about each type referenced in the encode/decode sequence. +// RawExt represents raw unprocessed extension data. +// Some codecs will decode extension data as a *RawExt +// if there is no registered extension for the tag. // -// During an encode/decode sequence, we work as below: -// - If base is a built in type, en/decode base value -// - If base is registered as an extension, en/decode base value -// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method -// - Else decode appropriately based on the reflect.Kind -type typeInfo struct { - sfi []*structFieldInfo // sorted. Used when enc/dec struct to map. - sfip []*structFieldInfo // unsorted. Used when enc/dec struct to array. +// Only one of Data or Value is nil. +// If Data is nil, then the content of the RawExt is in the Value. 
+type RawExt struct { + Tag uint64 + // Data is the []byte which represents the raw ext. If nil, ext is exposed in Value. + // Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of types + Data []byte + // Value represents the extension, if Data is nil. + // Value is used by codecs (e.g. cbor, json) which leverage the format to do + // custom serialization of the types. + Value interface{} +} - rt reflect.Type - rtid uintptr +// BytesExt handles custom (de)serialization of types to/from []byte. +// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types. +type BytesExt interface { + // WriteExt converts a value to a []byte. + // + // Note: v is a pointer iff the registered extension type is a struct or array kind. + WriteExt(v interface{}) []byte - // baseId gives pointer to the base reflect.Type, after deferencing - // the pointers. E.g. base type of ***time.Time is time.Time. - base reflect.Type - baseId uintptr - baseIndir int8 // number of indirections to get to base - - mbs bool // base type (T or *T) is a MapBySlice - - m bool // base type (T or *T) is a binaryMarshaler - unm bool // base type (T or *T) is a binaryUnmarshaler - mIndir int8 // number of indirections to get to binaryMarshaler type - unmIndir int8 // number of indirections to get to binaryUnmarshaler type - toArray bool // whether this (struct) type should be encoded as an array -} - -func (ti *typeInfo) indexForEncName(name string) int { - //tisfi := ti.sfi - const binarySearchThreshold = 16 - if sfilen := len(ti.sfi); sfilen < binarySearchThreshold { - // linear search. faster than binary search in my testing up to 16-field structs. - for i, si := range ti.sfi { - if si.encName == name { - return i - } - } - } else { - // binary search. adapted from sort/search.go. - h, i, j := 0, 0, sfilen - for i < j { - h = i + (j-i)/2 - if ti.sfi[h].encName < name { - i = h + 1 - } else { - j = h - } - } - if i < sfilen && ti.sfi[i].encName == name { - return i - } - } - return -1 + // ReadExt updates a value from a []byte. + // + // Note: dst is always a pointer kind to the registered extension type. + ReadExt(dst interface{}, src []byte) } -func getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) { - var ok bool - cachedTypeInfoMutex.RLock() - pti, ok = cachedTypeInfo[rtid] - cachedTypeInfoMutex.RUnlock() - if ok { - return - } +// InterfaceExt handles custom (de)serialization of types to/from another interface{} value. +// The Encoder or Decoder will then handle the further (de)serialization of that known type. +// +// It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of types. +type InterfaceExt interface { + // ConvertExt converts a value into a simpler interface for easy encoding + // e.g. convert time.Time to int64. + // + // Note: v is a pointer iff the registered extension type is a struct or array kind. + ConvertExt(v interface{}) interface{} - cachedTypeInfoMutex.Lock() - defer cachedTypeInfoMutex.Unlock() - if pti, ok = cachedTypeInfo[rtid]; ok { - return - } + // UpdateExt updates a value from a simpler interface for easy decoding + // e.g. convert int64 to time.Time. + // + // Note: dst is always a pointer kind to the registered extension type. + UpdateExt(dst interface{}, src interface{}) +} - ti := typeInfo{rt: rt, rtid: rtid} - pti = &ti +// Ext handles custom (de)serialization of custom types / extensions. 
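+//
+// As a minimal illustration (the dollars type, the dollarsExt helper and the tag
+// value 78 are made up for this example; SetInterfaceExt, mentioned below as the
+// replacement for the deprecated AddExt, is the registration entry point):
+//
+//	type dollars struct{ amt float64 }
+//	type dollarsExt struct{}
+//
+//	// ConvertExt receives *dollars (a pointer), since dollars is a struct kind.
+//	func (dollarsExt) ConvertExt(v interface{}) interface{} { return v.(*dollars).amt }
+//	// UpdateExt always receives *dollars as dst.
+//	func (dollarsExt) UpdateExt(dst interface{}, src interface{}) {
+//		dst.(*dollars).amt = src.(float64)
+//	}
+//
+//	var h JsonHandle
+//	h.SetInterfaceExt(reflect.TypeOf(dollars{}), 78, dollarsExt{})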
+type Ext interface { + BytesExt + InterfaceExt +} - var indir int8 - if ok, indir = implementsIntf(rt, binaryMarshalerTyp); ok { - ti.m, ti.mIndir = true, indir - } - if ok, indir = implementsIntf(rt, binaryUnmarshalerTyp); ok { - ti.unm, ti.unmIndir = true, indir - } - if ok, _ = implementsIntf(rt, mapBySliceTyp); ok { - ti.mbs = true - } +// addExtWrapper is a wrapper implementation to support former AddExt exported method. +type addExtWrapper struct { + encFn func(reflect.Value) ([]byte, error) + decFn func(reflect.Value, []byte) error +} - pt := rt - var ptIndir int8 - // for ; pt.Kind() == reflect.Ptr; pt, ptIndir = pt.Elem(), ptIndir+1 { } - for pt.Kind() == reflect.Ptr { - pt = pt.Elem() - ptIndir++ - } - if ptIndir == 0 { - ti.base = rt - ti.baseId = rtid - } else { - ti.base = pt - ti.baseId = reflect.ValueOf(pt).Pointer() - ti.baseIndir = ptIndir +func (x addExtWrapper) WriteExt(v interface{}) []byte { + bs, err := x.encFn(reflect.ValueOf(v)) + if err != nil { + panic(err) } - - if rt.Kind() == reflect.Struct { - var siInfo *structFieldInfo - if f, ok := rt.FieldByName(structInfoFieldName); ok { - siInfo = parseStructFieldInfo(structInfoFieldName, f.Tag.Get(structTagName)) - ti.toArray = siInfo.toArray - } - sfip := make([]*structFieldInfo, 0, rt.NumField()) - rgetTypeInfo(rt, nil, make(map[string]bool), &sfip, siInfo) - - // // try to put all si close together - // const tryToPutAllStructFieldInfoTogether = true - // if tryToPutAllStructFieldInfoTogether { - // sfip2 := make([]structFieldInfo, len(sfip)) - // for i, si := range sfip { - // sfip2[i] = *si - // } - // for i := range sfip { - // sfip[i] = &sfip2[i] - // } - // } - - ti.sfip = make([]*structFieldInfo, len(sfip)) - ti.sfi = make([]*structFieldInfo, len(sfip)) - copy(ti.sfip, sfip) - sort.Sort(sfiSortedByEncName(sfip)) - copy(ti.sfi, sfip) - } - // sfi = sfip - cachedTypeInfo[rtid] = pti - return + return bs } -func rgetTypeInfo(rt reflect.Type, indexstack []int, fnameToHastag map[string]bool, - sfi *[]*structFieldInfo, siInfo *structFieldInfo, -) { - // for rt.Kind() == reflect.Ptr { - // // indexstack = append(indexstack, 0) - // rt = rt.Elem() - // } - for j := 0; j < rt.NumField(); j++ { - f := rt.Field(j) - stag := f.Tag.Get(structTagName) - if stag == "-" { - continue - } - if r1, _ := utf8.DecodeRuneInString(f.Name); r1 == utf8.RuneError || !unicode.IsUpper(r1) { - continue - } - // if anonymous and there is no struct tag and its a struct (or pointer to struct), inline it. - if f.Anonymous && stag == "" { - ft := f.Type - for ft.Kind() == reflect.Ptr { - ft = ft.Elem() - } - if ft.Kind() == reflect.Struct { - indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) - rgetTypeInfo(ft, indexstack2, fnameToHastag, sfi, siInfo) - continue - } - } - // do not let fields with same name in embedded structs override field at higher level. 
- // this must be done after anonymous check, to allow anonymous field - // still include their child fields - if _, ok := fnameToHastag[f.Name]; ok { - continue - } - si := parseStructFieldInfo(f.Name, stag) - // si.ikind = int(f.Type.Kind()) - if len(indexstack) == 0 { - si.i = int16(j) - } else { - si.i = -1 - si.is = append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) - } - - if siInfo != nil { - if siInfo.omitEmpty { - si.omitEmpty = true - } - } - *sfi = append(*sfi, si) - fnameToHastag[f.Name] = stag != "" +func (x addExtWrapper) ReadExt(v interface{}, bs []byte) { + if err := x.decFn(reflect.ValueOf(v), bs); err != nil { + panic(err) } } -func panicToErr(err *error) { - if recoverPanicToErr { - if x := recover(); x != nil { - //debug.PrintStack() - panicValToErr(x, err) - } - } +func (x addExtWrapper) ConvertExt(v interface{}) interface{} { + return x.WriteExt(v) } -func doPanic(tag string, format string, params ...interface{}) { - params2 := make([]interface{}, len(params)+1) - params2[0] = tag - copy(params2[1:], params) - panic(fmt.Errorf("%s: "+format, params2...)) +func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) { + x.ReadExt(dest, v.([]byte)) } -func checkOverflowFloat32(f float64, doCheck bool) { - if !doCheck { - return - } - // check overflow (logic adapted from std pkg reflect/value.go OverflowFloat() - f2 := f - if f2 < 0 { - f2 = -f - } - if math.MaxFloat32 < f2 && f2 <= math.MaxFloat64 { - decErr("Overflow float32 value: %v", f2) - } +type extWrapper struct { + BytesExt + InterfaceExt } -func checkOverflow(ui uint64, i int64, bitsize uint8) { - // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() - if bitsize == 0 { +type bytesExtFailer struct{} + +func (bytesExtFailer) WriteExt(v interface{}) []byte { + panicv.errorstr("BytesExt.WriteExt is not supported") + return nil +} +func (bytesExtFailer) ReadExt(v interface{}, bs []byte) { + panicv.errorstr("BytesExt.ReadExt is not supported") +} + +type interfaceExtFailer struct{} + +func (interfaceExtFailer) ConvertExt(v interface{}) interface{} { + panicv.errorstr("InterfaceExt.ConvertExt is not supported") + return nil +} +func (interfaceExtFailer) UpdateExt(dest interface{}, v interface{}) { + panicv.errorstr("InterfaceExt.UpdateExt is not supported") +} + +type binaryEncodingType struct{} + +func (binaryEncodingType) isBinary() bool { return true } + +type textEncodingType struct{} + +func (textEncodingType) isBinary() bool { return false } + +// noBuiltInTypes is embedded into many types which do not support builtins +// e.g. msgpack, simple, cbor. + +// type noBuiltInTypeChecker struct{} +// func (noBuiltInTypeChecker) IsBuiltinType(rt uintptr) bool { return false } +// type noBuiltInTypes struct{ noBuiltInTypeChecker } + +type noBuiltInTypes struct{} + +func (noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {} +func (noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {} + +// type noStreamingCodec struct{} +// func (noStreamingCodec) CheckBreak() bool { return false } +// func (noStreamingCodec) hasElemSeparators() bool { return false } + +type noElemSeparators struct{} + +func (noElemSeparators) hasElemSeparators() (v bool) { return } +func (noElemSeparators) recreateEncDriver(e encDriver) (v bool) { return } + +// bigenHelper. +// Users must already slice the x completely, because we will not reslice. +type bigenHelper struct { + x []byte // must be correctly sliced to appropriate len. slicing is a cost. 
+	w *encWriterSwitch
+}
+
+func (z bigenHelper) writeUint16(v uint16) {
+	bigen.PutUint16(z.x, v)
+	z.w.writeb(z.x)
+}
+
+func (z bigenHelper) writeUint32(v uint32) {
+	bigen.PutUint32(z.x, v)
+	z.w.writeb(z.x)
+}
+
+func (z bigenHelper) writeUint64(v uint64) {
+	bigen.PutUint64(z.x, v)
+	z.w.writeb(z.x)
+}
+
+type extTypeTagFn struct {
+	rtid    uintptr
+	rtidptr uintptr
+	rt      reflect.Type
+	tag     uint64
+	ext     Ext
+	_       [1]uint64 // padding
+}
+
+type extHandle []extTypeTagFn
+
+// AddExt registers an encode and decode function for a reflect.Type.
+// To deregister an Ext, call AddExt with nil encfn and/or nil decfn.
+//
+// Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead.
+func (o *extHandle) AddExt(rt reflect.Type, tag byte,
+	encfn func(reflect.Value) ([]byte, error),
+	decfn func(reflect.Value, []byte) error) (err error) {
+	if encfn == nil || decfn == nil {
+		return o.SetExt(rt, uint64(tag), nil)
+	}
+	return o.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn})
+}
+
+// SetExt will set the extension for a tag and reflect.Type.
+// Note that the type must be a named type, and specifically not a pointer or Interface.
+// An error is returned if that is not honored.
+// To deregister an ext, call SetExt with nil Ext.
+//
+// Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead.
+func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) {
+	// o is a pointer, because we may need to initialize it
+	rk := rt.Kind()
+	for rk == reflect.Ptr {
+		rt = rt.Elem()
+		rk = rt.Kind()
+	}
+
+	if rt.PkgPath() == "" || rk == reflect.Interface { // || rk == reflect.Ptr {
+		return fmt.Errorf("codec.Handle.SetExt: Takes named type, not a pointer or interface: %v", rt)
+	}
+
+	rtid := rt2id(rt)
+	switch rtid {
+	case timeTypId, rawTypId, rawExtTypId:
+		// all natively supported types, so they cannot have an extension
+		return // TODO: should we silently ignore, or return an error???
+	}
+	// if o == nil {
+	// 	return errors.New("codec.Handle.SetExt: extHandle not initialized")
+	// }
+	o2 := *o
+	// if o2 == nil {
+	// 	return errors.New("codec.Handle.SetExt: extHandle not initialized")
+	// }
+	for i := range o2 {
+		v := &o2[i]
+		if v.rtid == rtid {
+			v.tag, v.ext = tag, ext
+			return
+		}
+	}
+	rtidptr := rt2id(reflect.PtrTo(rt))
+	*o = append(o2, extTypeTagFn{rtid, rtidptr, rt, tag, ext, [1]uint64{}})
+	return
+}
+
+func (o extHandle) getExt(rtid uintptr) (v *extTypeTagFn) {
+	for i := range o {
+		v = &o[i]
+		if v.rtid == rtid || v.rtidptr == rtid {
+			return
+		}
+	}
+	return nil
+}
+
+func (o extHandle) getExtForTag(tag uint64) (v *extTypeTagFn) {
+	for i := range o {
+		v = &o[i]
+		if v.tag == tag {
+			return
+		}
+	}
+	return nil
+}
+
+type intf2impl struct {
+	rtid uintptr // for intf
+	impl reflect.Type
+	// _ [1]uint64 // padding // not-needed, as *intf2impl is never returned.
+}
+
+type intf2impls []intf2impl
+
+// Intf2Impl maps an interface to an implementing type.
+// This allows us to support inferring the concrete type
+// and populating it when passed an interface.
+// e.g. var v io.Reader can be decoded as a bytes.Buffer, etc.
+//
+// Passing a nil impl will clear the mapping.
+func (o *intf2impls) Intf2Impl(intf, impl reflect.Type) (err error) { + if impl != nil && !impl.Implements(intf) { + return fmt.Errorf("Intf2Impl: %v does not implement %v", impl, intf) + } + rtid := rt2id(intf) + o2 := *o + for i := range o2 { + v := &o2[i] + if v.rtid == rtid { + v.impl = impl + return + } + } + *o = append(o2, intf2impl{rtid, impl}) + return +} + +func (o intf2impls) intf2impl(rtid uintptr) (rv reflect.Value) { + for i := range o { + v := &o[i] + if v.rtid == rtid { + if v.impl == nil { + return + } + if v.impl.Kind() == reflect.Ptr { + return reflect.New(v.impl.Elem()) + } + return reflect.New(v.impl).Elem() + } + } + return +} + +type structFieldInfoFlag uint8 + +const ( + _ structFieldInfoFlag = 1 << iota + structFieldInfoFlagReady + structFieldInfoFlagOmitEmpty +) + +func (x *structFieldInfoFlag) flagSet(f structFieldInfoFlag) { + *x = *x | f +} + +func (x *structFieldInfoFlag) flagClr(f structFieldInfoFlag) { + *x = *x &^ f +} + +func (x structFieldInfoFlag) flagGet(f structFieldInfoFlag) bool { + return x&f != 0 +} + +func (x structFieldInfoFlag) omitEmpty() bool { + return x.flagGet(structFieldInfoFlagOmitEmpty) +} + +func (x structFieldInfoFlag) ready() bool { + return x.flagGet(structFieldInfoFlagReady) +} + +type structFieldInfo struct { + encName string // encode name + fieldName string // field name + + is [maxLevelsEmbedding]uint16 // (recursive/embedded) field index in struct + nis uint8 // num levels of embedding. if 1, then it's not embedded. + + encNameAsciiAlphaNum bool // the encName only contains ascii alphabet and numbers + structFieldInfoFlag + _ [1]byte // padding +} + +func (si *structFieldInfo) setToZeroValue(v reflect.Value) { + if v, valid := si.field(v, false); valid { + v.Set(reflect.Zero(v.Type())) + } +} + +// rv returns the field of the struct. 
+// If anonymous, it returns an Invalid +func (si *structFieldInfo) field(v reflect.Value, update bool) (rv2 reflect.Value, valid bool) { + // replicate FieldByIndex + for i, x := range si.is { + if uint8(i) == si.nis { + break + } + if v, valid = baseStructRv(v, update); !valid { + return + } + v = v.Field(int(x)) + } + + return v, true +} + +// func (si *structFieldInfo) fieldval(v reflect.Value, update bool) reflect.Value { +// v, _ = si.field(v, update) +// return v +// } + +func parseStructInfo(stag string) (toArray, omitEmpty bool, keytype valueType) { + keytype = valueTypeString // default + if stag == "" { + return + } + for i, s := range strings.Split(stag, ",") { + if i == 0 { + } else { + switch s { + case "omitempty": + omitEmpty = true + case "toarray": + toArray = true + case "int": + keytype = valueTypeInt + case "uint": + keytype = valueTypeUint + case "float": + keytype = valueTypeFloat + // case "bool": + // keytype = valueTypeBool + case "string": + keytype = valueTypeString + } + } + } + return +} + +func (si *structFieldInfo) parseTag(stag string) { + // if fname == "" { + // panic(errNoFieldNameToStructFieldInfo) + // } + + if stag == "" { + return + } + for i, s := range strings.Split(stag, ",") { + if i == 0 { + if s != "" { + si.encName = s + } + } else { + switch s { + case "omitempty": + si.flagSet(structFieldInfoFlagOmitEmpty) + // si.omitEmpty = true + // case "toarray": + // si.toArray = true + } + } + } +} + +type sfiSortedByEncName []*structFieldInfo + +func (p sfiSortedByEncName) Len() int { return len(p) } +func (p sfiSortedByEncName) Less(i, j int) bool { return p[uint(i)].encName < p[uint(j)].encName } +func (p sfiSortedByEncName) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +const structFieldNodeNumToCache = 4 + +type structFieldNodeCache struct { + rv [structFieldNodeNumToCache]reflect.Value + idx [structFieldNodeNumToCache]uint32 + num uint8 +} + +func (x *structFieldNodeCache) get(key uint32) (fv reflect.Value, valid bool) { + for i, k := range &x.idx { + if uint8(i) == x.num { + return // break + } + if key == k { + return x.rv[i], true + } + } + return +} + +func (x *structFieldNodeCache) tryAdd(fv reflect.Value, key uint32) { + if x.num < structFieldNodeNumToCache { + x.rv[x.num] = fv + x.idx[x.num] = key + x.num++ + return + } +} + +type structFieldNode struct { + v reflect.Value + cache2 structFieldNodeCache + cache3 structFieldNodeCache + update bool +} + +func (x *structFieldNode) field(si *structFieldInfo) (fv reflect.Value) { + // return si.fieldval(x.v, x.update) + // Note: we only cache if nis=2 or nis=3 i.e. up to 2 levels of embedding + // This mostly saves us time on the repeated calls to v.Elem, v.Field, etc. 
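+	// (For the 3-level case below, the two leading field indices are packed into
+	// a single uint32 cache key: is[0] in the high 16 bits, is[1] in the low 16.)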
+ var valid bool + switch si.nis { + case 1: + fv = x.v.Field(int(si.is[0])) + case 2: + if fv, valid = x.cache2.get(uint32(si.is[0])); valid { + fv = fv.Field(int(si.is[1])) + return + } + fv = x.v.Field(int(si.is[0])) + if fv, valid = baseStructRv(fv, x.update); !valid { + return + } + x.cache2.tryAdd(fv, uint32(si.is[0])) + fv = fv.Field(int(si.is[1])) + case 3: + var key uint32 = uint32(si.is[0])<<16 | uint32(si.is[1]) + if fv, valid = x.cache3.get(key); valid { + fv = fv.Field(int(si.is[2])) + return + } + fv = x.v.Field(int(si.is[0])) + if fv, valid = baseStructRv(fv, x.update); !valid { + return + } + fv = fv.Field(int(si.is[1])) + if fv, valid = baseStructRv(fv, x.update); !valid { + return + } + x.cache3.tryAdd(fv, key) + fv = fv.Field(int(si.is[2])) + default: + fv, _ = si.field(x.v, x.update) + } + return +} + +func baseStructRv(v reflect.Value, update bool) (v2 reflect.Value, valid bool) { + for v.Kind() == reflect.Ptr { + if v.IsNil() { + if !update { + return + } + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + return v, true +} + +type typeInfoFlag uint8 + +const ( + typeInfoFlagComparable = 1 << iota + typeInfoFlagIsZeroer + typeInfoFlagIsZeroerPtr +) + +// typeInfo keeps information about each (non-ptr) type referenced in the encode/decode sequence. +// +// During an encode/decode sequence, we work as below: +// - If base is a built in type, en/decode base value +// - If base is registered as an extension, en/decode base value +// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method +// - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method +// - Else decode appropriately based on the reflect.Kind +type typeInfo struct { + rt reflect.Type + elem reflect.Type + pkgpath string + + rtid uintptr + // rv0 reflect.Value // saved zero value, used if immutableKind + + numMeth uint16 // number of methods + kind uint8 + chandir uint8 + + anyOmitEmpty bool // true if a struct, and any of the fields are tagged "omitempty" + toArray bool // whether this (struct) type should be encoded as an array + keyType valueType // if struct, how is the field name stored in a stream? default is string + mbs bool // base type (T or *T) is a MapBySlice + + // ---- cpu cache line boundary? + sfiSort []*structFieldInfo // sorted. Used when enc/dec struct to map. + sfiSrc []*structFieldInfo // unsorted. Used when enc/dec struct to array. + + key reflect.Type + + // ---- cpu cache line boundary? + // sfis []structFieldInfo // all sfi, in src order, as created. + sfiNamesSort []byte // all names, with indexes into the sfiSort + + // format of marshal type fields below: [btj][mu]p? OR csp? + + bm bool // T is a binaryMarshaler + bmp bool // *T is a binaryMarshaler + bu bool // T is a binaryUnmarshaler + bup bool // *T is a binaryUnmarshaler + tm bool // T is a textMarshaler + tmp bool // *T is a textMarshaler + tu bool // T is a textUnmarshaler + tup bool // *T is a textUnmarshaler + + jm bool // T is a jsonMarshaler + jmp bool // *T is a jsonMarshaler + ju bool // T is a jsonUnmarshaler + jup bool // *T is a jsonUnmarshaler + cs bool // T is a Selfer + csp bool // *T is a Selfer + mf bool // T is a MissingFielder + mfp bool // *T is a MissingFielder + + // other flags, with individual bits representing if set. 
+ flags typeInfoFlag + infoFieldOmitempty bool + + _ [6]byte // padding + _ [2]uint64 // padding +} + +func (ti *typeInfo) isFlag(f typeInfoFlag) bool { + return ti.flags&f != 0 +} + +func (ti *typeInfo) indexForEncName(name []byte) (index int16) { + var sn []byte + if len(name)+2 <= 32 { + var buf [32]byte // should not escape to heap + sn = buf[:len(name)+2] + } else { + sn = make([]byte, len(name)+2) + } + copy(sn[1:], name) + sn[0], sn[len(sn)-1] = tiSep2(name), 0xff + j := bytes.Index(ti.sfiNamesSort, sn) + if j < 0 { + return -1 + } + index = int16(uint16(ti.sfiNamesSort[j+len(sn)+1]) | uint16(ti.sfiNamesSort[j+len(sn)])<<8) + return +} + +type rtid2ti struct { + rtid uintptr + ti *typeInfo +} + +// TypeInfos caches typeInfo for each type on first inspection. +// +// It is configured with a set of tag keys, which are used to get +// configuration for the type. +type TypeInfos struct { + // infos: formerly map[uintptr]*typeInfo, now *[]rtid2ti, 2 words expected + infos atomicTypeInfoSlice + mu sync.Mutex + tags []string + _ [2]uint64 // padding +} + +// NewTypeInfos creates a TypeInfos given a set of struct tags keys. +// +// This allows users customize the struct tag keys which contain configuration +// of their types. +func NewTypeInfos(tags []string) *TypeInfos { + return &TypeInfos{tags: tags} +} + +func (x *TypeInfos) structTag(t reflect.StructTag) (s string) { + // check for tags: codec, json, in that order. + // this allows seamless support for many configured structs. + for _, x := range x.tags { + s = t.Get(x) + if s != "" { + return s + } + } + return +} + +func findTypeInfo(s []rtid2ti, rtid uintptr) (i uint, ti *typeInfo) { + // binary search. adapted from sort/search.go. + // Note: we use goto (instead of for loop) so this can be inlined. + + // if sp == nil { + // return -1, nil + // } + // s := *sp + + // h, i, j := 0, 0, len(s) + var h uint // var h, i uint + var j = uint(len(s)) +LOOP: + if i < j { + h = i + (j-i)/2 + if s[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + goto LOOP + } + if i < uint(len(s)) && s[i].rtid == rtid { + ti = s[i].ti + } + return +} + +func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) { + sp := x.infos.load() + if sp != nil { + _, pti = findTypeInfo(sp, rtid) + if pti != nil { + return + } + } + + rk := rt.Kind() + + if rk == reflect.Ptr { // || (rk == reflect.Interface && rtid != intfTypId) { + panicv.errorf("invalid kind passed to TypeInfos.get: %v - %v", rk, rt) + } + + // do not hold lock while computing this. + // it may lead to duplication, but that's ok. 
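+	// (Configuration aside: the struct tag keys consulted during the field
+	// inspection below come from NewTypeInfos. A Handle can carry its own set
+	// via BasicHandle.TypeInfos; "mytag" here is illustrative, not a key this
+	// package defines:
+	//
+	//	var h CborHandle
+	//	h.TypeInfos = NewTypeInfos([]string{"mytag", "codec", "json"})
+	// )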
+ ti := typeInfo{ + rt: rt, + rtid: rtid, + kind: uint8(rk), + pkgpath: rt.PkgPath(), + keyType: valueTypeString, // default it - so it's never 0 + } + // ti.rv0 = reflect.Zero(rt) + + // ti.comparable = rt.Comparable() + ti.numMeth = uint16(rt.NumMethod()) + + ti.bm, ti.bmp = implIntf(rt, binaryMarshalerTyp) + ti.bu, ti.bup = implIntf(rt, binaryUnmarshalerTyp) + ti.tm, ti.tmp = implIntf(rt, textMarshalerTyp) + ti.tu, ti.tup = implIntf(rt, textUnmarshalerTyp) + ti.jm, ti.jmp = implIntf(rt, jsonMarshalerTyp) + ti.ju, ti.jup = implIntf(rt, jsonUnmarshalerTyp) + ti.cs, ti.csp = implIntf(rt, selferTyp) + ti.mf, ti.mfp = implIntf(rt, missingFielderTyp) + + b1, b2 := implIntf(rt, iszeroTyp) + if b1 { + ti.flags |= typeInfoFlagIsZeroer + } + if b2 { + ti.flags |= typeInfoFlagIsZeroerPtr + } + if rt.Comparable() { + ti.flags |= typeInfoFlagComparable + } + + switch rk { + case reflect.Struct: + var omitEmpty bool + if f, ok := rt.FieldByName(structInfoFieldName); ok { + ti.toArray, omitEmpty, ti.keyType = parseStructInfo(x.structTag(f.Tag)) + ti.infoFieldOmitempty = omitEmpty + } else { + ti.keyType = valueTypeString + } + pp, pi := &pool.tiload, pool.tiload.Get() // pool.tiLoad() + pv := pi.(*typeInfoLoadArray) + pv.etypes[0] = ti.rtid + // vv := typeInfoLoad{pv.fNames[:0], pv.encNames[:0], pv.etypes[:1], pv.sfis[:0]} + vv := typeInfoLoad{pv.etypes[:1], pv.sfis[:0]} + x.rget(rt, rtid, omitEmpty, nil, &vv) + // ti.sfis = vv.sfis + ti.sfiSrc, ti.sfiSort, ti.sfiNamesSort, ti.anyOmitEmpty = rgetResolveSFI(rt, vv.sfis, pv) + pp.Put(pi) + case reflect.Map: + ti.elem = rt.Elem() + ti.key = rt.Key() + case reflect.Slice: + ti.mbs, _ = implIntf(rt, mapBySliceTyp) + ti.elem = rt.Elem() + case reflect.Chan: + ti.elem = rt.Elem() + ti.chandir = uint8(rt.ChanDir()) + case reflect.Array, reflect.Ptr: + ti.elem = rt.Elem() + } + // sfi = sfiSrc + + x.mu.Lock() + sp = x.infos.load() + var sp2 []rtid2ti + if sp == nil { + pti = &ti + sp2 = []rtid2ti{{rtid, pti}} + x.infos.store(sp2) + } else { + var idx uint + idx, pti = findTypeInfo(sp, rtid) + if pti == nil { + pti = &ti + sp2 = make([]rtid2ti, len(sp)+1) + copy(sp2, sp[:idx]) + copy(sp2[idx+1:], sp[idx:]) + sp2[idx] = rtid2ti{rtid, pti} + x.infos.store(sp2) + } + } + x.mu.Unlock() + return +} + +func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr, omitEmpty bool, + indexstack []uint16, pv *typeInfoLoad) { + // Read up fields and store how to access the value. + // + // It uses go's rules for message selectors, + // which say that the field with the shallowest depth is selected. + // + // Note: we consciously use slices, not a map, to simulate a set. 
+	//       Typically, types have < 16 fields,
+	//       and iteration using equals is faster than maps there
+	flen := rt.NumField()
+	if flen > (1<<maxLevelsEmbedding - 1) {
+		panicv.errorf("codec: types with > %v fields are not supported - has %v fields",
+			(1<<maxLevelsEmbedding - 1), flen)
+	}
+	for j, jlen := uint16(0), uint16(flen); j < jlen; j++ {
+		f := rt.Field(int(j))
+		fkind := f.Type.Kind()
+		// skip if a func type, or is unexported, or structTag value == "-"
+		if fkind == reflect.Func || fkind == reflect.UnsafePointer {
+			continue
+		}
+
+		isUnexported := f.PkgPath != ""
+		if isUnexported && !f.Anonymous {
+			continue
+		}
+		stag := x.structTag(f.Tag)
+		if stag == "-" {
+			continue
+		}
+		var si structFieldInfo
+		var parsed bool
+		// if anonymous and no struct tag (or it's blank),
+		// and a struct (or pointer to struct), inline it.
+		if f.Anonymous && fkind != reflect.Interface {
+			ft := f.Type
+			for ft.Kind() == reflect.Ptr {
+				ft = ft.Elem()
+			}
+			isStruct := ft.Kind() == reflect.Struct
+
+			// Ignore embedded fields of unexported non-struct types.
+			if isUnexported && !isStruct {
+				continue
+			}
+			doInline := stag == ""
+			if !doInline {
+				si.parseTag(stag)
+				parsed = true
+				doInline = si.encName == ""
+			}
+			if doInline && isStruct {
+				// if etypes contains this, don't call rget again (as the fields are already seen here)
+				ftid := rt2id(ft)
+				// We cannot recurse forever; break the second time the same back-reference
+				// is seen (see the rgetMaxRecursion note above).
+				processIt := true
+				numk := 0
+				for _, k := range pv.etypes {
+					if k == ftid {
+						numk++
+						if numk == rgetMaxRecursion {
+							processIt = false
+							break
+						}
+					}
+				}
+				if processIt {
+					pv.etypes = append(pv.etypes, ftid)
+					indexstack2 := make([]uint16, len(indexstack)+1)
+					copy(indexstack2, indexstack)
+					indexstack2[len(indexstack)] = j
+					x.rget(ft, ftid, omitEmpty, indexstack2, pv)
+				}
+				continue
+			}
+		}
+
+		// after the anonymous dance: if an unexported field, skip
+		if isUnexported {
+			continue
+		}
+
+		if f.Name == "" {
+			panic(errNoFieldNameToStructFieldInfo)
+		}
+
+		if !parsed {
+			si.encName = f.Name
+			si.parseTag(stag)
+			parsed = true
+		} else if si.encName == "" {
+			si.encName = f.Name
+		}
+		si.encNameAsciiAlphaNum = true
+		for i := len(si.encName) - 1; i >= 0; i-- { // bounds-check elimination
+			b := si.encName[i]
+			if (b >= '0' && b <= '9') || (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') {
+				continue
+			}
+			si.encNameAsciiAlphaNum = false
+			break
+		}
+		si.fieldName = f.Name
+		si.flagSet(structFieldInfoFlagReady)
+
+		// pv.encNames = append(pv.encNames, si.encName)
+
+		// si.ikind = int(f.Type.Kind())
+		if len(indexstack) > maxLevelsEmbedding-1 {
+			panicv.errorf("codec: only supports up to %v depth of embedding - type has %v depth",
+				maxLevelsEmbedding-1, len(indexstack))
+		}
+		si.nis = uint8(len(indexstack)) + 1
+		copy(si.is[:], indexstack)
+		si.is[len(indexstack)] = j
+
+		if omitEmpty {
+			si.flagSet(structFieldInfoFlagOmitEmpty)
+		}
+		pv.sfis = append(pv.sfis, si)
+	}
+}
+
+func tiSep(name string) uint8 {
+	// (xn[0]%64) // (between 192-255 - outside ascii BMP)
+	// return 0xfe - (name[0] & 63)
+	// return 0xfe - (name[0] & 63) - uint8(len(name))
+	// return 0xfe - (name[0] & 63) - uint8(len(name)&63)
+	// return ((0xfe - (name[0] & 63)) & 0xf8) | (uint8(len(name) & 0x07))
+	return 0xfe - (name[0] & 63) - uint8(len(name)&63)
+}
+
+func tiSep2(name []byte) uint8 {
+	return 0xfe - (name[0] & 63) - uint8(len(name)&63)
+}
+
+// resolves the struct field info got from a call to rget.
+// Returns a trimmed, unsorted and sorted []*structFieldInfo.
+func rgetResolveSFI(rt reflect.Type, x []structFieldInfo, pv *typeInfoLoadArray) (
+	y, z []*structFieldInfo, ss []byte, anyOmitEmpty bool) {
+	sa := pv.sfiidx[:0]
+	sn := pv.b[:]
+	n := len(x)
+
+	var xn string
+	var ui uint16
+	var sep byte
+
+	for i := range x {
+		ui = uint16(i)
+		xn = x[i].encName // fieldName or encName? use encName for now.
+		if len(xn)+2 > cap(pv.b) {
+			sn = make([]byte, len(xn)+2)
+		} else {
+			sn = sn[:len(xn)+2]
+		}
+		// use a custom sep, so that misses are less frequent,
+		// since the sep (first char in search) is as unique as the first char in the field name.
+		sep = tiSep(xn)
+		sn[0], sn[len(sn)-1] = sep, 0xff
+		copy(sn[1:], xn)
+		j := bytes.Index(sa, sn)
+		if j == -1 {
+			sa = append(sa, sep)
+			sa = append(sa, xn...)
+ sa = append(sa, 0xff, byte(ui>>8), byte(ui)) + } else { + index := uint16(sa[j+len(sn)+1]) | uint16(sa[j+len(sn)])<<8 + // one of them must be reset to nil, + // and the index updated appropriately to the other one + if x[i].nis == x[index].nis { + } else if x[i].nis < x[index].nis { + sa[j+len(sn)], sa[j+len(sn)+1] = byte(ui>>8), byte(ui) + if x[index].ready() { + x[index].flagClr(structFieldInfoFlagReady) + n-- + } + } else { + if x[i].ready() { + x[i].flagClr(structFieldInfoFlagReady) + n-- + } + } + } + + } + var w []structFieldInfo + sharingArray := len(x) <= typeInfoLoadArraySfisLen // sharing array with typeInfoLoadArray + if sharingArray { + w = make([]structFieldInfo, n) + } + + // remove all the nils (non-ready) + y = make([]*structFieldInfo, n) + n = 0 + var sslen int + for i := range x { + if !x[i].ready() { + continue + } + if !anyOmitEmpty && x[i].omitEmpty() { + anyOmitEmpty = true + } + if sharingArray { + w[n] = x[i] + y[n] = &w[n] + } else { + y[n] = &x[i] + } + sslen = sslen + len(x[i].encName) + 4 + n++ + } + if n != len(y) { + panicv.errorf("failure reading struct %v - expecting %d of %d valid fields, got %d", + rt, len(y), len(x), n) + } + + z = make([]*structFieldInfo, len(y)) + copy(z, y) + sort.Sort(sfiSortedByEncName(z)) + + sharingArray = len(sa) <= typeInfoLoadArraySfiidxLen + if sharingArray { + ss = make([]byte, 0, sslen) + } else { + ss = sa[:0] // reuse the newly made sa array if necessary + } + for i := range z { + xn = z[i].encName + sep = tiSep(xn) + ui = uint16(i) + ss = append(ss, sep) + ss = append(ss, xn...) + ss = append(ss, 0xff, byte(ui>>8), byte(ui)) + } + return +} + +func implIntf(rt, iTyp reflect.Type) (base bool, indir bool) { + return rt.Implements(iTyp), reflect.PtrTo(rt).Implements(iTyp) +} + +// isEmptyStruct is only called from isEmptyValue, and checks if a struct is empty: +// - does it implement IsZero() bool +// - is it comparable, and can i compare directly using == +// - if checkStruct, then walk through the encodable fields +// and check if they are empty or not. +func isEmptyStruct(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool { + // v is a struct kind - no need to check again. + // We only check isZero on a struct kind, to reduce the amount of times + // that we lookup the rtid and typeInfo for each type as we walk the tree. + + vt := v.Type() + rtid := rt2id(vt) + if tinfos == nil { + tinfos = defTypeInfos + } + ti := tinfos.get(rtid, vt) + if ti.rtid == timeTypId { + return rv2i(v).(time.Time).IsZero() + } + if ti.isFlag(typeInfoFlagIsZeroerPtr) && v.CanAddr() { + return rv2i(v.Addr()).(isZeroer).IsZero() + } + if ti.isFlag(typeInfoFlagIsZeroer) { + return rv2i(v).(isZeroer).IsZero() + } + if ti.isFlag(typeInfoFlagComparable) { + return rv2i(v) == rv2i(reflect.Zero(vt)) + } + if !checkStruct { + return false + } + // We only care about what we can encode/decode, + // so that is what we use to check omitEmpty. + for _, si := range ti.sfiSrc { + sfv, valid := si.field(v, false) + if valid && !isEmptyValue(sfv, tinfos, deref, checkStruct) { + return false + } + } + return true +} + +// func roundFloat(x float64) float64 { +// t := math.Trunc(x) +// if math.Abs(x-t) >= 0.5 { +// return t + math.Copysign(1, x) +// } +// return t +// } + +func panicToErr(h errDecorator, err *error) { + // Note: This method MUST be called directly from defer i.e. defer panicToErr ... 
+ // else it seems the recover is not fully handled + if recoverPanicToErr { + if x := recover(); x != nil { + // fmt.Printf("panic'ing with: %v\n", x) + // debug.PrintStack() + panicValToErr(h, x, err) + } + } +} + +func panicValToErr(h errDecorator, v interface{}, err *error) { + switch xerr := v.(type) { + case nil: + case error: + switch xerr { + case nil: + case io.EOF, io.ErrUnexpectedEOF, errEncoderNotInitialized, errDecoderNotInitialized: + // treat as special (bubble up) + *err = xerr + default: + h.wrapErr(xerr, err) + } + case string: + if xerr != "" { + h.wrapErr(xerr, err) + } + case fmt.Stringer: + if xerr != nil { + h.wrapErr(xerr, err) + } + default: + h.wrapErr(v, err) + } +} + +func isImmutableKind(k reflect.Kind) (v bool) { + // return immutableKindsSet[k] + // since we know reflect.Kind is in range 0..31, then use the k%32 == k constraint + return immutableKindsSet[k%reflect.Kind(len(immutableKindsSet))] // bounds-check-elimination +} + +// ---- + +type codecFnInfo struct { + ti *typeInfo + xfFn Ext + xfTag uint64 + seq seqType + addrD bool + addrF bool // if addrD, this says whether decode function can take a value or a ptr + addrE bool +} + +// codecFn encapsulates the captured variables and the encode function. +// This way, we only do some calculations one times, and pass to the +// code block that should be called (encapsulated in a function) +// instead of executing the checks every time. +type codecFn struct { + i codecFnInfo + fe func(*Encoder, *codecFnInfo, reflect.Value) + fd func(*Decoder, *codecFnInfo, reflect.Value) + _ [1]uint64 // padding +} + +type codecRtidFn struct { + rtid uintptr + fn *codecFn +} + +// ---- + +// these "checkOverflow" functions must be inlinable, and not call anybody. +// Overflow means that the value cannot be represented without wrapping/overflow. +// Overflow=false does not mean that the value can be represented without losing precision +// (especially for floating point). + +type checkOverflow struct{} + +// func (checkOverflow) Float16(f float64) (overflow bool) { +// panicv.errorf("unimplemented") +// if f < 0 { +// f = -f +// } +// return math.MaxFloat32 < f && f <= math.MaxFloat64 +// } + +func (checkOverflow) Float32(v float64) (overflow bool) { + if v < 0 { + v = -v + } + return math.MaxFloat32 < v && v <= math.MaxFloat64 +} +func (checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) { + if bitsize == 0 || bitsize >= 64 || v == 0 { + return + } + if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc { + overflow = true + } + return +} +func (checkOverflow) Int(v int64, bitsize uint8) (overflow bool) { + if bitsize == 0 || bitsize >= 64 || v == 0 { + return + } + if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc { + overflow = true + } + return +} +func (checkOverflow) SignedInt(v uint64) (overflow bool) { + //e.g. 
-127 to 128 for int8 + pos := (v >> 63) == 0 + ui2 := v & 0x7fffffffffffffff + if pos { + if ui2 > math.MaxInt64 { + overflow = true + } + } else { + if ui2 > math.MaxInt64-1 { + overflow = true + } + } + return +} + +func (x checkOverflow) Float32V(v float64) float64 { + if x.Float32(v) { + panicv.errorf("float32 overflow: %v", v) + } + return v +} +func (x checkOverflow) UintV(v uint64, bitsize uint8) uint64 { + if x.Uint(v, bitsize) { + panicv.errorf("uint64 overflow: %v", v) + } + return v +} +func (x checkOverflow) IntV(v int64, bitsize uint8) int64 { + if x.Int(v, bitsize) { + panicv.errorf("int64 overflow: %v", v) + } + return v +} +func (x checkOverflow) SignedIntV(v uint64) int64 { + if x.SignedInt(v) { + panicv.errorf("uint64 to int64 overflow: %v", v) + } + return int64(v) +} + +// ------------------ SORT ----------------- + +func isNaN(f float64) bool { return f != f } + +// ----------------------- + +type ioFlusher interface { + Flush() error +} + +type ioPeeker interface { + Peek(int) ([]byte, error) +} + +type ioBuffered interface { + Buffered() int +} + +// ----------------------- + +type intSlice []int64 +type uintSlice []uint64 + +// type uintptrSlice []uintptr +type floatSlice []float64 +type boolSlice []bool +type stringSlice []string + +// type bytesSlice [][]byte + +func (p intSlice) Len() int { return len(p) } +func (p intSlice) Less(i, j int) bool { return p[uint(i)] < p[uint(j)] } +func (p intSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +func (p uintSlice) Len() int { return len(p) } +func (p uintSlice) Less(i, j int) bool { return p[uint(i)] < p[uint(j)] } +func (p uintSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +// func (p uintptrSlice) Len() int { return len(p) } +// func (p uintptrSlice) Less(i, j int) bool { return p[uint(i)] < p[uint(j)] } +// func (p uintptrSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +func (p floatSlice) Len() int { return len(p) } +func (p floatSlice) Less(i, j int) bool { + return p[uint(i)] < p[uint(j)] || isNaN(p[uint(i)]) && !isNaN(p[uint(j)]) +} +func (p floatSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +func (p stringSlice) Len() int { return len(p) } +func (p stringSlice) Less(i, j int) bool { return p[uint(i)] < p[uint(j)] } +func (p stringSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +// func (p bytesSlice) Len() int { return len(p) } +// func (p bytesSlice) Less(i, j int) bool { return bytes.Compare(p[uint(i)], p[uint(j)]) == -1 } +// func (p bytesSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +func (p boolSlice) Len() int { return len(p) } +func (p boolSlice) Less(i, j int) bool { return !p[uint(i)] && p[uint(j)] } +func (p boolSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +// --------------------- + +type sfiRv struct { + v *structFieldInfo + r reflect.Value +} + +type intRv struct { + v int64 + r reflect.Value +} +type intRvSlice []intRv +type uintRv struct { + v uint64 + r reflect.Value +} +type uintRvSlice []uintRv +type floatRv struct { + v float64 + r reflect.Value +} +type floatRvSlice []floatRv +type boolRv struct { + v bool + r reflect.Value +} +type boolRvSlice []boolRv +type stringRv struct { + v string + r reflect.Value +} +type stringRvSlice []stringRv +type bytesRv struct { + v []byte + r reflect.Value +} +type bytesRvSlice []bytesRv +type timeRv struct { + v time.Time + r reflect.Value +} +type 
timeRvSlice []timeRv + +func (p intRvSlice) Len() int { return len(p) } +func (p intRvSlice) Less(i, j int) bool { return p[uint(i)].v < p[uint(j)].v } +func (p intRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +func (p uintRvSlice) Len() int { return len(p) } +func (p uintRvSlice) Less(i, j int) bool { return p[uint(i)].v < p[uint(j)].v } +func (p uintRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +func (p floatRvSlice) Len() int { return len(p) } +func (p floatRvSlice) Less(i, j int) bool { + return p[uint(i)].v < p[uint(j)].v || isNaN(p[uint(i)].v) && !isNaN(p[uint(j)].v) +} +func (p floatRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +func (p stringRvSlice) Len() int { return len(p) } +func (p stringRvSlice) Less(i, j int) bool { return p[uint(i)].v < p[uint(j)].v } +func (p stringRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +func (p bytesRvSlice) Len() int { return len(p) } +func (p bytesRvSlice) Less(i, j int) bool { return bytes.Compare(p[uint(i)].v, p[uint(j)].v) == -1 } +func (p bytesRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +func (p boolRvSlice) Len() int { return len(p) } +func (p boolRvSlice) Less(i, j int) bool { return !p[uint(i)].v && p[uint(j)].v } +func (p boolRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +func (p timeRvSlice) Len() int { return len(p) } +func (p timeRvSlice) Less(i, j int) bool { return p[uint(i)].v.Before(p[uint(j)].v) } +func (p timeRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +// ----------------- + +type bytesI struct { + v []byte + i interface{} +} + +type bytesISlice []bytesI + +func (p bytesISlice) Len() int { return len(p) } +func (p bytesISlice) Less(i, j int) bool { return bytes.Compare(p[uint(i)].v, p[uint(j)].v) == -1 } +func (p bytesISlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +// ----------------- + +type set []uintptr + +func (s *set) add(v uintptr) (exists bool) { + // e.ci is always nil, or len >= 1 + x := *s + if x == nil { + x = make([]uintptr, 1, 8) + x[0] = v + *s = x + return + } + // typically, length will be 1. make this perform. + if len(x) == 1 { + if j := x[0]; j == 0 { + x[0] = v + } else if j == v { + exists = true + } else { + x = append(x, v) + *s = x + } + return + } + // check if it exists + for _, j := range x { + if j == v { + exists = true + return + } + } + // try to replace a "deleted" slot + for i, j := range x { + if j == 0 { + x[i] = v + return + } + } + // if unable to replace deleted slot, just append it. + x = append(x, v) + *s = x + return +} + +func (s *set) remove(v uintptr) (exists bool) { + x := *s + if len(x) == 0 { return } - if i != 0 { - if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc { - decErr("Overflow int value: %v", i) + if len(x) == 1 { + if x[0] == v { + x[0] = 0 } + return } - if ui != 0 { - if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc { - decErr("Overflow uint value: %v", ui) + for i, j := range x { + if j == v { + exists = true + x[i] = 0 // set it to 0, as way to delete it. + // copy(x[i:], x[i+1:]) + // x = x[:len(x)-1] + return } } + return +} + +// ------ + +// bitset types are better than [256]bool, because they permit the whole +// bitset array being on a single cache line and use less memory. +// +// Also, since pos is a byte (0-255), there's no bounds checks on indexing (cheap). 
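+//
+// e.g. pos = 77: the byte index is 77>>3 == 9, and the bit within that byte is 77&7 == 5.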
+// +// We previously had bitset128 [16]byte, and bitset32 [4]byte, but those introduces +// bounds checking, so we discarded them, and everyone uses bitset256. +// +// given x > 0 and n > 0 and x is exactly 2^n, then pos/x === pos>>n AND pos%x === pos&(x-1). +// consequently, pos/32 === pos>>5, pos/16 === pos>>4, pos/8 === pos>>3, pos%8 == pos&7 + +type bitset256 [32]byte + +func (x *bitset256) isset(pos byte) bool { + return x[pos>>3]&(1<<(pos&7)) != 0 +} + +// func (x *bitset256) issetv(pos byte) byte { +// return x[pos>>3] & (1 << (pos & 7)) +// } + +func (x *bitset256) set(pos byte) { + x[pos>>3] |= (1 << (pos & 7)) +} + +// func (x *bitset256) unset(pos byte) { +// x[pos>>3] &^= (1 << (pos & 7)) +// } + +// type bit2set256 [64]byte + +// func (x *bit2set256) set(pos byte, v1, v2 bool) { +// var pos2 uint8 = (pos & 3) << 1 // returning 0, 2, 4 or 6 +// if v1 { +// x[pos>>2] |= 1 << (pos2 + 1) +// } +// if v2 { +// x[pos>>2] |= 1 << pos2 +// } +// } +// func (x *bit2set256) get(pos byte) uint8 { +// var pos2 uint8 = (pos & 3) << 1 // returning 0, 2, 4 or 6 +// return x[pos>>2] << (6 - pos2) >> 6 // 11000000 -> 00000011 +// } + +// ------------ + +type pooler struct { + // function-scoped pooled resources + tiload sync.Pool // for type info loading + sfiRv8, sfiRv16, sfiRv32, sfiRv64, sfiRv128 sync.Pool // for struct encoding + + // lifetime-scoped pooled resources + // dn sync.Pool // for decNaked + buf1k, buf2k, buf4k, buf8k, buf16k, buf32k, buf64k sync.Pool // for [N]byte +} + +func (p *pooler) init() { + p.tiload.New = func() interface{} { return new(typeInfoLoadArray) } + + p.sfiRv8.New = func() interface{} { return new([8]sfiRv) } + p.sfiRv16.New = func() interface{} { return new([16]sfiRv) } + p.sfiRv32.New = func() interface{} { return new([32]sfiRv) } + p.sfiRv64.New = func() interface{} { return new([64]sfiRv) } + p.sfiRv128.New = func() interface{} { return new([128]sfiRv) } + + // p.dn.New = func() interface{} { x := new(decNaked); x.init(); return x } + + p.buf1k.New = func() interface{} { return new([1 * 1024]byte) } + p.buf2k.New = func() interface{} { return new([2 * 1024]byte) } + p.buf4k.New = func() interface{} { return new([4 * 1024]byte) } + p.buf8k.New = func() interface{} { return new([8 * 1024]byte) } + p.buf16k.New = func() interface{} { return new([16 * 1024]byte) } + p.buf32k.New = func() interface{} { return new([32 * 1024]byte) } + p.buf64k.New = func() interface{} { return new([64 * 1024]byte) } + +} + +// func (p *pooler) sfiRv8() (sp *sync.Pool, v interface{}) { +// return &p.strRv8, p.strRv8.Get() +// } +// func (p *pooler) sfiRv16() (sp *sync.Pool, v interface{}) { +// return &p.strRv16, p.strRv16.Get() +// } +// func (p *pooler) sfiRv32() (sp *sync.Pool, v interface{}) { +// return &p.strRv32, p.strRv32.Get() +// } +// func (p *pooler) sfiRv64() (sp *sync.Pool, v interface{}) { +// return &p.strRv64, p.strRv64.Get() +// } +// func (p *pooler) sfiRv128() (sp *sync.Pool, v interface{}) { +// return &p.strRv128, p.strRv128.Get() +// } + +// func (p *pooler) bytes1k() (sp *sync.Pool, v interface{}) { +// return &p.buf1k, p.buf1k.Get() +// } +// func (p *pooler) bytes2k() (sp *sync.Pool, v interface{}) { +// return &p.buf2k, p.buf2k.Get() +// } +// func (p *pooler) bytes4k() (sp *sync.Pool, v interface{}) { +// return &p.buf4k, p.buf4k.Get() +// } +// func (p *pooler) bytes8k() (sp *sync.Pool, v interface{}) { +// return &p.buf8k, p.buf8k.Get() +// } +// func (p *pooler) bytes16k() (sp *sync.Pool, v interface{}) { +// return &p.buf16k, p.buf16k.Get() +// } 
+// func (p *pooler) bytes32k() (sp *sync.Pool, v interface{}) { +// return &p.buf32k, p.buf32k.Get() +// } +// func (p *pooler) bytes64k() (sp *sync.Pool, v interface{}) { +// return &p.buf64k, p.buf64k.Get() +// } + +// func (p *pooler) tiLoad() (sp *sync.Pool, v interface{}) { +// return &p.tiload, p.tiload.Get() +// } + +// func (p *pooler) decNaked() (sp *sync.Pool, v interface{}) { +// return &p.dn, p.dn.Get() +// } + +// func (p *pooler) decNaked() (v *decNaked, f func(*decNaked) ) { +// sp := &(p.dn) +// vv := sp.Get() +// return vv.(*decNaked), func(x *decNaked) { sp.Put(vv) } +// } +// func (p *pooler) decNakedGet() (v interface{}) { +// return p.dn.Get() +// } +// func (p *pooler) tiLoadGet() (v interface{}) { +// return p.tiload.Get() +// } +// func (p *pooler) decNakedPut(v interface{}) { +// p.dn.Put(v) +// } +// func (p *pooler) tiLoadPut(v interface{}) { +// p.tiload.Put(v) +// } + +// ---------------------------------------------------- + +type panicHdl struct{} + +func (panicHdl) errorv(err error) { + if err != nil { + panic(err) + } +} + +func (panicHdl) errorstr(message string) { + if message != "" { + panic(message) + } +} + +func (panicHdl) errorf(format string, params ...interface{}) { + if format == "" { + } else if len(params) == 0 { + panic(format) + } else { + panic(fmt.Sprintf(format, params...)) + } +} + +// ---------------------------------------------------- + +type errDecorator interface { + wrapErr(in interface{}, out *error) +} + +type errDecoratorDef struct{} + +func (errDecoratorDef) wrapErr(v interface{}, e *error) { *e = fmt.Errorf("%v", v) } + +// ---------------------------------------------------- + +type must struct{} + +func (must) String(s string, err error) string { + if err != nil { + panicv.errorv(err) + } + return s +} +func (must) Int(s int64, err error) int64 { + if err != nil { + panicv.errorv(err) + } + return s +} +func (must) Uint(s uint64, err error) uint64 { + if err != nil { + panicv.errorv(err) + } + return s } +func (must) Float(s float64, err error) float64 { + if err != nil { + panicv.errorv(err) + } + return s +} + +// ------------------- + +type bytesBufPooler struct { + pool *sync.Pool + poolbuf interface{} +} + +func (z *bytesBufPooler) end() { + if z.pool != nil { + z.pool.Put(z.poolbuf) + z.pool, z.poolbuf = nil, nil + } +} + +func (z *bytesBufPooler) get(bufsize int) (buf []byte) { + // ensure an end is called first (if necessary) + if z.pool != nil { + z.pool.Put(z.poolbuf) + z.pool, z.poolbuf = nil, nil + } + + // // Try to use binary search. 
+ // // This is not optimal, as most folks select 1k or 2k buffers + // // so a linear search is better (sequence of if/else blocks) + // if bufsize < 1 { + // bufsize = 0 + // } else { + // bufsize-- + // bufsize /= 1024 + // } + // switch bufsize { + // case 0: + // z.pool, z.poolbuf = pool.bytes1k() + // buf = z.poolbuf.(*[1 * 1024]byte)[:] + // case 1: + // z.pool, z.poolbuf = pool.bytes2k() + // buf = z.poolbuf.(*[2 * 1024]byte)[:] + // case 2, 3: + // z.pool, z.poolbuf = pool.bytes4k() + // buf = z.poolbuf.(*[4 * 1024]byte)[:] + // case 4, 5, 6, 7: + // z.pool, z.poolbuf = pool.bytes8k() + // buf = z.poolbuf.(*[8 * 1024]byte)[:] + // case 8, 9, 10, 11, 12, 13, 14, 15: + // z.pool, z.poolbuf = pool.bytes16k() + // buf = z.poolbuf.(*[16 * 1024]byte)[:] + // case 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31: + // z.pool, z.poolbuf = pool.bytes32k() + // buf = z.poolbuf.(*[32 * 1024]byte)[:] + // default: + // z.pool, z.poolbuf = pool.bytes64k() + // buf = z.poolbuf.(*[64 * 1024]byte)[:] + // } + // return + + if bufsize <= 1*1024 { + z.pool, z.poolbuf = &pool.buf1k, pool.buf1k.Get() // pool.bytes1k() + buf = z.poolbuf.(*[1 * 1024]byte)[:] + } else if bufsize <= 2*1024 { + z.pool, z.poolbuf = &pool.buf2k, pool.buf2k.Get() // pool.bytes2k() + buf = z.poolbuf.(*[2 * 1024]byte)[:] + } else if bufsize <= 4*1024 { + z.pool, z.poolbuf = &pool.buf4k, pool.buf4k.Get() // pool.bytes4k() + buf = z.poolbuf.(*[4 * 1024]byte)[:] + } else if bufsize <= 8*1024 { + z.pool, z.poolbuf = &pool.buf8k, pool.buf8k.Get() // pool.bytes8k() + buf = z.poolbuf.(*[8 * 1024]byte)[:] + } else if bufsize <= 16*1024 { + z.pool, z.poolbuf = &pool.buf16k, pool.buf16k.Get() // pool.bytes16k() + buf = z.poolbuf.(*[16 * 1024]byte)[:] + } else if bufsize <= 32*1024 { + z.pool, z.poolbuf = &pool.buf32k, pool.buf32k.Get() // pool.bytes32k() + buf = z.poolbuf.(*[32 * 1024]byte)[:] + } else { + z.pool, z.poolbuf = &pool.buf64k, pool.buf64k.Get() // pool.bytes64k() + buf = z.poolbuf.(*[64 * 1024]byte)[:] + } + return +} + +// ---------------- + +type sfiRvPooler struct { + pool *sync.Pool + poolv interface{} +} + +func (z *sfiRvPooler) end() { + if z.pool != nil { + z.pool.Put(z.poolv) + z.pool, z.poolv = nil, nil + } +} + +func (z *sfiRvPooler) get(newlen int) (fkvs []sfiRv) { + if newlen < 0 { // bounds-check-elimination + // cannot happen // here for bounds-check-elimination + } else if newlen <= 8 { + z.pool, z.poolv = &pool.sfiRv8, pool.sfiRv8.Get() // pool.sfiRv8() + fkvs = z.poolv.(*[8]sfiRv)[:newlen] + } else if newlen <= 16 { + z.pool, z.poolv = &pool.sfiRv16, pool.sfiRv16.Get() // pool.sfiRv16() + fkvs = z.poolv.(*[16]sfiRv)[:newlen] + } else if newlen <= 32 { + z.pool, z.poolv = &pool.sfiRv32, pool.sfiRv32.Get() // pool.sfiRv32() + fkvs = z.poolv.(*[32]sfiRv)[:newlen] + } else if newlen <= 64 { + z.pool, z.poolv = &pool.sfiRv64, pool.sfiRv64.Get() // pool.sfiRv64() + fkvs = z.poolv.(*[64]sfiRv)[:newlen] + } else if newlen <= 128 { + z.pool, z.poolv = &pool.sfiRv128, pool.sfiRv128.Get() // pool.sfiRv128() + fkvs = z.poolv.(*[128]sfiRv)[:newlen] + } else { + fkvs = make([]sfiRv, newlen) + } + return +} + +// xdebugf printf. the message in red on the terminal. +// Use it in place of fmt.Printf (which it calls internally) +func xdebugf(pattern string, args ...interface{}) { + var delim string + if len(pattern) > 0 && pattern[len(pattern)-1] != '\n' { + delim = "\n" + } + fmt.Printf("\033[1;31m"+pattern+delim+"\033[0m", args...) 
+} + +// func isImmutableKind(k reflect.Kind) (v bool) { +// return false || +// k == reflect.Int || +// k == reflect.Int8 || +// k == reflect.Int16 || +// k == reflect.Int32 || +// k == reflect.Int64 || +// k == reflect.Uint || +// k == reflect.Uint8 || +// k == reflect.Uint16 || +// k == reflect.Uint32 || +// k == reflect.Uint64 || +// k == reflect.Uintptr || +// k == reflect.Float32 || +// k == reflect.Float64 || +// k == reflect.Bool || +// k == reflect.String +// } + +// func timeLocUTCName(tzint int16) string { +// if tzint == 0 { +// return "UTC" +// } +// var tzname = []byte("UTC+00:00") +// //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf.. inline below. +// //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first +// var tzhr, tzmin int16 +// if tzint < 0 { +// tzname[3] = '-' // (TODO: verify. this works here) +// tzhr, tzmin = -tzint/60, (-tzint)%60 +// } else { +// tzhr, tzmin = tzint/60, tzint%60 +// } +// tzname[4] = timeDigits[tzhr/10] +// tzname[5] = timeDigits[tzhr%10] +// tzname[7] = timeDigits[tzmin/10] +// tzname[8] = timeDigits[tzmin%10] +// return string(tzname) +// //return time.FixedZone(string(tzname), int(tzint)*60) +// } diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go b/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go index 93f12854f21ae..0cbd665e257f0 100644 --- a/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go +++ b/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go @@ -1,132 +1,121 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. package codec // All non-std package dependencies live in this file, // so porting to different environment is easy (just update functions). -import ( - "errors" - "fmt" - "math" - "reflect" -) - -var ( - raisePanicAfterRecover = false - debugging = true -) - -func panicValToErr(panicVal interface{}, err *error) { - switch xerr := panicVal.(type) { - case error: - *err = xerr - case string: - *err = errors.New(xerr) - default: - *err = fmt.Errorf("%v", panicVal) - } - if raisePanicAfterRecover { - panic(panicVal) +func pruneSignExt(v []byte, pos bool) (n int) { + if len(v) < 2 { + } else if pos && v[0] == 0 { + for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ { + } + } else if !pos && v[0] == 0xff { + for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ { + } } return } -func hIsEmptyValue(v reflect.Value, deref, checkStruct bool) bool { - switch v.Kind() { - case reflect.Invalid: - return true - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - if deref { - if v.IsNil() { - return true - } - return hIsEmptyValue(v.Elem(), deref, checkStruct) - } else { - return v.IsNil() +// validate that this function is correct ... 
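+// (the input is an IEEE 754 binary16 value: 1 sign bit, 5 exponent bits, 10 mantissa bits)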
+// culled from OGRE (Object-Oriented Graphics Rendering Engine) +// function: halfToFloatI (http://stderr.org/doc/ogre-doc/api/OgreBitwise_8h-source.html) +func halfFloatToFloatBits(yy uint16) (d uint32) { + y := uint32(yy) + s := (y >> 15) & 0x01 + e := (y >> 10) & 0x1f + m := y & 0x03ff + + if e == 0 { + if m == 0 { // plu or minus 0 + return s << 31 } - case reflect.Struct: - if !checkStruct { - return false + // Denormalized number -- renormalize it + for (m & 0x00000400) == 0 { + m <<= 1 + e -= 1 } - // return true if all fields are empty. else return false. - - // we cannot use equality check, because some fields may be maps/slices/etc - // and consequently the structs are not comparable. - // return v.Interface() == reflect.Zero(v.Type()).Interface() - for i, n := 0, v.NumField(); i < n; i++ { - if !hIsEmptyValue(v.Field(i), deref, checkStruct) { - return false - } + e += 1 + const zz uint32 = 0x0400 + m &= ^zz + } else if e == 31 { + if m == 0 { // Inf + return (s << 31) | 0x7f800000 } - return true + return (s << 31) | 0x7f800000 | (m << 13) // NaN } - return false + e = e + (127 - 15) + m = m << 13 + return (s << 31) | (e << 23) | m } -func isEmptyValue(v reflect.Value) bool { - return hIsEmptyValue(v, derefForIsEmptyValue, checkStructForEmptyValue) -} +// GrowCap will return a new capacity for a slice, given the following: +// - oldCap: current capacity +// - unit: in-memory size of an element +// - num: number of elements to add +func growCap(oldCap, unit, num int) (newCap int) { + // appendslice logic (if cap < 1024, *2, else *1.25): + // leads to many copy calls, especially when copying bytes. + // bytes.Buffer model (2*cap + n): much better for bytes. + // smarter way is to take the byte-size of the appended element(type) into account -func debugf(format string, args ...interface{}) { - if debugging { - if len(format) == 0 || format[len(format)-1] != '\n' { - format = format + "\n" - } - fmt.Printf(format, args...) + // maintain 3 thresholds: + // t1: if cap <= t1, newcap = 2x + // t2: if cap <= t2, newcap = 1.75x + // t3: if cap <= t3, newcap = 1.5x + // else newcap = 1.25x + // + // t1, t2, t3 >= 1024 always. + // i.e. if unit size >= 16, then always do 2x or 1.25x (ie t1, t2, t3 are all same) + // + // With this, appending for bytes increase by: + // 100% up to 4K + // 75% up to 8K + // 50% up to 16K + // 25% beyond that + + // unit can be 0 e.g. for struct{}{}; handle that appropriately + var t1, t2, t3 int // thresholds + if unit <= 1 { + t1, t2, t3 = 4*1024, 8*1024, 16*1024 + } else if unit < 16 { + t3 = 16 / unit * 1024 + t1 = t3 * 1 / 4 + t2 = t3 * 2 / 4 + } else { + t1, t2, t3 = 1024, 1024, 1024 } -} -func pruneSignExt(v []byte, pos bool) (n int) { - if len(v) < 2 { - } else if pos && v[0] == 0 { - for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ { - } - } else if !pos && v[0] == 0xff { - for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ { - } + var x int // temporary variable + + // x is multiplier here: one of 5, 6, 7 or 8; incr of 25%, 50%, 75% or 100% respectively + if oldCap <= t1 { // [0,t1] + x = 8 + } else if oldCap > t3 { // (t3,infinity] + x = 5 + } else if oldCap <= t2 { // (t1,t2] + x = 7 + } else { // (t2,t3] + x = 6 } - return -} + newCap = x * oldCap / 4 -func implementsIntf(typ, iTyp reflect.Type) (success bool, indir int8) { - if typ == nil { - return + if num > 0 { + newCap += num } - rt := typ - // The type might be a pointer and we need to keep - // dereferencing to the base type until we find an implementation. 
- for { - if rt.Implements(iTyp) { - return true, indir - } - if p := rt; p.Kind() == reflect.Ptr { - indir++ - if indir >= math.MaxInt8 { // insane number of indirections - return false, 0 - } - rt = p.Elem() - continue + + // ensure newCap is a multiple of 64 (if it is > 64) or 16. + if newCap > 64 { + if x = newCap % 64; x != 0 { + x = newCap / 64 + newCap = 64 * (x + 1) } - break - } - // No luck yet, but if this is a base type (non-pointer), the pointer might satisfy. - if typ.Kind() != reflect.Ptr { - // Not a pointer, but does the pointer work? - if reflect.PtrTo(typ).Implements(iTyp) { - return true, -1 + } else { + if x = newCap % 16; x != 0 { + x = newCap / 16 + newCap = 16 * (x + 1) } } - return false, 0 + return } diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/helper_not_unsafe.go b/vendor/github.com/hashicorp/go-msgpack/codec/helper_not_unsafe.go new file mode 100644 index 0000000000000..74987f9f7f74a --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/helper_not_unsafe.go @@ -0,0 +1,331 @@ +// +build !go1.7 safe appengine + +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "reflect" + "sync/atomic" + "time" +) + +const safeMode = true + +// stringView returns a view of the []byte as a string. +// In unsafe mode, it doesn't incur allocation and copying caused by conversion. +// In regular safe mode, it is an allocation and copy. +// +// Usage: Always maintain a reference to v while result of this call is in use, +// and call keepAlive4BytesView(v) at point where done with view. +func stringView(v []byte) string { + return string(v) +} + +// bytesView returns a view of the string as a []byte. +// In unsafe mode, it doesn't incur allocation and copying caused by conversion. +// In regular safe mode, it is an allocation and copy. +// +// Usage: Always maintain a reference to v while result of this call is in use, +// and call keepAlive4BytesView(v) at point where done with view. +func bytesView(v string) []byte { + return []byte(v) +} + +func definitelyNil(v interface{}) bool { + // this is a best-effort option. + // We just return false, so we don't unnecessarily incur the cost of reflection this early. 
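+	// (the unsafe variant in helper_unsafe.go instead checks the interface's data word directly.)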
+ return false +} + +func rv2i(rv reflect.Value) interface{} { + return rv.Interface() +} + +func rt2id(rt reflect.Type) uintptr { + return reflect.ValueOf(rt).Pointer() +} + +// func rv2rtid(rv reflect.Value) uintptr { +// return reflect.ValueOf(rv.Type()).Pointer() +// } + +func i2rtid(i interface{}) uintptr { + return reflect.ValueOf(reflect.TypeOf(i)).Pointer() +} + +// -------------------------- + +func isEmptyValue(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool { + switch v.Kind() { + case reflect.Invalid: + return true + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + if deref { + if v.IsNil() { + return true + } + return isEmptyValue(v.Elem(), tinfos, deref, checkStruct) + } + return v.IsNil() + case reflect.Struct: + return isEmptyStruct(v, tinfos, deref, checkStruct) + } + return false +} + +// -------------------------- +// type ptrToRvMap struct{} + +// func (*ptrToRvMap) init() {} +// func (*ptrToRvMap) get(i interface{}) reflect.Value { +// return reflect.ValueOf(i).Elem() +// } + +// -------------------------- +type atomicClsErr struct { + v atomic.Value +} + +func (x *atomicClsErr) load() (e clsErr) { + if i := x.v.Load(); i != nil { + e = i.(clsErr) + } + return +} + +func (x *atomicClsErr) store(p clsErr) { + x.v.Store(p) +} + +// -------------------------- +type atomicTypeInfoSlice struct { // expected to be 2 words + v atomic.Value +} + +func (x *atomicTypeInfoSlice) load() (e []rtid2ti) { + if i := x.v.Load(); i != nil { + e = i.([]rtid2ti) + } + return +} + +func (x *atomicTypeInfoSlice) store(p []rtid2ti) { + x.v.Store(p) +} + +// -------------------------- +type atomicRtidFnSlice struct { // expected to be 2 words + v atomic.Value +} + +func (x *atomicRtidFnSlice) load() (e []codecRtidFn) { + if i := x.v.Load(); i != nil { + e = i.([]codecRtidFn) + } + return +} + +func (x *atomicRtidFnSlice) store(p []codecRtidFn) { + x.v.Store(p) +} + +// -------------------------- +func (n *decNaked) ru() reflect.Value { + return reflect.ValueOf(&n.u).Elem() +} +func (n *decNaked) ri() reflect.Value { + return reflect.ValueOf(&n.i).Elem() +} +func (n *decNaked) rf() reflect.Value { + return reflect.ValueOf(&n.f).Elem() +} +func (n *decNaked) rl() reflect.Value { + return reflect.ValueOf(&n.l).Elem() +} +func (n *decNaked) rs() reflect.Value { + return reflect.ValueOf(&n.s).Elem() +} +func (n *decNaked) rt() reflect.Value { + return reflect.ValueOf(&n.t).Elem() +} +func (n *decNaked) rb() reflect.Value { + return reflect.ValueOf(&n.b).Elem() +} + +// -------------------------- +func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) { + rv.SetBytes(d.rawBytes()) +} + +func (d *Decoder) kString(f *codecFnInfo, rv reflect.Value) { + rv.SetString(d.d.DecodeString()) +} + +func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) { + rv.SetBool(d.d.DecodeBool()) +} + +func (d *Decoder) kTime(f *codecFnInfo, rv reflect.Value) { + rv.Set(reflect.ValueOf(d.d.DecodeTime())) +} + +func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) { + fv := d.d.DecodeFloat64() + if chkOvf.Float32(fv) { + d.errorf("float32 overflow: %v", fv) + } + rv.SetFloat(fv) +} + +func (d 
*Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) { + rv.SetFloat(d.d.DecodeFloat64()) +} + +func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) +} + +func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 8)) +} + +func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 16)) +} + +func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 32)) +} + +func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(d.d.DecodeInt64()) +} + +func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) +} + +func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) +} + +func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 8)) +} + +func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 16)) +} + +func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 32)) +} + +func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(d.d.DecodeUint64()) +} + +// ---------------- + +func (e *Encoder) kBool(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeBool(rv.Bool()) +} + +func (e *Encoder) kTime(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeTime(rv2i(rv).(time.Time)) +} + +func (e *Encoder) kString(f *codecFnInfo, rv reflect.Value) { + s := rv.String() + if e.h.StringToRaw { + e.e.EncodeStringBytesRaw(bytesView(s)) + } else { + e.e.EncodeStringEnc(cUTF8, s) + } +} + +func (e *Encoder) kFloat64(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeFloat64(rv.Float()) +} + +func (e *Encoder) kFloat32(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeFloat32(float32(rv.Float())) +} + +func (e *Encoder) kInt(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeInt(rv.Int()) +} + +func (e *Encoder) kInt8(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeInt(rv.Int()) +} + +func (e *Encoder) kInt16(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeInt(rv.Int()) +} + +func (e *Encoder) kInt32(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeInt(rv.Int()) +} + +func (e *Encoder) kInt64(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeInt(rv.Int()) +} + +func (e *Encoder) kUint(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) +} + +func (e *Encoder) kUint8(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) +} + +func (e *Encoder) kUint16(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) +} + +func (e *Encoder) kUint32(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) +} + +func (e *Encoder) kUint64(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) +} + +func (e *Encoder) kUintptr(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) +} + +// // keepAlive4BytesView maintains a reference to the input parameter for bytesView. +// // +// // Usage: call this at point where done with the bytes view. +// func keepAlive4BytesView(v string) {} + +// // keepAlive4BytesView maintains a reference to the input parameter for stringView. +// // +// // Usage: call this at point where done with the string view. 
+// func keepAlive4StringView(v []byte) {} + +// func definitelyNil(v interface{}) bool { +// rv := reflect.ValueOf(v) +// switch rv.Kind() { +// case reflect.Invalid: +// return true +// case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Slice, reflect.Map, reflect.Func: +// return rv.IsNil() +// default: +// return false +// } +// } diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/helper_unsafe.go b/vendor/github.com/hashicorp/go-msgpack/codec/helper_unsafe.go new file mode 100644 index 0000000000000..3bc34d90d7c2c --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/helper_unsafe.go @@ -0,0 +1,745 @@ +// +build !safe +// +build !appengine +// +build go1.7 + +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "reflect" + "sync/atomic" + "time" + "unsafe" +) + +// This file has unsafe variants of some helper methods. +// NOTE: See helper_not_unsafe.go for the usage information. + +// var zeroRTv [4]uintptr + +const safeMode = false +const unsafeFlagIndir = 1 << 7 // keep in sync with GO_ROOT/src/reflect/value.go + +type unsafeString struct { + Data unsafe.Pointer + Len int +} + +type unsafeSlice struct { + Data unsafe.Pointer + Len int + Cap int +} + +type unsafeIntf struct { + typ unsafe.Pointer + word unsafe.Pointer +} + +type unsafeReflectValue struct { + typ unsafe.Pointer + ptr unsafe.Pointer + flag uintptr +} + +func stringView(v []byte) string { + if len(v) == 0 { + return "" + } + bx := (*unsafeSlice)(unsafe.Pointer(&v)) + return *(*string)(unsafe.Pointer(&unsafeString{bx.Data, bx.Len})) +} + +func bytesView(v string) []byte { + if len(v) == 0 { + return zeroByteSlice + } + sx := (*unsafeString)(unsafe.Pointer(&v)) + return *(*[]byte)(unsafe.Pointer(&unsafeSlice{sx.Data, sx.Len, sx.Len})) +} + +func definitelyNil(v interface{}) bool { + // There is no global way of checking if an interface is nil. + // For true references (map, ptr, func, chan), you can just look + // at the word of the interface. However, for slices, you have to dereference + // the word, and get a pointer to the 3-word interface value. + // + // However, the following are cheap calls + // - TypeOf(interface): cheap 2-line call. + // - ValueOf(interface{}): expensive + // - type.Kind: cheap call through an interface + // - Value.Type(): cheap call + // except it's a method value (e.g. r.Read, which implies that it is a Func) + + return ((*unsafeIntf)(unsafe.Pointer(&v))).word == nil +} + +func rv2i(rv reflect.Value) interface{} { + // TODO: consider a more generally-known optimization for reflect.Value ==> Interface + // + // Currently, we use this fragile method that taps into implememtation details from + // the source go stdlib reflect/value.go, and trims the implementation. 
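+	//
+	// The flag's low 5 bits hold the reflect.Kind; refBitset (set up elsewhere in
+	// this package) marks the reference kinds whose data word may itself be stored
+	// indirectly (flagIndir), as the comment below notes.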
+ + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + // true references (map, func, chan, ptr - NOT slice) may be double-referenced as flagIndir + var ptr unsafe.Pointer + if refBitset.isset(byte(urv.flag&(1<<5-1))) && urv.flag&unsafeFlagIndir != 0 { + ptr = *(*unsafe.Pointer)(urv.ptr) + } else { + ptr = urv.ptr + } + return *(*interface{})(unsafe.Pointer(&unsafeIntf{typ: urv.typ, word: ptr})) +} + +func rt2id(rt reflect.Type) uintptr { + return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word) +} + +// func rv2rtid(rv reflect.Value) uintptr { +// return uintptr((*unsafeReflectValue)(unsafe.Pointer(&rv)).typ) +// } + +func i2rtid(i interface{}) uintptr { + return uintptr(((*unsafeIntf)(unsafe.Pointer(&i))).typ) +} + +// -------------------------- + +func isEmptyValue(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool { + urv := (*unsafeReflectValue)(unsafe.Pointer(&v)) + if urv.flag == 0 { + return true + } + switch v.Kind() { + case reflect.Invalid: + return true + case reflect.String: + return (*unsafeString)(urv.ptr).Len == 0 + case reflect.Slice: + return (*unsafeSlice)(urv.ptr).Len == 0 + case reflect.Bool: + return !*(*bool)(urv.ptr) + case reflect.Int: + return *(*int)(urv.ptr) == 0 + case reflect.Int8: + return *(*int8)(urv.ptr) == 0 + case reflect.Int16: + return *(*int16)(urv.ptr) == 0 + case reflect.Int32: + return *(*int32)(urv.ptr) == 0 + case reflect.Int64: + return *(*int64)(urv.ptr) == 0 + case reflect.Uint: + return *(*uint)(urv.ptr) == 0 + case reflect.Uint8: + return *(*uint8)(urv.ptr) == 0 + case reflect.Uint16: + return *(*uint16)(urv.ptr) == 0 + case reflect.Uint32: + return *(*uint32)(urv.ptr) == 0 + case reflect.Uint64: + return *(*uint64)(urv.ptr) == 0 + case reflect.Uintptr: + return *(*uintptr)(urv.ptr) == 0 + case reflect.Float32: + return *(*float32)(urv.ptr) == 0 + case reflect.Float64: + return *(*float64)(urv.ptr) == 0 + case reflect.Interface: + isnil := urv.ptr == nil || *(*unsafe.Pointer)(urv.ptr) == nil + if deref { + if isnil { + return true + } + return isEmptyValue(v.Elem(), tinfos, deref, checkStruct) + } + return isnil + case reflect.Ptr: + // isnil := urv.ptr == nil (not sufficient, as a pointer value encodes the type) + isnil := urv.ptr == nil || *(*unsafe.Pointer)(urv.ptr) == nil + if deref { + if isnil { + return true + } + return isEmptyValue(v.Elem(), tinfos, deref, checkStruct) + } + return isnil + case reflect.Struct: + return isEmptyStruct(v, tinfos, deref, checkStruct) + case reflect.Map, reflect.Array, reflect.Chan: + return v.Len() == 0 + } + return false +} + +// -------------------------- + +// atomicXXX is expected to be 2 words (for symmetry with atomic.Value) +// +// Note that we do not atomically load/store length and data pointer separately, +// as this could lead to some races. Instead, we atomically load/store cappedSlice. +// +// Note: with atomic.(Load|Store)Pointer, we MUST work with an unsafe.Pointer directly. 
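+//
+// The load/store pairs below then reduce to (using atomicTypeInfoSlice as the
+// example, with a nil check on load):
+//   store: atomic.StorePointer(&x.v, unsafe.Pointer(&p))
+//   load:  s = *(*[]rtid2ti)(atomic.LoadPointer(&x.v))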
+ +// ---------------------- +type atomicTypeInfoSlice struct { + v unsafe.Pointer // *[]rtid2ti + _ uintptr // padding (atomicXXX expected to be 2 words) +} + +func (x *atomicTypeInfoSlice) load() (s []rtid2ti) { + x2 := atomic.LoadPointer(&x.v) + if x2 != nil { + s = *(*[]rtid2ti)(x2) + } + return +} + +func (x *atomicTypeInfoSlice) store(p []rtid2ti) { + atomic.StorePointer(&x.v, unsafe.Pointer(&p)) +} + +// -------------------------- +type atomicRtidFnSlice struct { + v unsafe.Pointer // *[]codecRtidFn + _ uintptr // padding (atomicXXX expected to be 2 words) +} + +func (x *atomicRtidFnSlice) load() (s []codecRtidFn) { + x2 := atomic.LoadPointer(&x.v) + if x2 != nil { + s = *(*[]codecRtidFn)(x2) + } + return +} + +func (x *atomicRtidFnSlice) store(p []codecRtidFn) { + atomic.StorePointer(&x.v, unsafe.Pointer(&p)) +} + +// -------------------------- +type atomicClsErr struct { + v unsafe.Pointer // *clsErr + _ uintptr // padding (atomicXXX expected to be 2 words) +} + +func (x *atomicClsErr) load() (e clsErr) { + x2 := (*clsErr)(atomic.LoadPointer(&x.v)) + if x2 != nil { + e = *x2 + } + return +} + +func (x *atomicClsErr) store(p clsErr) { + atomic.StorePointer(&x.v, unsafe.Pointer(&p)) +} + +// -------------------------- + +// to create a reflect.Value for each member field of decNaked, +// we first create a global decNaked, and create reflect.Value +// for them all. +// This way, we have the flags and type in the reflect.Value. +// Then, when a reflect.Value is called, we just copy it, +// update the ptr to the decNaked's, and return it. + +type unsafeDecNakedWrapper struct { + decNaked + ru, ri, rf, rl, rs, rb, rt reflect.Value // mapping to the primitives above +} + +func (n *unsafeDecNakedWrapper) init() { + n.ru = reflect.ValueOf(&n.u).Elem() + n.ri = reflect.ValueOf(&n.i).Elem() + n.rf = reflect.ValueOf(&n.f).Elem() + n.rl = reflect.ValueOf(&n.l).Elem() + n.rs = reflect.ValueOf(&n.s).Elem() + n.rt = reflect.ValueOf(&n.t).Elem() + n.rb = reflect.ValueOf(&n.b).Elem() + // n.rr[] = reflect.ValueOf(&n.) 
+} + +var defUnsafeDecNakedWrapper unsafeDecNakedWrapper + +func init() { + defUnsafeDecNakedWrapper.init() +} + +func (n *decNaked) ru() (v reflect.Value) { + v = defUnsafeDecNakedWrapper.ru + ((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.u) + return +} +func (n *decNaked) ri() (v reflect.Value) { + v = defUnsafeDecNakedWrapper.ri + ((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.i) + return +} +func (n *decNaked) rf() (v reflect.Value) { + v = defUnsafeDecNakedWrapper.rf + ((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.f) + return +} +func (n *decNaked) rl() (v reflect.Value) { + v = defUnsafeDecNakedWrapper.rl + ((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.l) + return +} +func (n *decNaked) rs() (v reflect.Value) { + v = defUnsafeDecNakedWrapper.rs + ((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.s) + return +} +func (n *decNaked) rt() (v reflect.Value) { + v = defUnsafeDecNakedWrapper.rt + ((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.t) + return +} +func (n *decNaked) rb() (v reflect.Value) { + v = defUnsafeDecNakedWrapper.rb + ((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.b) + return +} + +// -------------------------- +func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*[]byte)(urv.ptr) = d.rawBytes() +} + +func (d *Decoder) kString(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*string)(urv.ptr) = d.d.DecodeString() +} + +func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*bool)(urv.ptr) = d.d.DecodeBool() +} + +func (d *Decoder) kTime(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*time.Time)(urv.ptr) = d.d.DecodeTime() +} + +func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) { + fv := d.d.DecodeFloat64() + if chkOvf.Float32(fv) { + d.errorf("float32 overflow: %v", fv) + } + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*float32)(urv.ptr) = float32(fv) +} + +func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*float64)(urv.ptr) = d.d.DecodeFloat64() +} + +func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*int)(urv.ptr) = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) +} + +func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*int8)(urv.ptr) = int8(chkOvf.IntV(d.d.DecodeInt64(), 8)) +} + +func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*int16)(urv.ptr) = int16(chkOvf.IntV(d.d.DecodeInt64(), 16)) +} + +func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*int32)(urv.ptr) = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) +} + +func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*int64)(urv.ptr) = d.d.DecodeInt64() +} + +func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uint)(urv.ptr) = uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) +} + +func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + 
*(*uintptr)(urv.ptr) = uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) +} + +func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uint8)(urv.ptr) = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) +} + +func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uint16)(urv.ptr) = uint16(chkOvf.UintV(d.d.DecodeUint64(), 16)) +} + +func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uint32)(urv.ptr) = uint32(chkOvf.UintV(d.d.DecodeUint64(), 32)) +} + +func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uint64)(urv.ptr) = d.d.DecodeUint64() +} + +// ------------ + +func (e *Encoder) kBool(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeBool(*(*bool)(v.ptr)) +} + +func (e *Encoder) kTime(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeTime(*(*time.Time)(v.ptr)) +} + +func (e *Encoder) kString(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + s := *(*string)(v.ptr) + if e.h.StringToRaw { + e.e.EncodeStringBytesRaw(bytesView(s)) + } else { + e.e.EncodeStringEnc(cUTF8, s) + } +} + +func (e *Encoder) kFloat64(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeFloat64(*(*float64)(v.ptr)) +} + +func (e *Encoder) kFloat32(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeFloat32(*(*float32)(v.ptr)) +} + +func (e *Encoder) kInt(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeInt(int64(*(*int)(v.ptr))) +} + +func (e *Encoder) kInt8(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeInt(int64(*(*int8)(v.ptr))) +} + +func (e *Encoder) kInt16(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeInt(int64(*(*int16)(v.ptr))) +} + +func (e *Encoder) kInt32(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeInt(int64(*(*int32)(v.ptr))) +} + +func (e *Encoder) kInt64(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeInt(int64(*(*int64)(v.ptr))) +} + +func (e *Encoder) kUint(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeUint(uint64(*(*uint)(v.ptr))) +} + +func (e *Encoder) kUint8(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeUint(uint64(*(*uint8)(v.ptr))) +} + +func (e *Encoder) kUint16(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeUint(uint64(*(*uint16)(v.ptr))) +} + +func (e *Encoder) kUint32(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeUint(uint64(*(*uint32)(v.ptr))) +} + +func (e *Encoder) kUint64(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeUint(uint64(*(*uint64)(v.ptr))) +} + +func (e *Encoder) kUintptr(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeUint(uint64(*(*uintptr)(v.ptr))) +} + +// ------------ + +// func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) { +// urv := 
(*unsafeReflectValue)(unsafe.Pointer(&rv)) +// // if urv.flag&unsafeFlagIndir != 0 { +// // urv.ptr = *(*unsafe.Pointer)(urv.ptr) +// // } +// *(*[]byte)(urv.ptr) = d.rawBytes() +// } + +// func rv0t(rt reflect.Type) reflect.Value { +// ut := (*unsafeIntf)(unsafe.Pointer(&rt)) +// // we need to determine whether ifaceIndir, and then whether to just pass 0 as the ptr +// uv := unsafeReflectValue{ut.word, &zeroRTv, flag(rt.Kind())} +// return *(*reflect.Value)(unsafe.Pointer(&uv}) +// } + +// func rv2i(rv reflect.Value) interface{} { +// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) +// // true references (map, func, chan, ptr - NOT slice) may be double-referenced as flagIndir +// var ptr unsafe.Pointer +// // kk := reflect.Kind(urv.flag & (1<<5 - 1)) +// // if (kk == reflect.Map || kk == reflect.Ptr || kk == reflect.Chan || kk == reflect.Func) && urv.flag&unsafeFlagIndir != 0 { +// if refBitset.isset(byte(urv.flag&(1<<5-1))) && urv.flag&unsafeFlagIndir != 0 { +// ptr = *(*unsafe.Pointer)(urv.ptr) +// } else { +// ptr = urv.ptr +// } +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{typ: urv.typ, word: ptr})) +// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) +// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// } + +// func definitelyNil(v interface{}) bool { +// var ui *unsafeIntf = (*unsafeIntf)(unsafe.Pointer(&v)) +// if ui.word == nil { +// return true +// } +// var tk = reflect.TypeOf(v).Kind() +// return (tk == reflect.Interface || tk == reflect.Slice) && *(*unsafe.Pointer)(ui.word) == nil +// fmt.Printf(">>>> definitely nil: isnil: %v, TYPE: \t%T, word: %v, *word: %v, type: %v, nil: %v\n", +// v == nil, v, word, *((*unsafe.Pointer)(word)), ui.typ, nil) +// } + +// func keepAlive4BytesView(v string) { +// runtime.KeepAlive(v) +// } + +// func keepAlive4StringView(v []byte) { +// runtime.KeepAlive(v) +// } + +// func rt2id(rt reflect.Type) uintptr { +// return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word) +// // var i interface{} = rt +// // // ui := (*unsafeIntf)(unsafe.Pointer(&i)) +// // return ((*unsafeIntf)(unsafe.Pointer(&i))).word +// } + +// func rv2i(rv reflect.Value) interface{} { +// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) +// // non-reference type: already indir +// // reference type: depend on flagIndir property ('cos maybe was double-referenced) +// // const (unsafeRvFlagKindMask = 1<<5 - 1 , unsafeRvFlagIndir = 1 << 7 ) +// // rvk := reflect.Kind(urv.flag & (1<<5 - 1)) +// // if (rvk == reflect.Chan || +// // rvk == reflect.Func || +// // rvk == reflect.Interface || +// // rvk == reflect.Map || +// // rvk == reflect.Ptr || +// // rvk == reflect.UnsafePointer) && urv.flag&(1<<8) != 0 { +// // fmt.Printf(">>>>> ---- double indirect reference: %v, %v\n", rvk, rv.Type()) +// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) +// // } +// if urv.flag&(1<<5-1) == uintptr(reflect.Map) && urv.flag&(1<<7) != 0 { +// // fmt.Printf(">>>>> ---- double indirect reference: %v, %v\n", rvk, rv.Type()) +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) +// } +// // fmt.Printf(">>>>> ++++ direct reference: %v, %v\n", rvk, rv.Type()) +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// } + +// const ( +// unsafeRvFlagKindMask = 1<<5 - 1 +// unsafeRvKindDirectIface = 1 << 5 +// unsafeRvFlagIndir = 1 << 7 +// unsafeRvFlagAddr = 1 << 8 
+// unsafeRvFlagMethod = 1 << 9 + +// _USE_RV_INTERFACE bool = false +// _UNSAFE_RV_DEBUG = true +// ) + +// type unsafeRtype struct { +// _ [2]uintptr +// _ uint32 +// _ uint8 +// _ uint8 +// _ uint8 +// kind uint8 +// _ [2]uintptr +// _ int32 +// } + +// func _rv2i(rv reflect.Value) interface{} { +// // Note: From use, +// // - it's never an interface +// // - the only calls here are for ifaceIndir types. +// // (though that conditional is wrong) +// // To know for sure, we need the value of t.kind (which is not exposed). +// // +// // Need to validate the path: type is indirect ==> only value is indirect ==> default (value is direct) +// // - Type indirect, Value indirect: ==> numbers, boolean, slice, struct, array, string +// // - Type Direct, Value indirect: ==> map??? +// // - Type Direct, Value direct: ==> pointers, unsafe.Pointer, func, chan, map +// // +// // TRANSLATES TO: +// // if typeIndirect { } else if valueIndirect { } else { } +// // +// // Since we don't deal with funcs, then "flagNethod" is unset, and can be ignored. + +// if _USE_RV_INTERFACE { +// return rv.Interface() +// } +// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + +// // if urv.flag&unsafeRvFlagMethod != 0 || urv.flag&unsafeRvFlagKindMask == uintptr(reflect.Interface) { +// // println("***** IS flag method or interface: delegating to rv.Interface()") +// // return rv.Interface() +// // } + +// // if urv.flag&unsafeRvFlagKindMask == uintptr(reflect.Interface) { +// // println("***** IS Interface: delegate to rv.Interface") +// // return rv.Interface() +// // } +// // if urv.flag&unsafeRvFlagKindMask&unsafeRvKindDirectIface == 0 { +// // if urv.flag&unsafeRvFlagAddr == 0 { +// // println("***** IS ifaceIndir typ") +// // // ui := unsafeIntf{word: urv.ptr, typ: urv.typ} +// // // return *(*interface{})(unsafe.Pointer(&ui)) +// // // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// // } +// // } else if urv.flag&unsafeRvFlagIndir != 0 { +// // println("***** IS flagindir") +// // // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) +// // } else { +// // println("***** NOT flagindir") +// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// // } +// // println("***** default: delegate to rv.Interface") + +// urt := (*unsafeRtype)(unsafe.Pointer(urv.typ)) +// if _UNSAFE_RV_DEBUG { +// fmt.Printf(">>>> start: %v: ", rv.Type()) +// fmt.Printf("%v - %v\n", *urv, *urt) +// } +// if urt.kind&unsafeRvKindDirectIface == 0 { +// if _UNSAFE_RV_DEBUG { +// fmt.Printf("**** +ifaceIndir type: %v\n", rv.Type()) +// } +// // println("***** IS ifaceIndir typ") +// // if true || urv.flag&unsafeRvFlagAddr == 0 { +// // // println(" ***** IS NOT addr") +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// // } +// } else if urv.flag&unsafeRvFlagIndir != 0 { +// if _UNSAFE_RV_DEBUG { +// fmt.Printf("**** +flagIndir type: %v\n", rv.Type()) +// } +// // println("***** IS flagindir") +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) +// } else { +// if _UNSAFE_RV_DEBUG { +// fmt.Printf("**** -flagIndir type: %v\n", rv.Type()) +// } +// // println("***** NOT flagindir") +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// } +// // println("***** default: delegating to rv.Interface()") +// // return rv.Interface() +// } + +// var staticM0 = make(map[string]uint64) +// var staticI0 = 
(int32)(-5) + +// func staticRv2iTest() { +// i0 := (int32)(-5) +// m0 := make(map[string]uint16) +// m0["1"] = 1 +// for _, i := range []interface{}{ +// (int)(7), +// (uint)(8), +// (int16)(-9), +// (uint16)(19), +// (uintptr)(77), +// (bool)(true), +// float32(-32.7), +// float64(64.9), +// complex(float32(19), 5), +// complex(float64(-32), 7), +// [4]uint64{1, 2, 3, 4}, +// (chan<- int)(nil), // chan, +// rv2i, // func +// io.Writer(ioutil.Discard), +// make(map[string]uint), +// (map[string]uint)(nil), +// staticM0, +// m0, +// &m0, +// i0, +// &i0, +// &staticI0, +// &staticM0, +// []uint32{6, 7, 8}, +// "abc", +// Raw{}, +// RawExt{}, +// &Raw{}, +// &RawExt{}, +// unsafe.Pointer(&i0), +// } { +// i2 := rv2i(reflect.ValueOf(i)) +// eq := reflect.DeepEqual(i, i2) +// fmt.Printf(">>>> %v == %v? %v\n", i, i2, eq) +// } +// // os.Exit(0) +// } + +// func init() { +// staticRv2iTest() +// } + +// func rv2i(rv reflect.Value) interface{} { +// if _USE_RV_INTERFACE || rv.Kind() == reflect.Interface || rv.CanAddr() { +// return rv.Interface() +// } +// // var i interface{} +// // ui := (*unsafeIntf)(unsafe.Pointer(&i)) +// var ui unsafeIntf +// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) +// // fmt.Printf("urv: flag: %b, typ: %b, ptr: %b\n", urv.flag, uintptr(urv.typ), uintptr(urv.ptr)) +// if (urv.flag&unsafeRvFlagKindMask)&unsafeRvKindDirectIface == 0 { +// if urv.flag&unsafeRvFlagAddr != 0 { +// println("***** indirect and addressable! Needs typed move - delegate to rv.Interface()") +// return rv.Interface() +// } +// println("****** indirect type/kind") +// ui.word = urv.ptr +// } else if urv.flag&unsafeRvFlagIndir != 0 { +// println("****** unsafe rv flag indir") +// ui.word = *(*unsafe.Pointer)(urv.ptr) +// } else { +// println("****** default: assign ptr to word directly") +// ui.word = urv.ptr +// } +// // ui.word = urv.ptr +// ui.typ = urv.typ +// // fmt.Printf("(pointers) ui.typ: %p, word: %p\n", ui.typ, ui.word) +// // fmt.Printf("(binary) ui.typ: %b, word: %b\n", uintptr(ui.typ), uintptr(ui.word)) +// return *(*interface{})(unsafe.Pointer(&ui)) +// // return i +// } diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/json.go b/vendor/github.com/hashicorp/go-msgpack/codec/json.go new file mode 100644 index 0000000000000..a731c8165e678 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/json.go @@ -0,0 +1,1491 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// By default, this json support uses base64 encoding for bytes, because you cannot +// store and read any arbitrary string in json (only unicode). +// However, the user can configure how to encode/decode bytes. +// +// This library specifically supports UTF-8 for encoding and decoding only. +// +// Note that the library will happily encode/decode things which are not valid +// json e.g. a map[int64]string. We do it for consistency. With valid json, +// we will encode and decode appropriately. +// Users can specify their map type if necessary to force it. +// +// Note: +// - we cannot use strconv.Quote and strconv.Unquote because json quotes/unquotes differently. +// We implement it here. + +// Top-level methods of json(Enc|Dec)Driver (which are implementations of (en|de)cDriver) +// MUST not call one-another.
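+// A minimal usage sketch (assuming only the exported JsonHandle fields
+// declared later in this file, plus the package-level NewEncoder/NewDecoder
+// constructors):
+//
+//	var jh JsonHandle
+//	jh.Indent = 2            // pretty-print with two spaces per level
+//	jh.IntegerAsString = 'L' // quote integers beyond 2^53 so precision survives
+//	var buf bytes.Buffer
+//	if err := NewEncoder(&buf, &jh).Encode(map[string]uint64{"big": 1 << 60}); err != nil {
+//		// handle error
+//	}
+//	var out map[string]uint64
+//	if err := NewDecoder(&buf, &jh).Decode(&out); err != nil {
+//		// handle error
+//	}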
+ +import ( + "bytes" + "encoding/base64" + "math" + "reflect" + "strconv" + "time" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +//-------------------------------- + +var jsonLiterals = [...]byte{ + '"', 't', 'r', 'u', 'e', '"', + '"', 'f', 'a', 'l', 's', 'e', '"', + '"', 'n', 'u', 'l', 'l', '"', +} + +const ( + jsonLitTrueQ = 0 + jsonLitTrue = 1 + jsonLitFalseQ = 6 + jsonLitFalse = 7 + // jsonLitNullQ = 13 + jsonLitNull = 14 +) + +var ( + jsonLiteral4True = jsonLiterals[jsonLitTrue+1 : jsonLitTrue+4] + jsonLiteral4False = jsonLiterals[jsonLitFalse+1 : jsonLitFalse+5] + jsonLiteral4Null = jsonLiterals[jsonLitNull+1 : jsonLitNull+4] +) + +const ( + jsonU4Chk2 = '0' + jsonU4Chk1 = 'a' - 10 + jsonU4Chk0 = 'A' - 10 + + jsonScratchArrayLen = 64 +) + +const ( + // If !jsonValidateSymbols, decoding will be faster, by skipping some checks: + // - If we see first character of null, false or true, + // do not validate subsequent characters. + // - e.g. if we see a n, assume null and skip next 3 characters, + // and do not validate they are ull. + // P.S. Do not expect a significant decoding boost from this. + jsonValidateSymbols = true + + jsonSpacesOrTabsLen = 128 + + jsonAlwaysReturnInternString = false +) + +var ( + // jsonTabs and jsonSpaces are used as caches for indents + jsonTabs, jsonSpaces [jsonSpacesOrTabsLen]byte + + jsonCharHtmlSafeSet bitset256 + jsonCharSafeSet bitset256 + jsonCharWhitespaceSet bitset256 + jsonNumSet bitset256 +) + +func init() { + var i byte + for i = 0; i < jsonSpacesOrTabsLen; i++ { + jsonSpaces[i] = ' ' + jsonTabs[i] = '\t' + } + + // populate the safe values as true: note: ASCII control characters are (0-31) + // jsonCharSafeSet: all true except (0-31) " \ + // jsonCharHtmlSafeSet: all true except (0-31) " \ < > & + for i = 32; i < utf8.RuneSelf; i++ { + switch i { + case '"', '\\': + case '<', '>', '&': + jsonCharSafeSet.set(i) // = true + default: + jsonCharSafeSet.set(i) + jsonCharHtmlSafeSet.set(i) + } + } + for i = 0; i <= utf8.RuneSelf; i++ { + switch i { + case ' ', '\t', '\r', '\n': + jsonCharWhitespaceSet.set(i) + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-': + jsonNumSet.set(i) + } + } +} + +// ---------------- + +type jsonEncDriverTypical struct { + jsonEncDriver +} + +func (e *jsonEncDriverTypical) typical() {} + +func (e *jsonEncDriverTypical) WriteArrayStart(length int) { + e.w.writen1('[') + e.c = containerArrayStart +} + +func (e *jsonEncDriverTypical) WriteArrayElem() { + if e.c != containerArrayStart { + e.w.writen1(',') + } + e.c = containerArrayElem +} + +func (e *jsonEncDriverTypical) WriteArrayEnd() { + e.w.writen1(']') + e.c = containerArrayEnd +} + +func (e *jsonEncDriverTypical) WriteMapStart(length int) { + e.w.writen1('{') + e.c = containerMapStart +} + +func (e *jsonEncDriverTypical) WriteMapElemKey() { + if e.c != containerMapStart { + e.w.writen1(',') + } + e.c = containerMapKey +} + +func (e *jsonEncDriverTypical) WriteMapElemValue() { + e.w.writen1(':') + e.c = containerMapValue +} + +func (e *jsonEncDriverTypical) WriteMapEnd() { + e.w.writen1('}') + e.c = containerMapEnd +} + +func (e *jsonEncDriverTypical) EncodeBool(b bool) { + if b { + e.w.writeb(jsonLiterals[jsonLitTrue : jsonLitTrue+4]) + } else { + e.w.writeb(jsonLiterals[jsonLitFalse : jsonLitFalse+5]) + } +} + +func (e *jsonEncDriverTypical) EncodeFloat64(f float64) { + fmt, prec := jsonFloatStrconvFmtPrec(f) + e.w.writeb(strconv.AppendFloat(e.b[:0], f, fmt, prec, 64)) +} + +func (e *jsonEncDriverTypical) EncodeInt(v int64) { + 
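+	// e.b is the driver's scratch array: strconv.AppendInt formats v into it
+	// without allocating, and the resulting bytes are written straight through.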
e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) +} + +func (e *jsonEncDriverTypical) EncodeUint(v uint64) { + e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) +} + +func (e *jsonEncDriverTypical) EncodeFloat32(f float32) { + e.EncodeFloat64(float64(f)) +} + +// func (e *jsonEncDriverTypical) atEndOfEncode() { +// if e.tw { +// e.w.writen1(' ') +// } +// } + +// ---------------- + +type jsonEncDriverGeneric struct { + jsonEncDriver + // ds string // indent string + di int8 // indent per + d bool // indenting? + dt bool // indent using tabs + dl uint16 // indent level + ks bool // map key as string + is byte // integer as string + _ byte // padding + _ [2]uint64 // padding +} + +// indent is done as below: +// - newline and indent are added before each mapKey or arrayElem +// - newline and indent are added before each ending, +// except there was no entry (so we can have {} or []) + +func (e *jsonEncDriverGeneric) reset() { + e.jsonEncDriver.reset() + e.d, e.dt, e.dl, e.di = false, false, 0, 0 + if e.h.Indent > 0 { + e.d = true + e.di = int8(e.h.Indent) + } else if e.h.Indent < 0 { + e.d = true + e.dt = true + e.di = int8(-e.h.Indent) + } + e.ks = e.h.MapKeyAsString + e.is = e.h.IntegerAsString +} + +func (e *jsonEncDriverGeneric) WriteArrayStart(length int) { + if e.d { + e.dl++ + } + e.w.writen1('[') + e.c = containerArrayStart +} + +func (e *jsonEncDriverGeneric) WriteArrayElem() { + if e.c != containerArrayStart { + e.w.writen1(',') + } + if e.d { + e.writeIndent() + } + e.c = containerArrayElem +} + +func (e *jsonEncDriverGeneric) WriteArrayEnd() { + if e.d { + e.dl-- + if e.c != containerArrayStart { + e.writeIndent() + } + } + e.w.writen1(']') + e.c = containerArrayEnd +} + +func (e *jsonEncDriverGeneric) WriteMapStart(length int) { + if e.d { + e.dl++ + } + e.w.writen1('{') + e.c = containerMapStart +} + +func (e *jsonEncDriverGeneric) WriteMapElemKey() { + if e.c != containerMapStart { + e.w.writen1(',') + } + if e.d { + e.writeIndent() + } + e.c = containerMapKey +} + +func (e *jsonEncDriverGeneric) WriteMapElemValue() { + if e.d { + e.w.writen2(':', ' ') + } else { + e.w.writen1(':') + } + e.c = containerMapValue +} + +func (e *jsonEncDriverGeneric) WriteMapEnd() { + if e.d { + e.dl-- + if e.c != containerMapStart { + e.writeIndent() + } + } + e.w.writen1('}') + e.c = containerMapEnd +} + +func (e *jsonEncDriverGeneric) writeIndent() { + e.w.writen1('\n') + x := int(e.di) * int(e.dl) + if e.dt { + for x > jsonSpacesOrTabsLen { + e.w.writeb(jsonTabs[:]) + x -= jsonSpacesOrTabsLen + } + e.w.writeb(jsonTabs[:x]) + } else { + for x > jsonSpacesOrTabsLen { + e.w.writeb(jsonSpaces[:]) + x -= jsonSpacesOrTabsLen + } + e.w.writeb(jsonSpaces[:x]) + } +} + +func (e *jsonEncDriverGeneric) EncodeBool(b bool) { + if e.ks && e.c == containerMapKey { + if b { + e.w.writeb(jsonLiterals[jsonLitTrueQ : jsonLitTrueQ+6]) + } else { + e.w.writeb(jsonLiterals[jsonLitFalseQ : jsonLitFalseQ+7]) + } + } else { + if b { + e.w.writeb(jsonLiterals[jsonLitTrue : jsonLitTrue+4]) + } else { + e.w.writeb(jsonLiterals[jsonLitFalse : jsonLitFalse+5]) + } + } +} + +func (e *jsonEncDriverGeneric) EncodeFloat64(f float64) { + // instead of using 'g', specify whether to use 'e' or 'f' + fmt, prec := jsonFloatStrconvFmtPrec(f) + + var blen int + if e.ks && e.c == containerMapKey { + blen = 2 + len(strconv.AppendFloat(e.b[1:1], f, fmt, prec, 64)) + e.b[0] = '"' + e.b[blen-1] = '"' + } else { + blen = len(strconv.AppendFloat(e.b[:0], f, fmt, prec, 64)) + } + e.w.writeb(e.b[:blen]) +} + +func (e *jsonEncDriverGeneric) 
EncodeInt(v int64) { + x := e.is + if x == 'A' || x == 'L' && (v > 1<<53 || v < -(1<<53)) || (e.ks && e.c == containerMapKey) { + blen := 2 + len(strconv.AppendInt(e.b[1:1], v, 10)) + e.b[0] = '"' + e.b[blen-1] = '"' + e.w.writeb(e.b[:blen]) + return + } + e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) +} + +func (e *jsonEncDriverGeneric) EncodeUint(v uint64) { + x := e.is + if x == 'A' || x == 'L' && v > 1<<53 || (e.ks && e.c == containerMapKey) { + blen := 2 + len(strconv.AppendUint(e.b[1:1], v, 10)) + e.b[0] = '"' + e.b[blen-1] = '"' + e.w.writeb(e.b[:blen]) + return + } + e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) +} + +func (e *jsonEncDriverGeneric) EncodeFloat32(f float32) { + // e.encodeFloat(float64(f), 32) + // always encode all floats as IEEE 64-bit floating point. + // It also ensures that we can decode in full precision even if into a float32, + // as what is written is always to float64 precision. + e.EncodeFloat64(float64(f)) +} + +// func (e *jsonEncDriverGeneric) atEndOfEncode() { +// if e.tw { +// if e.d { +// e.w.writen1('\n') +// } else { +// e.w.writen1(' ') +// } +// } +// } + +// -------------------- + +type jsonEncDriver struct { + noBuiltInTypes + e *Encoder + h *JsonHandle + w *encWriterSwitch + se extWrapper + // ---- cpu cache line boundary? + bs []byte // scratch + // ---- cpu cache line boundary? + // scratch: encode time, etc. + // include scratch buffer and padding, but leave space for containerstate + b [jsonScratchArrayLen + 8 + 8 - 1]byte + c containerState + // _ [2]uint64 // padding +} + +func (e *jsonEncDriver) EncodeNil() { + // We always encode nil as just null (never in quotes) + // This allows us to easily decode if a nil in the json stream + // ie if initial token is n. + e.w.writeb(jsonLiterals[jsonLitNull : jsonLitNull+4]) + + // if e.h.MapKeyAsString && e.c == containerMapKey { + // e.w.writeb(jsonLiterals[jsonLitNullQ : jsonLitNullQ+6]) + // } else { + // e.w.writeb(jsonLiterals[jsonLitNull : jsonLitNull+4]) + // } +} + +func (e *jsonEncDriver) EncodeTime(t time.Time) { + // Do NOT use MarshalJSON, as it allocates internally. 
+ // instead, we call AppendFormat directly, using our scratch buffer (e.b) + if t.IsZero() { + e.EncodeNil() + } else { + e.b[0] = '"' + b := t.AppendFormat(e.b[1:1], time.RFC3339Nano) + e.b[len(b)+1] = '"' + e.w.writeb(e.b[:len(b)+2]) + } + // v, err := t.MarshalJSON(); if err != nil { e.e.error(err) } e.w.writeb(v) +} + +func (e *jsonEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) { + if v := ext.ConvertExt(rv); v == nil { + e.EncodeNil() + } else { + en.encode(v) + } +} + +func (e *jsonEncDriver) EncodeRawExt(re *RawExt, en *Encoder) { + // only encodes re.Value (never re.Data) + if re.Value == nil { + e.EncodeNil() + } else { + en.encode(re.Value) + } +} + +func (e *jsonEncDriver) EncodeString(c charEncoding, v string) { + e.quoteStr(v) +} + +func (e *jsonEncDriver) EncodeStringEnc(c charEncoding, v string) { + e.quoteStr(v) +} + +func (e *jsonEncDriver) EncodeStringBytes(c charEncoding, v []byte) { + // if encoding raw bytes and RawBytesExt is configured, use it to encode + if v == nil { + e.EncodeNil() + return + } + if c == cRAW { + if e.se.InterfaceExt != nil { + e.EncodeExt(v, 0, &e.se, e.e) + return + } + + slen := base64.StdEncoding.EncodedLen(len(v)) + 2 + if cap(e.bs) >= slen { + e.bs = e.bs[:slen] + } else { + e.bs = make([]byte, slen) + } + e.bs[0] = '"' + base64.StdEncoding.Encode(e.bs[1:], v) + e.bs[slen-1] = '"' + e.w.writeb(e.bs) + } else { + e.quoteStr(stringView(v)) + } +} + +func (e *jsonEncDriver) EncodeStringBytesRaw(v []byte) { + // if encoding raw bytes and RawBytesExt is configured, use it to encode + if v == nil { + e.EncodeNil() + return + } + if e.se.InterfaceExt != nil { + e.EncodeExt(v, 0, &e.se, e.e) + return + } + + slen := base64.StdEncoding.EncodedLen(len(v)) + 2 + if cap(e.bs) >= slen { + e.bs = e.bs[:slen] + } else { + e.bs = make([]byte, slen) + } + e.bs[0] = '"' + base64.StdEncoding.Encode(e.bs[1:], v) + e.bs[slen-1] = '"' + e.w.writeb(e.bs) +} + +func (e *jsonEncDriver) EncodeAsis(v []byte) { + e.w.writeb(v) +} + +func (e *jsonEncDriver) quoteStr(s string) { + // adapted from std pkg encoding/json + const hex = "0123456789abcdef" + w := e.w + htmlasis := e.h.HTMLCharsAsIs + w.writen1('"') + var start int + for i, slen := 0, len(s); i < slen; { + // encode all bytes < 0x20 (except \r, \n). + // also encode < > & to prevent security holes when served to some browsers. + if b := s[i]; b < utf8.RuneSelf { + // if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + // if (htmlasis && jsonCharSafeSet.isset(b)) || jsonCharHtmlSafeSet.isset(b) { + if jsonCharHtmlSafeSet.isset(b) || (htmlasis && jsonCharSafeSet.isset(b)) { + i++ + continue + } + if start < i { + w.writestr(s[start:i]) + } + switch b { + case '\\', '"': + w.writen2('\\', b) + case '\n': + w.writen2('\\', 'n') + case '\r': + w.writen2('\\', 'r') + case '\b': + w.writen2('\\', 'b') + case '\f': + w.writen2('\\', 'f') + case '\t': + w.writen2('\\', 't') + default: + w.writestr(`\u00`) + w.writen2(hex[b>>4], hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRuneInString(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + w.writestr(s[start:i]) + } + w.writestr(`\ufffd`) + i += size + start = i + continue + } + // U+2028 is LINE SEPARATOR. U+2029 is PARAGRAPH SEPARATOR. + // Both technically valid JSON, but bomb on JSONP, so fix here unconditionally. 
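+		// e.g. a U+2028 in the input is emitted as the six ASCII bytes \u2028.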
+ if c == '\u2028' || c == '\u2029' { + if start < i { + w.writestr(s[start:i]) + } + w.writestr(`\u202`) + w.writen1(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + w.writestr(s[start:]) + } + w.writen1('"') +} + +func (e *jsonEncDriver) atEndOfEncode() { + // if e.c == 0 { // scalar written, output space + // e.w.writen1(' ') + // } else if e.h.TermWhitespace { // container written, output new-line + // e.w.writen1('\n') + // } + if e.h.TermWhitespace { + if e.c == 0 { // scalar written, output space + e.w.writen1(' ') + } else { // container written, output new-line + e.w.writen1('\n') + } + } + + // e.c = 0 +} + +type jsonDecDriver struct { + noBuiltInTypes + d *Decoder + h *JsonHandle + r *decReaderSwitch + se extWrapper + + // ---- writable fields during execution --- *try* to keep in sep cache line + + c containerState + // tok is used to store the token read right after skipWhiteSpace. + tok uint8 + fnull bool // found null from appendStringAsBytes + bs []byte // scratch. Initialized from b. Used for parsing strings or numbers. + bstr [8]byte // scratch used for string \UXXX parsing + // ---- cpu cache line boundary? + b [jsonScratchArrayLen]byte // scratch 1, used for parsing strings or numbers or time.Time + b2 [jsonScratchArrayLen]byte // scratch 2, used only for readUntil, decNumBytes + + // _ [3]uint64 // padding + // n jsonNum +} + +// func jsonIsWS(b byte) bool { +// // return b == ' ' || b == '\t' || b == '\r' || b == '\n' +// return jsonCharWhitespaceSet.isset(b) +// } + +func (d *jsonDecDriver) uncacheRead() { + if d.tok != 0 { + d.r.unreadn1() + d.tok = 0 + } +} + +func (d *jsonDecDriver) ReadMapStart() int { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + const xc uint8 = '{' + if d.tok != xc { + d.d.errorf("read map - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerMapStart + return -1 +} + +func (d *jsonDecDriver) ReadArrayStart() int { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + const xc uint8 = '[' + if d.tok != xc { + d.d.errorf("read array - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerArrayStart + return -1 +} + +func (d *jsonDecDriver) CheckBreak() bool { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + return d.tok == '}' || d.tok == ']' +} + +// For the ReadXXX methods below, we could just delegate to helper functions +// readContainerState(c containerState, xc uint8, check bool) +// - ReadArrayElem would become: +// readContainerState(containerArrayElem, ',', d.c != containerArrayStart) +// +// However, until mid-stack inlining comes in go1.11 which supports inlining of +// one-liners, we explicitly write them all 5 out to elide the extra func call. +// +// TODO: For Go 1.11, if inlined, consider consolidating these. 
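+// A sketch of that consolidation (hypothetical; readContainerState is not
+// defined in this file):
+//
+//	func (d *jsonDecDriver) readContainerState(c containerState, xc uint8, check bool) {
+//		if d.tok == 0 {
+//			d.tok = d.r.skip(&jsonCharWhitespaceSet)
+//		}
+//		if check {
+//			if d.tok != xc {
+//				d.d.errorf("expect char '%c' but got char '%c'", xc, d.tok)
+//			}
+//			d.tok = 0
+//		}
+//		d.c = c
+//	}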
+ +func (d *jsonDecDriver) ReadArrayElem() { + const xc uint8 = ',' + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.c != containerArrayStart { + if d.tok != xc { + d.d.errorf("read array element - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + } + d.c = containerArrayElem +} + +func (d *jsonDecDriver) ReadArrayEnd() { + const xc uint8 = ']' + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.tok != xc { + d.d.errorf("read array end - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerArrayEnd +} + +func (d *jsonDecDriver) ReadMapElemKey() { + const xc uint8 = ',' + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.c != containerMapStart { + if d.tok != xc { + d.d.errorf("read map key - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + } + d.c = containerMapKey +} + +func (d *jsonDecDriver) ReadMapElemValue() { + const xc uint8 = ':' + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.tok != xc { + d.d.errorf("read map value - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerMapValue +} + +func (d *jsonDecDriver) ReadMapEnd() { + const xc uint8 = '}' + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.tok != xc { + d.d.errorf("read map end - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerMapEnd +} + +// func (d *jsonDecDriver) readLit(length, fromIdx uint8) { +// // length here is always less than 8 (literals are: null, true, false) +// bs := d.r.readx(int(length)) +// d.tok = 0 +// if jsonValidateSymbols && !bytes.Equal(bs, jsonLiterals[fromIdx:fromIdx+length]) { +// d.d.errorf("expecting %s: got %s", jsonLiterals[fromIdx:fromIdx+length], bs) +// } +// } + +func (d *jsonDecDriver) readLit4True() { + bs := d.r.readx(3) + d.tok = 0 + if jsonValidateSymbols && !bytes.Equal(bs, jsonLiteral4True) { + d.d.errorf("expecting %s: got %s", jsonLiteral4True, bs) + } +} + +func (d *jsonDecDriver) readLit4False() { + bs := d.r.readx(4) + d.tok = 0 + if jsonValidateSymbols && !bytes.Equal(bs, jsonLiteral4False) { + d.d.errorf("expecting %s: got %s", jsonLiteral4False, bs) + } +} + +func (d *jsonDecDriver) readLit4Null() { + bs := d.r.readx(3) + d.tok = 0 + if jsonValidateSymbols && !bytes.Equal(bs, jsonLiteral4Null) { + d.d.errorf("expecting %s: got %s", jsonLiteral4Null, bs) + } +} + +func (d *jsonDecDriver) TryDecodeAsNil() bool { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + // we shouldn't try to see if "null" was here, right? 
+ // only the plain string: `null` denotes a nil (ie not quotes) + if d.tok == 'n' { + d.readLit4Null() + return true + } + return false +} + +func (d *jsonDecDriver) DecodeBool() (v bool) { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + fquot := d.c == containerMapKey && d.tok == '"' + if fquot { + d.tok = d.r.readn1() + } + switch d.tok { + case 'f': + d.readLit4False() + // v = false + case 't': + d.readLit4True() + v = true + default: + d.d.errorf("decode bool: got first char %c", d.tok) + // v = false // "unreachable" + } + if fquot { + d.r.readn1() + } + return +} + +func (d *jsonDecDriver) DecodeTime() (t time.Time) { + // read string, and pass the string into json.unmarshal + d.appendStringAsBytes() + if d.fnull { + return + } + t, err := time.Parse(time.RFC3339, stringView(d.bs)) + if err != nil { + d.d.errorv(err) + } + return +} + +func (d *jsonDecDriver) ContainerType() (vt valueType) { + // check container type by checking the first char + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + + // optimize this, so we don't do 4 checks but do one computation. + // return jsonContainerSet[d.tok] + + // ContainerType is mostly called for Map and Array, + // so this conditional is good enough (max 2 checks typically) + if b := d.tok; b == '{' { + return valueTypeMap + } else if b == '[' { + return valueTypeArray + } else if b == 'n' { + return valueTypeNil + } else if b == '"' { + return valueTypeString + } + return valueTypeUnset +} + +func (d *jsonDecDriver) decNumBytes() (bs []byte) { + // stores num bytes in d.bs + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.tok == '"' { + bs = d.r.readUntil(d.b2[:0], '"') + bs = bs[:len(bs)-1] + } else { + d.r.unreadn1() + bs = d.r.readTo(d.bs[:0], &jsonNumSet) + } + d.tok = 0 + return bs +} + +func (d *jsonDecDriver) DecodeUint64() (u uint64) { + bs := d.decNumBytes() + if len(bs) == 0 { + return + } + n, neg, badsyntax, overflow := jsonParseInteger(bs) + if overflow { + d.d.errorf("overflow parsing unsigned integer: %s", bs) + } else if neg { + d.d.errorf("minus found parsing unsigned integer: %s", bs) + } else if badsyntax { + // fallback: try to decode as float, and cast + n = d.decUint64ViaFloat(stringView(bs)) + } + return n +} + +func (d *jsonDecDriver) DecodeInt64() (i int64) { + const cutoff = uint64(1 << uint(64-1)) + bs := d.decNumBytes() + if len(bs) == 0 { + return + } + n, neg, badsyntax, overflow := jsonParseInteger(bs) + if overflow { + d.d.errorf("overflow parsing integer: %s", bs) + } else if badsyntax { + // d.d.errorf("invalid syntax for integer: %s", bs) + // fallback: try to decode as float, and cast + if neg { + n = d.decUint64ViaFloat(stringView(bs[1:])) + } else { + n = d.decUint64ViaFloat(stringView(bs)) + } + } + if neg { + if n > cutoff { + d.d.errorf("overflow parsing integer: %s", bs) + } + i = -(int64(n)) + } else { + if n >= cutoff { + d.d.errorf("overflow parsing integer: %s", bs) + } + i = int64(n) + } + return +} + +func (d *jsonDecDriver) decUint64ViaFloat(s string) (u uint64) { + if len(s) == 0 { + return + } + f, err := strconv.ParseFloat(s, 64) + if err != nil { + d.d.errorf("invalid syntax for integer: %s", s) + // d.d.errorv(err) + } + fi, ff := math.Modf(f) + if ff > 0 { + d.d.errorf("fractional part found parsing integer: %s", s) + } else if fi > float64(math.MaxUint64) { + d.d.errorf("overflow parsing integer: %s", s) + } + return uint64(fi) +} + +func (d *jsonDecDriver) DecodeFloat64() (f float64) { + bs := d.decNumBytes() + if len(bs) == 0 
{ + return + } + f, err := strconv.ParseFloat(stringView(bs), 64) + if err != nil { + d.d.errorv(err) + } + return +} + +func (d *jsonDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if ext == nil { + re := rv.(*RawExt) + re.Tag = xtag + d.d.decode(&re.Value) + } else { + var v interface{} + d.d.decode(&v) + ext.UpdateExt(rv, v) + } + return +} + +func (d *jsonDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { + // if decoding into raw bytes, and the RawBytesExt is configured, use it to decode. + if d.se.InterfaceExt != nil { + bsOut = bs + d.DecodeExt(&bsOut, 0, &d.se) + return + } + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + // check if an "array" of uint8's (see ContainerType for how to infer if an array) + if d.tok == '[' { + bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d) + return + } + d.appendStringAsBytes() + // base64 encodes []byte{} as "", and we encode nil []byte as null. + // Consequently, base64 should decode null as a nil []byte, and "" as an empty []byte{}. + // appendStringAsBytes returns a zero-len slice for both, so as not to reset d.bs. + // However, it sets a fnull field to true, so we can check if a null was found. + if len(d.bs) == 0 { + if d.fnull { + return nil + } + return []byte{} + } + bs0 := d.bs + slen := base64.StdEncoding.DecodedLen(len(bs0)) + if slen <= cap(bs) { + bsOut = bs[:slen] + } else if zerocopy && slen <= cap(d.b2) { + bsOut = d.b2[:slen] + } else { + bsOut = make([]byte, slen) + } + slen2, err := base64.StdEncoding.Decode(bsOut, bs0) + if err != nil { + d.d.errorf("error decoding base64 binary '%s': %v", bs0, err) + return nil + } + if slen != slen2 { + bsOut = bsOut[:slen2] + } + return +} + +func (d *jsonDecDriver) DecodeString() (s string) { + d.appendStringAsBytes() + return d.bsToString() +} + +func (d *jsonDecDriver) DecodeStringAsBytes() (s []byte) { + d.appendStringAsBytes() + return d.bs +} + +func (d *jsonDecDriver) appendStringAsBytes() { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + + d.fnull = false + if d.tok != '"' { + // d.d.errorf("expect char '%c' but got char '%c'", '"', d.tok) + // handle non-string scalar: null, true, false or a number + switch d.tok { + case 'n': + d.readLit4Null() + d.bs = d.bs[:0] + d.fnull = true + case 'f': + d.readLit4False() + d.bs = d.bs[:5] + copy(d.bs, "false") + case 't': + d.readLit4True() + d.bs = d.bs[:4] + copy(d.bs, "true") + default: + // try to parse a valid number + bs := d.decNumBytes() + if len(bs) <= cap(d.bs) { + d.bs = d.bs[:len(bs)] + } else { + d.bs = make([]byte, len(bs)) + } + copy(d.bs, bs) + } + return + } + + d.tok = 0 + r := d.r + var cs = r.readUntil(d.b2[:0], '"') + var cslen = uint(len(cs)) + var c uint8 + v := d.bs[:0] + // append on each byte seen can be expensive, so we just + // keep track of where we last read a contiguous set of + // non-special bytes (using cursor variable), + // and when we see a special byte + // e.g. end-of-slice, " or \, + // we will append the full range into the v slice before proceeding + var i, cursor uint + for { + if i == cslen { + v = append(v, cs[cursor:]...) + cs = r.readUntil(d.b2[:0], '"') + cslen = uint(len(cs)) + i, cursor = 0, 0 + } + c = cs[i] + if c == '"' { + v = append(v, cs[cursor:i]...) + break + } + if c != '\\' { + i++ + continue + } + v = append(v, cs[cursor:i]...) 
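+		// a backslash was just seen: advance to the selector byte and decode
+		// the escape (one of " \ / ' b f n r t u) in the switch below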
+ i++ + c = cs[i] + switch c { + case '"', '\\', '/', '\'': + v = append(v, c) + case 'b': + v = append(v, '\b') + case 'f': + v = append(v, '\f') + case 'n': + v = append(v, '\n') + case 'r': + v = append(v, '\r') + case 't': + v = append(v, '\t') + case 'u': + var r rune + var rr uint32 + if cslen < i+4 { + d.d.errorf("need at least 4 more bytes for unicode sequence") + } + var j uint + for _, c = range cs[i+1 : i+5] { // bounds-check-elimination + // best to use explicit if-else + // - not a table, etc which involve memory loads, array lookup with bounds checks, etc + if c >= '0' && c <= '9' { + rr = rr*16 + uint32(c-jsonU4Chk2) + } else if c >= 'a' && c <= 'f' { + rr = rr*16 + uint32(c-jsonU4Chk1) + } else if c >= 'A' && c <= 'F' { + rr = rr*16 + uint32(c-jsonU4Chk0) + } else { + r = unicode.ReplacementChar + i += 4 + goto encode_rune + } + } + r = rune(rr) + i += 4 + if utf16.IsSurrogate(r) { + if len(cs) >= int(i+6) { + var cx = cs[i+1:][:6:6] // [:6] affords bounds-check-elimination + if cx[0] == '\\' && cx[1] == 'u' { + i += 2 + var rr1 uint32 + for j = 2; j < 6; j++ { + c = cx[j] + if c >= '0' && c <= '9' { + rr1 = rr1*16 + uint32(c-jsonU4Chk2) + } else if c >= 'a' && c <= 'f' { + rr1 = rr1*16 + uint32(c-jsonU4Chk1) + } else if c >= 'A' && c <= 'F' { + rr1 = rr1*16 + uint32(c-jsonU4Chk0) + } else { + r = unicode.ReplacementChar + i += 4 + goto encode_rune + } + } + r = utf16.DecodeRune(r, rune(rr1)) + i += 4 + goto encode_rune + } + } + r = unicode.ReplacementChar + } + encode_rune: + w2 := utf8.EncodeRune(d.bstr[:], r) + v = append(v, d.bstr[:w2]...) + default: + d.d.errorf("unsupported escaped value: %c", c) + } + i++ + cursor = i + } + d.bs = v +} + +func (d *jsonDecDriver) nakedNum(z *decNaked, bs []byte) (err error) { + const cutoff = uint64(1 << uint(64-1)) + + var n uint64 + var neg, badsyntax, overflow bool + + if len(bs) == 0 { + if d.h.PreferFloat { + z.v = valueTypeFloat + z.f = 0 + } else if d.h.SignedInteger { + z.v = valueTypeInt + z.i = 0 + } else { + z.v = valueTypeUint + z.u = 0 + } + return + } + if d.h.PreferFloat { + goto F + } + n, neg, badsyntax, overflow = jsonParseInteger(bs) + if badsyntax || overflow { + goto F + } + if neg { + if n > cutoff { + goto F + } + z.v = valueTypeInt + z.i = -(int64(n)) + } else if d.h.SignedInteger { + if n >= cutoff { + goto F + } + z.v = valueTypeInt + z.i = int64(n) + } else { + z.v = valueTypeUint + z.u = n + } + return +F: + z.v = valueTypeFloat + z.f, err = strconv.ParseFloat(stringView(bs), 64) + return +} + +func (d *jsonDecDriver) bsToString() string { + // if x := d.s.sc; x != nil && x.so && x.st == '}' { // map key + if jsonAlwaysReturnInternString || d.c == containerMapKey { + return d.d.string(d.bs) + } + return string(d.bs) +} + +func (d *jsonDecDriver) DecodeNaked() { + z := d.d.naked() + // var decodeFurther bool + + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + switch d.tok { + case 'n': + d.readLit4Null() + z.v = valueTypeNil + case 'f': + d.readLit4False() + z.v = valueTypeBool + z.b = false + case 't': + d.readLit4True() + z.v = valueTypeBool + z.b = true + case '{': + z.v = valueTypeMap // don't consume. kInterfaceNaked will call ReadMapStart + case '[': + z.v = valueTypeArray // don't consume.
kInterfaceNaked will call ReadArrayStart + case '"': + // if a string, and MapKeyAsString, then try to decode it as a nil, bool or number first + d.appendStringAsBytes() + if len(d.bs) > 0 && d.c == containerMapKey && d.h.MapKeyAsString { + switch stringView(d.bs) { + case "null": + z.v = valueTypeNil + case "true": + z.v = valueTypeBool + z.b = true + case "false": + z.v = valueTypeBool + z.b = false + default: + // check if a number: float, int or uint + if err := d.nakedNum(z, d.bs); err != nil { + z.v = valueTypeString + z.s = d.bsToString() + } + } + } else { + z.v = valueTypeString + z.s = d.bsToString() + } + default: // number + bs := d.decNumBytes() + if len(bs) == 0 { + d.d.errorf("decode number from empty string") + return + } + if err := d.nakedNum(z, bs); err != nil { + d.d.errorf("decode number from %s: %v", bs, err) + return + } + } + // if decodeFurther { + // d.s.sc.retryRead() + // } +} + +//---------------------- + +// JsonHandle is a handle for JSON encoding format. +// +// Json is comprehensively supported: +// - decodes numbers into interface{} as int, uint or float64 +// based on how the number looks and some config parameters e.g. PreferFloat, SignedInteger, etc. +// - decode integers from float formatted numbers e.g. 1.27e+8 +// - decode any json value (numbers, bool, etc) from quoted strings +// - configurable way to encode/decode []byte. +// by default, encodes and decodes []byte using base64 Std Encoding +// - UTF-8 support for encoding and decoding +// +// It has better performance than the json library in the standard library, +// by leveraging the performance improvements of the codec library. +// +// In addition, it doesn't read more bytes than necessary during a decode, which allows +// reading multiple values from a stream containing json and non-json content. +// For example, a user can read a json value, then a cbor value, then a msgpack value, +// all from the same stream in sequence. +// +// Note that, when decoding quoted strings, invalid UTF-8 or invalid UTF-16 surrogate pairs are +// not treated as an error. Instead, they are replaced by the Unicode replacement character U+FFFD. +type JsonHandle struct { + textEncodingType + BasicHandle + + // Indent indicates how a value is encoded. + // - If positive, indent by that number of spaces. + // - If negative, indent by that number of tabs. + Indent int8 + + // IntegerAsString controls how integers (signed and unsigned) are encoded. + // + // Per the JSON Spec, JSON numbers are 64-bit floating point numbers. + // Consequently, integers > 2^53 cannot be represented as a JSON number without losing precision. + // This can be mitigated by configuring how to encode integers. + // + // IntegerAsString interprets the following values: + // - if 'L', then encode integers > 2^53 as a json string. + // - if 'A', then encode all integers as a json string + // containing the exact integer representation as a decimal. + // - else encode all integers as a json number (default) + IntegerAsString byte + + // HTMLCharsAsIs controls how to encode some special characters to html: < > & + // + // By default, we encode them as \u00XX + // to prevent security holes when served from some browsers. + HTMLCharsAsIs bool + + // PreferFloat says that we will default to decoding a number as a float. + // If not set, we will examine the characters of the number and decode as an + // integer type if it doesn't have any of the characters [.eE].
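+	// For example, with PreferFloat unset, "123" decodes into an interface{}
+	// as an integer, while "1.25e2" decodes as a float64.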
+ PreferFloat bool + + // TermWhitespace says that we add a whitespace character + // at the end of an encoding. + // + // The whitespace is important, especially if using numbers in a context + // where multiple items are written to a stream. + TermWhitespace bool + + // MapKeyAsString says to encode all map keys as strings. + // + // Use this to enforce strict json output. + // The only caveat is that nil value is ALWAYS written as null (never as "null") + MapKeyAsString bool + + // _ [2]byte // padding + + // Note: below, we store hardly-used items e.g. RawBytesExt is cached in the (en|de)cDriver. + + // RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way. + // If not configured, raw bytes are encoded to/from base64 text. + RawBytesExt InterfaceExt + + _ [2]uint64 // padding +} + +// Name returns the name of the handle: json +func (h *JsonHandle) Name() string { return "json" } +func (h *JsonHandle) hasElemSeparators() bool { return true } +func (h *JsonHandle) typical() bool { + return h.Indent == 0 && !h.MapKeyAsString && h.IntegerAsString != 'A' && h.IntegerAsString != 'L' +} + +type jsonTypical interface { + typical() +} + +func (h *JsonHandle) recreateEncDriver(ed encDriver) (v bool) { + _, v = ed.(jsonTypical) + return v != h.typical() +} + +// SetInterfaceExt sets an extension +func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { + return h.SetExt(rt, tag, &extWrapper{bytesExtFailer{}, ext}) +} + +func (h *JsonHandle) newEncDriver(e *Encoder) (ee encDriver) { + var hd *jsonEncDriver + if h.typical() { + var v jsonEncDriverTypical + ee = &v + hd = &v.jsonEncDriver + } else { + var v jsonEncDriverGeneric + ee = &v + hd = &v.jsonEncDriver + } + hd.e, hd.h, hd.bs = e, h, hd.b[:0] + hd.se.BytesExt = bytesExtFailer{} + ee.reset() + return +} + +func (h *JsonHandle) newDecDriver(d *Decoder) decDriver { + // d := jsonDecDriver{r: r.(*bytesDecReader), h: h} + hd := jsonDecDriver{d: d, h: h} + hd.se.BytesExt = bytesExtFailer{} + hd.bs = hd.b[:0] + hd.reset() + return &hd +} + +func (e *jsonEncDriver) reset() { + e.w = e.e.w + e.se.InterfaceExt = e.h.RawBytesExt + if e.bs != nil { + e.bs = e.bs[:0] + } + e.c = 0 +} + +func (d *jsonDecDriver) reset() { + d.r = d.d.r + d.se.InterfaceExt = d.h.RawBytesExt + if d.bs != nil { + d.bs = d.bs[:0] + } + d.c, d.tok = 0, 0 + // d.n.reset() +} + +func jsonFloatStrconvFmtPrec(f float64) (fmt byte, prec int) { + prec = -1 + var abs = math.Abs(f) + if abs != 0 && (abs < 1e-6 || abs >= 1e21) { + fmt = 'e' + } else { + fmt = 'f' + // set prec to 1 iff mod is 0. + // better than using jsonIsFloatBytesB2 to check if a . or E in the float bytes. + // this ensures that every float has an e or .0 in it. + if abs <= 1 { + if abs == 0 || abs == 1 { + prec = 1 + } + } else if _, mod := math.Modf(abs); mod == 0 { + prec = 1 + } + } + return +} + +// custom-fitted version of strconv.Parse(Ui|I)nt. +// Also ensures we don't have to search for .eE to determine if a float or not. +// Note: s CANNOT be a zero-length slice. 
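+// Worked examples (given the parsing rules implemented below):
+//	jsonParseInteger([]byte("-42")) => n=42, neg=true
+//	jsonParseInteger([]byte("1e3")) => badSyntax=true (callers fall back to float parsing)
+//	jsonParseInteger([]byte("99999999999999999999")) => overflow=true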
+func jsonParseInteger(s []byte) (n uint64, neg, badSyntax, overflow bool) { + const maxUint64 = (1<<64 - 1) + const cutoff = maxUint64/10 + 1 + + if len(s) == 0 { // bounds-check-elimination + // treat empty string as zero value + // badSyntax = true + return + } + switch s[0] { + case '+': + s = s[1:] + case '-': + s = s[1:] + neg = true + } + for _, c := range s { + if c < '0' || c > '9' { + badSyntax = true + return + } + // unsigned integers don't overflow well on multiplication, so check cutoff here + // e.g. (maxUint64-5)*10 doesn't overflow well ... + if n >= cutoff { + overflow = true + return + } + n *= 10 + n1 := n + uint64(c-'0') + if n1 < n || n1 > maxUint64 { + overflow = true + return + } + n = n1 + } + return +} + +var _ decDriver = (*jsonDecDriver)(nil) +var _ encDriver = (*jsonEncDriverGeneric)(nil) +var _ encDriver = (*jsonEncDriverTypical)(nil) +var _ jsonTypical = (*jsonEncDriverTypical)(nil) diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/mammoth-test.go.tmpl b/vendor/github.com/hashicorp/go-msgpack/codec/mammoth-test.go.tmpl new file mode 100644 index 0000000000000..c598cc73a5eba --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/mammoth-test.go.tmpl @@ -0,0 +1,154 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// Code generated from mammoth-test.go.tmpl - DO NOT EDIT. + +package codec + +import "testing" +import "fmt" +import "reflect" + +// TestMammoth has all the different paths optimized in fast-path +// It has all the primitives, slices and maps. +// +// For each of those types, it has a pointer and a non-pointer field. + +func init() { _ = fmt.Printf } // so we can include fmt as needed + +type TestMammoth struct { + +{{range .Values }}{{if .Primitive }}{{/* +*/}}{{ .MethodNamePfx "F" true }} {{ .Primitive }} +{{ .MethodNamePfx "Fptr" true }} *{{ .Primitive }} +{{end}}{{end}} + +{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/* +*/}}{{ .MethodNamePfx "F" false }} []{{ .Elem }} +{{ .MethodNamePfx "Fptr" false }} *[]{{ .Elem }} +{{end}}{{end}}{{end}} + +{{range .Values }}{{if not .Primitive }}{{if .MapKey }}{{/* +*/}}{{ .MethodNamePfx "F" false }} map[{{ .MapKey }}]{{ .Elem }} +{{ .MethodNamePfx "Fptr" false }} *map[{{ .MapKey }}]{{ .Elem }} +{{end}}{{end}}{{end}} + +} + +{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/* +*/}} type {{ .MethodNamePfx "typMbs" false }} []{{ .Elem }} +func (_ {{ .MethodNamePfx "typMbs" false }}) MapBySlice() { } +{{end}}{{end}}{{end}} + +{{range .Values }}{{if not .Primitive }}{{if .MapKey }}{{/* +*/}} type {{ .MethodNamePfx "typMap" false }} map[{{ .MapKey }}]{{ .Elem }} +{{end}}{{end}}{{end}} + +func doTestMammothSlices(t *testing.T, h Handle) { +{{range $i, $e := .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/* +*/}} + var v{{$i}}va [8]{{ .Elem }} + for _, v := range [][]{{ .Elem }}{ nil, {}, { {{ nonzerocmd .Elem }}, {{ zerocmd .Elem }}, {{ zerocmd .Elem }}, {{ nonzerocmd .Elem }} } } { {{/* + // fmt.Printf(">>>> running mammoth slice v{{$i}}: %v\n", v) + // - encode value to some []byte + // - decode into a length-wise-equal []byte + // - check if equal to initial slice + // - encode ptr to the value + // - check if encode bytes are same + // - decode into ptrs to: nil, then 1-elem slice, equal-length, then large len slice + // - decode into non-addressable slice of equal length, then larger len + // - for each decode, compare elem-by-elem to the original slice + // 
- + // - rinse and repeat for a MapBySlice version + // - + */}} + var v{{$i}}v1, v{{$i}}v2 []{{ .Elem }} + v{{$i}}v1 = v + bs{{$i}} := testMarshalErr(v{{$i}}v1, h, t, "enc-slice-v{{$i}}") + if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) } + testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}") + if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) } + testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-slice-v{{$i}}-noaddr") // non-addressable value + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-noaddr") + // ... + bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-slice-v{{$i}}-p") + v{{$i}}v2 = nil + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p") + v{{$i}}va = [8]{{ .Elem }}{} // clear the array + v{{$i}}v2 = v{{$i}}va[:1:1] + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-1") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-1") + v{{$i}}va = [8]{{ .Elem }}{} // clear the array + v{{$i}}v2 = v{{$i}}va[:len(v{{$i}}v1):len(v{{$i}}v1)] + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-len") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-len") + v{{$i}}va = [8]{{ .Elem }}{} // clear the array + v{{$i}}v2 = v{{$i}}va[:] + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-cap") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-cap") + if len(v{{$i}}v1) > 1 { + v{{$i}}va = [8]{{ .Elem }}{} // clear the array + testUnmarshalErr((&v{{$i}}va)[:len(v{{$i}}v1)], bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-len-noaddr") + testDeepEqualErr(v{{$i}}v1, v{{$i}}va[:len(v{{$i}}v1)], t, "equal-slice-v{{$i}}-p-len-noaddr") + v{{$i}}va = [8]{{ .Elem }}{} // clear the array + testUnmarshalErr((&v{{$i}}va)[:], bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-cap-noaddr") + testDeepEqualErr(v{{$i}}v1, v{{$i}}va[:len(v{{$i}}v1)], t, "equal-slice-v{{$i}}-p-cap-noaddr") + } + // ... 
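+	// The typMbs round-trip below exercises the MapBySlice marker: a slice
+	// type with a MapBySlice() method is encoded as a map stream (elements
+	// alternating as key, value) instead of as an array.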
+ var v{{$i}}v3, v{{$i}}v4 {{ .MethodNamePfx "typMbs" false }} + v{{$i}}v2 = nil + if v != nil { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) } + v{{$i}}v3 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v1) + v{{$i}}v4 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v2) + bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom") + testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom") + testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom") + bs{{$i}} = testMarshalErr(&v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom-p") + v{{$i}}v2 = nil + v{{$i}}v4 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v2) + testUnmarshalErr(&v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom-p") + testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom-p") + } +{{end}}{{end}}{{end}} +} + +func doTestMammothMaps(t *testing.T, h Handle) { +{{range $i, $e := .Values }}{{if not .Primitive }}{{if .MapKey }}{{/* +*/}} + for _, v := range []map[{{ .MapKey }}]{{ .Elem }}{ nil, {}, { {{ nonzerocmd .MapKey }}:{{ zerocmd .Elem }} {{if ne "bool" .MapKey}}, {{ nonzerocmd .MapKey }}:{{ nonzerocmd .Elem }} {{end}} } } { + // fmt.Printf(">>>> running mammoth map v{{$i}}: %v\n", v) + var v{{$i}}v1, v{{$i}}v2 map[{{ .MapKey }}]{{ .Elem }} + v{{$i}}v1 = v + bs{{$i}} := testMarshalErr(v{{$i}}v1, h, t, "enc-map-v{{$i}}") + if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map + testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}") + if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map + testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-map-v{{$i}}-noaddr") // decode into non-addressable map value + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-noaddr") + if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-p-len") + bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-map-v{{$i}}-p") + v{{$i}}v2 = nil + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-nil") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-p-nil") + // ... + if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map + var v{{$i}}v3, v{{$i}}v4 {{ .MethodNamePfx "typMap" false }} + v{{$i}}v3 = {{ .MethodNamePfx "typMap" false }}(v{{$i}}v1) + v{{$i}}v4 = {{ .MethodNamePfx "typMap" false }}(v{{$i}}v2) + bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-map-v{{$i}}-custom") + testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len") + testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-map-v{{$i}}-p-len") + } +{{end}}{{end}}{{end}} + +} + +func doTestMammothMapsAndSlices(t *testing.T, h Handle) { + doTestMammothSlices(t, h) + doTestMammothMaps(t, h) +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/mammoth2-test.go.tmpl b/vendor/github.com/hashicorp/go-msgpack/codec/mammoth2-test.go.tmpl new file mode 100644 index 0000000000000..71eaf618a50a6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/mammoth2-test.go.tmpl @@ -0,0 +1,94 @@ +// +build !notfastpath + +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
+ +// Code generated from mammoth2-test.go.tmpl - DO NOT EDIT. + +package codec + +// Increase codecoverage by covering all the codecgen paths, in fast-path and gen-helper.go.... +// +// Add: +// - test file for creating a mammoth generated file as _mammoth_generated.go +// - generate a second mammoth files in a different file: mammoth2_generated_test.go +// - mammoth-test.go.tmpl will do this +// - run codecgen on it, into mammoth2_codecgen_generated_test.go (no build tags) +// - as part of TestMammoth, run it also +// - this will cover all the codecgen, gen-helper, etc in one full run +// - check in mammoth* files into github also +// - then +// +// Now, add some types: +// - some that implement BinaryMarshal, TextMarshal, JSONMarshal, and one that implements none of it +// - create a wrapper type that includes TestMammoth2, with it in slices, and maps, and the custom types +// - this wrapper object is what we work encode/decode (so that the codecgen methods are called) + + +// import "encoding/binary" +import "fmt" + +type TestMammoth2 struct { + +{{range .Values }}{{if .Primitive }}{{/* +*/}}{{ .MethodNamePfx "F" true }} {{ .Primitive }} +{{ .MethodNamePfx "Fptr" true }} *{{ .Primitive }} +{{end}}{{end}} + +{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/* +*/}}{{ .MethodNamePfx "F" false }} []{{ .Elem }} +{{ .MethodNamePfx "Fptr" false }} *[]{{ .Elem }} +{{end}}{{end}}{{end}} + +{{range .Values }}{{if not .Primitive }}{{if .MapKey }}{{/* +*/}}{{ .MethodNamePfx "F" false }} map[{{ .MapKey }}]{{ .Elem }} +{{ .MethodNamePfx "Fptr" false }} *map[{{ .MapKey }}]{{ .Elem }} +{{end}}{{end}}{{end}} + +} + +// ----------- + +type testMammoth2Binary uint64 +func (x testMammoth2Binary) MarshalBinary() (data []byte, err error) { +data = make([]byte, 8) +bigen.PutUint64(data, uint64(x)) +return +} +func (x *testMammoth2Binary) UnmarshalBinary(data []byte) (err error) { +*x = testMammoth2Binary(bigen.Uint64(data)) +return +} + +type testMammoth2Text uint64 +func (x testMammoth2Text) MarshalText() (data []byte, err error) { +data = []byte(fmt.Sprintf("%b", uint64(x))) +return +} +func (x *testMammoth2Text) UnmarshalText(data []byte) (err error) { +_, err = fmt.Sscanf(string(data), "%b", (*uint64)(x)) +return +} + +type testMammoth2Json uint64 +func (x testMammoth2Json) MarshalJSON() (data []byte, err error) { +data = []byte(fmt.Sprintf("%v", uint64(x))) +return +} +func (x *testMammoth2Json) UnmarshalJSON(data []byte) (err error) { +_, err = fmt.Sscanf(string(data), "%v", (*uint64)(x)) +return +} + +type testMammoth2Basic [4]uint64 + +type TestMammoth2Wrapper struct { + V TestMammoth2 + T testMammoth2Text + B testMammoth2Binary + J testMammoth2Json + C testMammoth2Basic + M map[testMammoth2Basic]TestMammoth2 + L []TestMammoth2 + A [4]int64 +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go b/vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go index da0500d19223b..bf311a6778a27 100644 --- a/vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go +++ b/vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go @@ -1,5 +1,5 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
/* MSGPACK @@ -15,8 +15,8 @@ For compatibility with behaviour of msgpack-c reference implementation: - Go intX (<0) IS ENCODED AS msgpack -ve fixnum, signed - */ + package codec import ( @@ -24,59 +24,144 @@ import ( "io" "math" "net/rpc" + "reflect" + "time" ) const ( mpPosFixNumMin byte = 0x00 - mpPosFixNumMax = 0x7f - mpFixMapMin = 0x80 - mpFixMapMax = 0x8f - mpFixArrayMin = 0x90 - mpFixArrayMax = 0x9f - mpFixStrMin = 0xa0 - mpFixStrMax = 0xbf - mpNil = 0xc0 - _ = 0xc1 - mpFalse = 0xc2 - mpTrue = 0xc3 - mpFloat = 0xca - mpDouble = 0xcb - mpUint8 = 0xcc - mpUint16 = 0xcd - mpUint32 = 0xce - mpUint64 = 0xcf - mpInt8 = 0xd0 - mpInt16 = 0xd1 - mpInt32 = 0xd2 - mpInt64 = 0xd3 + mpPosFixNumMax byte = 0x7f + mpFixMapMin byte = 0x80 + mpFixMapMax byte = 0x8f + mpFixArrayMin byte = 0x90 + mpFixArrayMax byte = 0x9f + mpFixStrMin byte = 0xa0 + mpFixStrMax byte = 0xbf + mpNil byte = 0xc0 + _ byte = 0xc1 + mpFalse byte = 0xc2 + mpTrue byte = 0xc3 + mpFloat byte = 0xca + mpDouble byte = 0xcb + mpUint8 byte = 0xcc + mpUint16 byte = 0xcd + mpUint32 byte = 0xce + mpUint64 byte = 0xcf + mpInt8 byte = 0xd0 + mpInt16 byte = 0xd1 + mpInt32 byte = 0xd2 + mpInt64 byte = 0xd3 // extensions below - mpBin8 = 0xc4 - mpBin16 = 0xc5 - mpBin32 = 0xc6 - mpExt8 = 0xc7 - mpExt16 = 0xc8 - mpExt32 = 0xc9 - mpFixExt1 = 0xd4 - mpFixExt2 = 0xd5 - mpFixExt4 = 0xd6 - mpFixExt8 = 0xd7 - mpFixExt16 = 0xd8 - - mpStr8 = 0xd9 // new - mpStr16 = 0xda - mpStr32 = 0xdb - - mpArray16 = 0xdc - mpArray32 = 0xdd - - mpMap16 = 0xde - mpMap32 = 0xdf - - mpNegFixNumMin = 0xe0 - mpNegFixNumMax = 0xff + mpBin8 byte = 0xc4 + mpBin16 byte = 0xc5 + mpBin32 byte = 0xc6 + mpExt8 byte = 0xc7 + mpExt16 byte = 0xc8 + mpExt32 byte = 0xc9 + mpFixExt1 byte = 0xd4 + mpFixExt2 byte = 0xd5 + mpFixExt4 byte = 0xd6 + mpFixExt8 byte = 0xd7 + mpFixExt16 byte = 0xd8 + + mpStr8 byte = 0xd9 // new + mpStr16 byte = 0xda + mpStr32 byte = 0xdb + + mpArray16 byte = 0xdc + mpArray32 byte = 0xdd + + mpMap16 byte = 0xde + mpMap32 byte = 0xdf + + mpNegFixNumMin byte = 0xe0 + mpNegFixNumMax byte = 0xff ) +var mpTimeExtTag int8 = -1 +var mpTimeExtTagU = uint8(mpTimeExtTag) + +// var mpdesc = map[byte]string{ +// mpPosFixNumMin: "PosFixNumMin", +// mpPosFixNumMax: "PosFixNumMax", +// mpFixMapMin: "FixMapMin", +// mpFixMapMax: "FixMapMax", +// mpFixArrayMin: "FixArrayMin", +// mpFixArrayMax: "FixArrayMax", +// mpFixStrMin: "FixStrMin", +// mpFixStrMax: "FixStrMax", +// mpNil: "Nil", +// mpFalse: "False", +// mpTrue: "True", +// mpFloat: "Float", +// mpDouble: "Double", +// mpUint8: "Uint8", +// mpUint16: "Uint16", +// mpUint32: "Uint32", +// mpUint64: "Uint64", +// mpInt8: "Int8", +// mpInt16: "Int16", +// mpInt32: "Int32", +// mpInt64: "Int64", +// mpBin8: "Bin8", +// mpBin16: "Bin16", +// mpBin32: "Bin32", +// mpExt8: "Ext8", +// mpExt16: "Ext16", +// mpExt32: "Ext32", +// mpFixExt1: "FixExt1", +// mpFixExt2: "FixExt2", +// mpFixExt4: "FixExt4", +// mpFixExt8: "FixExt8", +// mpFixExt16: "FixExt16", +// mpStr8: "Str8", +// mpStr16: "Str16", +// mpStr32: "Str32", +// mpArray16: "Array16", +// mpArray32: "Array32", +// mpMap16: "Map16", +// mpMap32: "Map32", +// mpNegFixNumMin: "NegFixNumMin", +// mpNegFixNumMax: "NegFixNumMax", +// } + +func mpdesc(bd byte) string { + switch bd { + case mpNil: + return "nil" + case mpFalse: + return "false" + case mpTrue: + return "true" + case mpFloat, mpDouble: + return "float" + case mpUint8, mpUint16, mpUint32, mpUint64: + return "uint" + case mpInt8, mpInt16, mpInt32, mpInt64: + return "int" + default: + switch { + case bd >= 
mpPosFixNumMin && bd <= mpPosFixNumMax: + return "int" + case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: + return "int" + case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: + return "string|bytes" + case bd == mpBin8, bd == mpBin16, bd == mpBin32: + return "bytes" + case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: + return "array" + case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: + return "map" + case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: + return "ext" + default: + return "unknown" + } + } +} + // MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec // that the backend RPC service takes multiple arguments, which have been arranged // in sequence in the slice. @@ -87,76 +172,102 @@ type MsgpackSpecRpcMultiArgs []interface{} // A MsgpackContainer type specifies the different types of msgpackContainers. type msgpackContainerType struct { - fixCutoff int - bFixMin, b8, b16, b32 byte - hasFixMin, has8, has8Always bool + fixCutoff uint8 + bFixMin, b8, b16, b32 byte + // hasFixMin, has8, has8Always bool } var ( - msgpackContainerStr = msgpackContainerType{32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false} - msgpackContainerBin = msgpackContainerType{0, 0, mpBin8, mpBin16, mpBin32, false, true, true} - msgpackContainerList = msgpackContainerType{16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false} - msgpackContainerMap = msgpackContainerType{16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false} + msgpackContainerRawLegacy = msgpackContainerType{ + 32, mpFixStrMin, 0, mpStr16, mpStr32, + } + msgpackContainerStr = msgpackContainerType{ + 32, mpFixStrMin, mpStr8, mpStr16, mpStr32, // true, true, false, + } + msgpackContainerBin = msgpackContainerType{ + 0, 0, mpBin8, mpBin16, mpBin32, // false, true, true, + } + msgpackContainerList = msgpackContainerType{ + 16, mpFixArrayMin, 0, mpArray16, mpArray32, // true, false, false, + } + msgpackContainerMap = msgpackContainerType{ + 16, mpFixMapMin, 0, mpMap16, mpMap32, // true, false, false, + } ) //--------------------------------------------- type msgpackEncDriver struct { - w encWriter + noBuiltInTypes + encDriverNoopContainerWriter + // encNoSeparator + e *Encoder + w *encWriterSwitch h *MsgpackHandle + x [8]byte + // _ [3]uint64 // padding } -func (e *msgpackEncDriver) isBuiltinType(rt uintptr) bool { - //no builtin types. All encodings are based on kinds. Types supported as extensions. 
- return false -} - -func (e *msgpackEncDriver) encodeBuiltin(rt uintptr, v interface{}) {} - -func (e *msgpackEncDriver) encodeNil() { +func (e *msgpackEncDriver) EncodeNil() { e.w.writen1(mpNil) } -func (e *msgpackEncDriver) encodeInt(i int64) { - - switch { - case i >= 0: - e.encodeUint(uint64(i)) - case i >= -32: - e.w.writen1(byte(i)) - case i >= math.MinInt8: +func (e *msgpackEncDriver) EncodeInt(i int64) { + if e.h.PositiveIntUnsigned && i >= 0 { + e.EncodeUint(uint64(i)) + } else if i > math.MaxInt8 { + if i <= math.MaxInt16 { + e.w.writen1(mpInt16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) + } else if i <= math.MaxInt32 { + e.w.writen1(mpInt32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) + } else { + e.w.writen1(mpInt64) + bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) + } + } else if i >= -32 { + if e.h.NoFixedNum { + e.w.writen2(mpInt8, byte(i)) + } else { + e.w.writen1(byte(i)) + } + } else if i >= math.MinInt8 { e.w.writen2(mpInt8, byte(i)) - case i >= math.MinInt16: + } else if i >= math.MinInt16 { e.w.writen1(mpInt16) - e.w.writeUint16(uint16(i)) - case i >= math.MinInt32: + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) + } else if i >= math.MinInt32 { e.w.writen1(mpInt32) - e.w.writeUint32(uint32(i)) - default: + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) + } else { e.w.writen1(mpInt64) - e.w.writeUint64(uint64(i)) + bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) } } -func (e *msgpackEncDriver) encodeUint(i uint64) { - switch { - case i <= math.MaxInt8: - e.w.writen1(byte(i)) - case i <= math.MaxUint8: +func (e *msgpackEncDriver) EncodeUint(i uint64) { + if i <= math.MaxInt8 { + if e.h.NoFixedNum { + e.w.writen2(mpUint8, byte(i)) + } else { + e.w.writen1(byte(i)) + } + } else if i <= math.MaxUint8 { e.w.writen2(mpUint8, byte(i)) - case i <= math.MaxUint16: + } else if i <= math.MaxUint16 { e.w.writen1(mpUint16) - e.w.writeUint16(uint16(i)) - case i <= math.MaxUint32: + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) + } else if i <= math.MaxUint32 { e.w.writen1(mpUint32) - e.w.writeUint32(uint32(i)) - default: + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) + } else { e.w.writen1(mpUint64) - e.w.writeUint64(uint64(i)) + bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) } } -func (e *msgpackEncDriver) encodeBool(b bool) { +func (e *msgpackEncDriver) EncodeBool(b bool) { if b { e.w.writen1(mpTrue) } else { @@ -164,229 +275,318 @@ func (e *msgpackEncDriver) encodeBool(b bool) { } } -func (e *msgpackEncDriver) encodeFloat32(f float32) { +func (e *msgpackEncDriver) EncodeFloat32(f float32) { e.w.writen1(mpFloat) - e.w.writeUint32(math.Float32bits(f)) + bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f)) } -func (e *msgpackEncDriver) encodeFloat64(f float64) { +func (e *msgpackEncDriver) EncodeFloat64(f float64) { e.w.writen1(mpDouble) - e.w.writeUint64(math.Float64bits(f)) + bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f)) +} + +func (e *msgpackEncDriver) EncodeTime(t time.Time) { + if t.IsZero() { + e.EncodeNil() + return + } + t = t.UTC() + sec, nsec := t.Unix(), uint64(t.Nanosecond()) + var data64 uint64 + var l = 4 + if sec >= 0 && sec>>34 == 0 { + data64 = (nsec << 34) | uint64(sec) + if data64&0xffffffff00000000 != 0 { + l = 8 + } + } else { + l = 12 + } + if e.h.WriteExt { + e.encodeExtPreamble(mpTimeExtTagU, l) + } else { + e.writeContainerLen(msgpackContainerRawLegacy, l) + } + switch l { + case 4: + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(data64)) + case 8: + bigenHelper{e.x[:8], e.w}.writeUint64(data64) + case 12: 
+ bigenHelper{e.x[:4], e.w}.writeUint32(uint32(nsec)) + bigenHelper{e.x[:8], e.w}.writeUint64(uint64(sec)) + } +} + +func (e *msgpackEncDriver) EncodeExt(v interface{}, xtag uint64, ext Ext, _ *Encoder) { + bs := ext.WriteExt(v) + if bs == nil { + e.EncodeNil() + return + } + if e.h.WriteExt { + e.encodeExtPreamble(uint8(xtag), len(bs)) + e.w.writeb(bs) + } else { + e.EncodeStringBytesRaw(bs) + } +} + +func (e *msgpackEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { + e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) + e.w.writeb(re.Data) } func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) { - switch { - case l == 1: + if l == 1 { e.w.writen2(mpFixExt1, xtag) - case l == 2: + } else if l == 2 { e.w.writen2(mpFixExt2, xtag) - case l == 4: + } else if l == 4 { e.w.writen2(mpFixExt4, xtag) - case l == 8: + } else if l == 8 { e.w.writen2(mpFixExt8, xtag) - case l == 16: + } else if l == 16 { e.w.writen2(mpFixExt16, xtag) - case l < 256: + } else if l < 256 { e.w.writen2(mpExt8, byte(l)) e.w.writen1(xtag) - case l < 65536: + } else if l < 65536 { e.w.writen1(mpExt16) - e.w.writeUint16(uint16(l)) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l)) e.w.writen1(xtag) - default: + } else { e.w.writen1(mpExt32) - e.w.writeUint32(uint32(l)) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l)) e.w.writen1(xtag) } } -func (e *msgpackEncDriver) encodeArrayPreamble(length int) { +func (e *msgpackEncDriver) WriteArrayStart(length int) { e.writeContainerLen(msgpackContainerList, length) } -func (e *msgpackEncDriver) encodeMapPreamble(length int) { +func (e *msgpackEncDriver) WriteMapStart(length int) { e.writeContainerLen(msgpackContainerMap, length) } -func (e *msgpackEncDriver) encodeString(c charEncoding, s string) { - if c == c_RAW && e.h.WriteExt { - e.writeContainerLen(msgpackContainerBin, len(s)) +func (e *msgpackEncDriver) EncodeString(c charEncoding, s string) { + slen := len(s) + if c == cRAW && e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, slen) } else { - e.writeContainerLen(msgpackContainerStr, len(s)) + e.writeContainerLen(msgpackContainerRawLegacy, slen) } - if len(s) > 0 { + if slen > 0 { e.w.writestr(s) } } -func (e *msgpackEncDriver) encodeSymbol(v string) { - e.encodeString(c_UTF8, v) +func (e *msgpackEncDriver) EncodeStringEnc(c charEncoding, s string) { + slen := len(s) + if e.h.WriteExt { + e.writeContainerLen(msgpackContainerStr, slen) + } else { + e.writeContainerLen(msgpackContainerRawLegacy, slen) + } + if slen > 0 { + e.w.writestr(s) + } } -func (e *msgpackEncDriver) encodeStringBytes(c charEncoding, bs []byte) { - if c == c_RAW && e.h.WriteExt { - e.writeContainerLen(msgpackContainerBin, len(bs)) +func (e *msgpackEncDriver) EncodeStringBytes(c charEncoding, bs []byte) { + if bs == nil { + e.EncodeNil() + return + } + slen := len(bs) + if c == cRAW && e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, slen) } else { - e.writeContainerLen(msgpackContainerStr, len(bs)) + e.writeContainerLen(msgpackContainerRawLegacy, slen) } - if len(bs) > 0 { + if slen > 0 { + e.w.writeb(bs) + } +} + +func (e *msgpackEncDriver) EncodeStringBytesRaw(bs []byte) { + if bs == nil { + e.EncodeNil() + return + } + slen := len(bs) + if e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, slen) + } else { + e.writeContainerLen(msgpackContainerRawLegacy, slen) + } + if slen > 0 { e.w.writeb(bs) } } func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) { - switch { - case ct.hasFixMin && l < ct.fixCutoff: + if ct.fixCutoff > 0 && l < int(ct.fixCutoff) 
{ e.w.writen1(ct.bFixMin | byte(l)) - case ct.has8 && l < 256 && (ct.has8Always || e.h.WriteExt): + } else if ct.b8 > 0 && l < 256 { e.w.writen2(ct.b8, uint8(l)) - case l < 65536: + } else if l < 65536 { e.w.writen1(ct.b16) - e.w.writeUint16(uint16(l)) - default: + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l)) + } else { e.w.writen1(ct.b32) - e.w.writeUint32(uint32(l)) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l)) } } //--------------------------------------------- type msgpackDecDriver struct { - r decReader - h *MsgpackHandle + d *Decoder + r *decReaderSwitch + h *MsgpackHandle + // b [scratchByteArrayLen]byte bd byte bdRead bool - bdType valueType + br bool // bytes reader + noBuiltInTypes + // noStreamingCodec + // decNoSeparator + decDriverNoopContainerReader + // _ [3]uint64 // padding } -func (d *msgpackDecDriver) isBuiltinType(rt uintptr) bool { - //no builtin types. All encodings are based on kinds. Types supported as extensions. - return false -} - -func (d *msgpackDecDriver) decodeBuiltin(rt uintptr, v interface{}) {} - // Note: This returns either a primitive (int, bool, etc) for non-containers, // or a containerType, or a specific type denoting nil or extension. // It is called when a nil interface{} is passed, leaving it up to the DecDriver // to introspect the stream and decide how best to decode. // It deciphers the value by looking at the stream first. -func (d *msgpackDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) { - d.initReadNext() +func (d *msgpackDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } bd := d.bd + n := d.d.naked() + var decodeFurther bool switch bd { case mpNil: - vt = valueTypeNil + n.v = valueTypeNil d.bdRead = false case mpFalse: - vt = valueTypeBool - v = false + n.v = valueTypeBool + n.b = false case mpTrue: - vt = valueTypeBool - v = true + n.v = valueTypeBool + n.b = true case mpFloat: - vt = valueTypeFloat - v = float64(math.Float32frombits(d.r.readUint32())) + n.v = valueTypeFloat + n.f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) case mpDouble: - vt = valueTypeFloat - v = math.Float64frombits(d.r.readUint64()) + n.v = valueTypeFloat + n.f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) case mpUint8: - vt = valueTypeUint - v = uint64(d.r.readn1()) + n.v = valueTypeUint + n.u = uint64(d.r.readn1()) case mpUint16: - vt = valueTypeUint - v = uint64(d.r.readUint16()) + n.v = valueTypeUint + n.u = uint64(bigen.Uint16(d.r.readx(2))) case mpUint32: - vt = valueTypeUint - v = uint64(d.r.readUint32()) + n.v = valueTypeUint + n.u = uint64(bigen.Uint32(d.r.readx(4))) case mpUint64: - vt = valueTypeUint - v = uint64(d.r.readUint64()) + n.v = valueTypeUint + n.u = uint64(bigen.Uint64(d.r.readx(8))) case mpInt8: - vt = valueTypeInt - v = int64(int8(d.r.readn1())) + n.v = valueTypeInt + n.i = int64(int8(d.r.readn1())) case mpInt16: - vt = valueTypeInt - v = int64(int16(d.r.readUint16())) + n.v = valueTypeInt + n.i = int64(int16(bigen.Uint16(d.r.readx(2)))) case mpInt32: - vt = valueTypeInt - v = int64(int32(d.r.readUint32())) + n.v = valueTypeInt + n.i = int64(int32(bigen.Uint32(d.r.readx(4)))) case mpInt64: - vt = valueTypeInt - v = int64(int64(d.r.readUint64())) + n.v = valueTypeInt + n.i = int64(int64(bigen.Uint64(d.r.readx(8)))) default: switch { case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: // positive fixnum (always signed) - vt = valueTypeInt - v = int64(int8(bd)) + n.v = valueTypeInt + n.i = int64(int8(bd)) case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: // negative fixnum - vt 
= valueTypeInt - v = int64(int8(bd)) + n.v = valueTypeInt + n.i = int64(int8(bd)) case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: - if d.h.RawToString { - var rvm string - vt = valueTypeString - v = &rvm + if d.h.WriteExt || d.h.RawToString { + n.v = valueTypeString + n.s = d.DecodeString() } else { - var rvm = []byte{} - vt = valueTypeBytes - v = &rvm + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false) } - decodeFurther = true case bd == mpBin8, bd == mpBin16, bd == mpBin32: - var rvm = []byte{} - vt = valueTypeBytes - v = &rvm - decodeFurther = true + decNakedReadRawBytes(d, d.d, n, d.h.RawToString) case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: - vt = valueTypeArray + n.v = valueTypeArray decodeFurther = true case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: - vt = valueTypeMap + n.v = valueTypeMap decodeFurther = true case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: + n.v = valueTypeExt clen := d.readExtLen() - var re RawExt - re.Tag = d.r.readn1() - re.Data = d.r.readn(clen) - v = &re - vt = valueTypeExt + n.u = uint64(d.r.readn1()) + if n.u == uint64(mpTimeExtTagU) { + n.v = valueTypeTime + n.t = d.decodeTime(clen) + } else if d.br { + n.l = d.r.readx(uint(clen)) + } else { + n.l = decByteSlice(d.r, clen, d.d.h.MaxInitLen, d.d.b[:]) + } default: - decErr("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) + d.d.errorf("cannot infer value: %s: Ox%x/%d/%s", msgBadDesc, bd, bd, mpdesc(bd)) } } if !decodeFurther { d.bdRead = false } - return + if n.v == valueTypeUint && d.h.SignedInteger { + n.v = valueTypeInt + n.i = int64(n.u) + } } // int can be decoded from msgpack type: intXXX or uintXXX -func (d *msgpackDecDriver) decodeInt(bitsize uint8) (i int64) { +func (d *msgpackDecDriver) DecodeInt64() (i int64) { + if !d.bdRead { + d.readNextBd() + } switch d.bd { case mpUint8: i = int64(uint64(d.r.readn1())) case mpUint16: - i = int64(uint64(d.r.readUint16())) + i = int64(uint64(bigen.Uint16(d.r.readx(2)))) case mpUint32: - i = int64(uint64(d.r.readUint32())) + i = int64(uint64(bigen.Uint32(d.r.readx(4)))) case mpUint64: - i = int64(d.r.readUint64()) + i = int64(bigen.Uint64(d.r.readx(8))) case mpInt8: i = int64(int8(d.r.readn1())) case mpInt16: - i = int64(int16(d.r.readUint16())) + i = int64(int16(bigen.Uint16(d.r.readx(2)))) case mpInt32: - i = int64(int32(d.r.readUint32())) + i = int64(int32(bigen.Uint32(d.r.readx(4)))) case mpInt64: - i = int64(d.r.readUint64()) + i = int64(bigen.Uint64(d.r.readx(8))) default: switch { case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: @@ -394,13 +594,8 @@ func (d *msgpackDecDriver) decodeInt(bitsize uint8) (i int64) { case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: i = int64(int8(d.bd)) default: - decErr("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) - } - } - // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() - if bitsize > 0 { - if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc { - decErr("Overflow int value: %v", i) + d.d.errorf("cannot decode signed integer: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd)) + return } } d.bdRead = false @@ -408,54 +603,57 @@ func (d *msgpackDecDriver) decodeInt(bitsize uint8) (i int64) { } // uint can be decoded from msgpack type: intXXX or uintXXX -func (d *msgpackDecDriver) decodeUint(bitsize uint8) (ui uint64) { +func (d *msgpackDecDriver) DecodeUint64() (ui uint64) { + if !d.bdRead { + 
d.readNextBd() + } switch d.bd { case mpUint8: ui = uint64(d.r.readn1()) case mpUint16: - ui = uint64(d.r.readUint16()) + ui = uint64(bigen.Uint16(d.r.readx(2))) case mpUint32: - ui = uint64(d.r.readUint32()) + ui = uint64(bigen.Uint32(d.r.readx(4))) case mpUint64: - ui = d.r.readUint64() + ui = bigen.Uint64(d.r.readx(8)) case mpInt8: if i := int64(int8(d.r.readn1())); i >= 0 { ui = uint64(i) } else { - decErr("Assigning negative signed value: %v, to unsigned type", i) + d.d.errorf("assigning negative signed value: %v, to unsigned type", i) + return } case mpInt16: - if i := int64(int16(d.r.readUint16())); i >= 0 { + if i := int64(int16(bigen.Uint16(d.r.readx(2)))); i >= 0 { ui = uint64(i) } else { - decErr("Assigning negative signed value: %v, to unsigned type", i) + d.d.errorf("assigning negative signed value: %v, to unsigned type", i) + return } case mpInt32: - if i := int64(int32(d.r.readUint32())); i >= 0 { + if i := int64(int32(bigen.Uint32(d.r.readx(4)))); i >= 0 { ui = uint64(i) } else { - decErr("Assigning negative signed value: %v, to unsigned type", i) + d.d.errorf("assigning negative signed value: %v, to unsigned type", i) + return } case mpInt64: - if i := int64(d.r.readUint64()); i >= 0 { + if i := int64(bigen.Uint64(d.r.readx(8))); i >= 0 { ui = uint64(i) } else { - decErr("Assigning negative signed value: %v, to unsigned type", i) + d.d.errorf("assigning negative signed value: %v, to unsigned type", i) + return } default: switch { case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: ui = uint64(d.bd) case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: - decErr("Assigning negative signed value: %v, to unsigned type", int(d.bd)) + d.d.errorf("assigning negative signed value: %v, to unsigned type", int(d.bd)) + return default: - decErr("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) - } - } - // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() - if bitsize > 0 { - if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc { - decErr("Overflow uint value: %v", ui) + d.d.errorf("cannot decode unsigned integer: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd)) + return } } d.bdRead = false @@ -463,160 +661,173 @@ func (d *msgpackDecDriver) decodeUint(bitsize uint8) (ui uint64) { } // float can either be decoded from msgpack type: float, double or intX -func (d *msgpackDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { - switch d.bd { - case mpFloat: - f = float64(math.Float32frombits(d.r.readUint32())) - case mpDouble: - f = math.Float64frombits(d.r.readUint64()) - default: - f = float64(d.decodeInt(0)) +func (d *msgpackDecDriver) DecodeFloat64() (f float64) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == mpFloat { + f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + } else if d.bd == mpDouble { + f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + } else { + f = float64(d.DecodeInt64()) } - checkOverflowFloat32(f, chkOverflow32) d.bdRead = false return } // bool can be decoded from bool, fixnum 0 or 1. 
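// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the vendored patch: the decode side is
// deliberately permissive across numeric kinds, as the methods above show.
// DecodeFloat64 falls back to DecodeInt64 for integer descriptors, while
// DecodeUint64 rejects negative values. NewDecoderBytes is the package's
// public API; the function name is hypothetical.
func decodeNumericCoercion() {
	var h MsgpackHandle
	var f float64
	// 0x07 is the positive fixnum 7; it decodes into a float target as 7.0.
	_ = NewDecoderBytes([]byte{0x07}, &h).Decode(&f)
	var u uint64
	// 0xff is the negative fixnum -1; decoding it into a uint target fails
	// with "assigning negative signed value ... to unsigned type".
	err := NewDecoderBytes([]byte{0xff}, &h).Decode(&u)
	_ = err
}
// ---------------------------------------------------------------------------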
-func (d *msgpackDecDriver) decodeBool() (b bool) { - switch d.bd { - case mpFalse, 0: +func (d *msgpackDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == mpFalse || d.bd == 0 { // b = false - case mpTrue, 1: + } else if d.bd == mpTrue || d.bd == 1 { b = true - default: - decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + } else { + d.d.errorf("cannot decode bool: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd)) + return } d.bdRead = false return } -func (d *msgpackDecDriver) decodeString() (s string) { - clen := d.readContainerLen(msgpackContainerStr) - if clen > 0 { - s = string(d.r.readn(clen)) +func (d *msgpackDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { + if !d.bdRead { + d.readNextBd() } - d.bdRead = false - return -} -// Callers must check if changed=true (to decide whether to replace the one they have) -func (d *msgpackDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { - // bytes can be decoded from msgpackContainerStr or msgpackContainerBin + bd := d.bd var clen int - switch d.bd { - case mpBin8, mpBin16, mpBin32: - clen = d.readContainerLen(msgpackContainerBin) - default: - clen = d.readContainerLen(msgpackContainerStr) - } - // if clen < 0 { - // changed = true - // panic("length cannot be zero. this cannot be nil.") - // } - if clen > 0 { - // if no contents in stream, don't update the passed byteslice - if len(bs) != clen { - // Return changed=true if length of passed slice diff from length of bytes in stream - if len(bs) > clen { - bs = bs[:clen] - } else { - bs = make([]byte, clen) - } - bsOut = bs - changed = true + if bd == mpNil { + d.bdRead = false + return + } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { + clen = d.readContainerLen(msgpackContainerBin) // binary + } else if bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax) { + clen = d.readContainerLen(msgpackContainerStr) // string/raw + } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) { + // check if an "array" of uint8's + if zerocopy && len(bs) == 0 { + bs = d.d.b[:] } - d.r.readb(bs) + bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d) + return + } else { + d.d.errorf("invalid byte descriptor for decoding bytes, got: 0x%x", d.bd) + return } + d.bdRead = false - return + if zerocopy { + if d.br { + return d.r.readx(uint(clen)) + } else if len(bs) == 0 { + bs = d.d.b[:] + } + } + return decByteSlice(d.r, clen, d.h.MaxInitLen, bs) } -// Every top-level decode funcs (i.e. decodeValue, decode) must call this first. 
-func (d *msgpackDecDriver) initReadNext() { - if d.bdRead { - return - } +func (d *msgpackDecDriver) DecodeString() (s string) { + return string(d.DecodeBytes(d.d.b[:], true)) +} + +func (d *msgpackDecDriver) DecodeStringAsBytes() (s []byte) { + return d.DecodeBytes(d.d.b[:], true) +} + +func (d *msgpackDecDriver) readNextBd() { d.bd = d.r.readn1() d.bdRead = true - d.bdType = valueTypeUnset -} - -func (d *msgpackDecDriver) currentEncodedType() valueType { - if d.bdType == valueTypeUnset { - bd := d.bd - switch bd { - case mpNil: - d.bdType = valueTypeNil - case mpFalse, mpTrue: - d.bdType = valueTypeBool - case mpFloat, mpDouble: - d.bdType = valueTypeFloat - case mpUint8, mpUint16, mpUint32, mpUint64: - d.bdType = valueTypeUint - case mpInt8, mpInt16, mpInt32, mpInt64: - d.bdType = valueTypeInt - default: - switch { - case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: - d.bdType = valueTypeInt - case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: - d.bdType = valueTypeInt - case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: - if d.h.RawToString { - d.bdType = valueTypeString - } else { - d.bdType = valueTypeBytes - } - case bd == mpBin8, bd == mpBin16, bd == mpBin32: - d.bdType = valueTypeBytes - case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: - d.bdType = valueTypeArray - case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: - d.bdType = valueTypeMap - case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: - d.bdType = valueTypeExt - default: - decErr("currentEncodedType: Undeciphered descriptor: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) - } +} + +func (d *msgpackDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + +func (d *msgpackDecDriver) ContainerType() (vt valueType) { + if !d.bdRead { + d.readNextBd() + } + bd := d.bd + // if bd == mpNil { + // // nil + // } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { + // // binary + // } else if bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax) { + // // string/raw + // } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) { + // // array + // } else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) { + // // map + // } + if bd == mpNil { + return valueTypeNil + } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { + return valueTypeBytes + } else if bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax) { + if d.h.WriteExt || d.h.RawToString { // UTF-8 string (new spec) + return valueTypeString } + return valueTypeBytes // raw (old spec) + } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) { + return valueTypeArray + } else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) { + return valueTypeMap } - return d.bdType + // else { + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + // } + return valueTypeUnset } -func (d *msgpackDecDriver) tryDecodeAsNil() bool { +func (d *msgpackDecDriver) TryDecodeAsNil() (v bool) { + if !d.bdRead { + d.readNextBd() + } if d.bd == mpNil { d.bdRead = false return true } - return false + return } func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) { bd := d.bd - switch { - case bd == mpNil: + if bd == mpNil { clen = -1 // to represent nil - case bd == ct.b8: + } else if bd == ct.b8 { clen = 
int(d.r.readn1()) - case bd == ct.b16: - clen = int(d.r.readUint16()) - case bd == ct.b32: - clen = int(d.r.readUint32()) - case (ct.bFixMin & bd) == ct.bFixMin: + } else if bd == ct.b16 { + clen = int(bigen.Uint16(d.r.readx(2))) + } else if bd == ct.b32 { + clen = int(bigen.Uint32(d.r.readx(4))) + } else if (ct.bFixMin & bd) == ct.bFixMin { clen = int(ct.bFixMin ^ bd) - default: - decErr("readContainerLen: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) + } else { + d.d.errorf("cannot read container length: %s: hex: %x, decimal: %d", msgBadDesc, bd, bd) + return } d.bdRead = false return } -func (d *msgpackDecDriver) readMapLen() int { +func (d *msgpackDecDriver) ReadMapStart() int { + if !d.bdRead { + d.readNextBd() + } return d.readContainerLen(msgpackContainerMap) } -func (d *msgpackDecDriver) readArrayLen() int { +func (d *msgpackDecDriver) ReadArrayStart() int { + if !d.bdRead { + d.readNextBd() + } return d.readContainerLen(msgpackContainerList) } @@ -637,30 +848,107 @@ func (d *msgpackDecDriver) readExtLen() (clen int) { case mpExt8: clen = int(d.r.readn1()) case mpExt16: - clen = int(d.r.readUint16()) + clen = int(bigen.Uint16(d.r.readx(2))) case mpExt32: - clen = int(d.r.readUint32()) + clen = int(bigen.Uint32(d.r.readx(4))) default: - decErr("decoding ext bytes: found unexpected byte: %x", d.bd) + d.d.errorf("decoding ext bytes: found unexpected byte: %x", d.bd) + return } return } -func (d *msgpackDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { - xbd := d.bd - switch { - case xbd == mpBin8, xbd == mpBin16, xbd == mpBin32: - xbs, _ = d.decodeBytes(nil) - case xbd == mpStr8, xbd == mpStr16, xbd == mpStr32, - xbd >= mpFixStrMin && xbd <= mpFixStrMax: - xbs = []byte(d.decodeString()) +func (d *msgpackDecDriver) DecodeTime() (t time.Time) { + // decode time from string bytes or ext + if !d.bdRead { + d.readNextBd() + } + bd := d.bd + var clen int + if bd == mpNil { + d.bdRead = false + return + } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { + clen = d.readContainerLen(msgpackContainerBin) // binary + } else if bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax) { + clen = d.readContainerLen(msgpackContainerStr) // string/raw + } else { + // expect to see mpFixExt4,-1 OR mpFixExt8,-1 OR mpExt8,12,-1 + d.bdRead = false + b2 := d.r.readn1() + if d.bd == mpFixExt4 && b2 == mpTimeExtTagU { + clen = 4 + } else if d.bd == mpFixExt8 && b2 == mpTimeExtTagU { + clen = 8 + } else if d.bd == mpExt8 && b2 == 12 && d.r.readn1() == mpTimeExtTagU { + clen = 12 + } else { + d.d.errorf("invalid stream for decoding time as extension: got 0x%x, 0x%x", d.bd, b2) + return + } + } + return d.decodeTime(clen) +} + +func (d *msgpackDecDriver) decodeTime(clen int) (t time.Time) { + // bs = d.r.readx(clen) + d.bdRead = false + switch clen { + case 4: + t = time.Unix(int64(bigen.Uint32(d.r.readx(4))), 0).UTC() + case 8: + tv := bigen.Uint64(d.r.readx(8)) + t = time.Unix(int64(tv&0x00000003ffffffff), int64(tv>>34)).UTC() + case 12: + nsec := bigen.Uint32(d.r.readx(4)) + sec := bigen.Uint64(d.r.readx(8)) + t = time.Unix(int64(sec), int64(nsec)).UTC() default: + d.d.errorf("invalid length of bytes for decoding time - expecting 4 or 8 or 12, got %d", clen) + return + } + return +} + +func (d *msgpackDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if xtag > 0xff { + d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag) + return + } + realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) + realxtag = 
uint64(realxtag1) + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) + } else { + ext.ReadExt(rv, xbs) + } + return +} + +func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + if !d.bdRead { + d.readNextBd() + } + xbd := d.bd + if xbd == mpBin8 || xbd == mpBin16 || xbd == mpBin32 { + xbs = d.DecodeBytes(nil, true) + } else if xbd == mpStr8 || xbd == mpStr16 || xbd == mpStr32 || + (xbd >= mpFixStrMin && xbd <= mpFixStrMax) { + xbs = d.DecodeStringAsBytes() + } else { clen := d.readExtLen() xtag = d.r.readn1() if verifyTag && xtag != tag { - decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + d.d.errorf("wrong extension tag - got %b, expecting %v", xtag, tag) + return + } + if d.br { + xbs = d.r.readx(uint(clen)) + } else { + xbs = decByteSlice(d.r, clen, d.d.h.MaxInitLen, d.d.b[:]) } - xbs = d.r.readn(clen) } d.bdRead = false return @@ -672,35 +960,55 @@ func (d *msgpackDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs [ type MsgpackHandle struct { BasicHandle - // RawToString controls how raw bytes are decoded into a nil interface{}. - RawToString bool - // WriteExt flag supports encoding configured extensions with extension tags. - // It also controls whether other elements of the new spec are encoded (ie Str8). + // NoFixedNum says to output all signed integers as 2-bytes, never as 1-byte fixednum. + NoFixedNum bool + + // WriteExt controls whether the new spec is honored. + // + // With WriteExt=true, we can encode configured extensions with extension tags + // and encode string/[]byte/extensions in a way compatible with the new spec + // but incompatible with the old spec. // - // With WriteExt=false, configured extensions are serialized as raw bytes - // and Str8 is not encoded. + // For compatibility with the old spec, set WriteExt=false. // - // A stream can still be decoded into a typed value, provided an appropriate value - // is provided, but the type cannot be inferred from the stream. If no appropriate - // type is provided (e.g. decoding into a nil interface{}), you get back - // a []byte or string based on the setting of RawToString. + // With WriteExt=false: + // configured extensions are serialized as raw bytes (not msgpack extensions). + // reserved byte descriptors like Str8 and those enabling the new msgpack Binary type + // are not encoded. WriteExt bool + + // PositiveIntUnsigned says to encode positive integers as unsigned. 
+ PositiveIntUnsigned bool + + binaryEncodingType + noElemSeparators + + // _ [1]uint64 // padding } -func (h *MsgpackHandle) newEncDriver(w encWriter) encDriver { - return &msgpackEncDriver{w: w, h: h} +// Name returns the name of the handle: msgpack +func (h *MsgpackHandle) Name() string { return "msgpack" } + +// SetBytesExt sets an extension +func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { + return h.SetExt(rt, tag, &extWrapper{ext, interfaceExtFailer{}}) } -func (h *MsgpackHandle) newDecDriver(r decReader) decDriver { - return &msgpackDecDriver{r: r, h: h} +func (h *MsgpackHandle) newEncDriver(e *Encoder) encDriver { + return &msgpackEncDriver{e: e, w: e.w, h: h} } -func (h *MsgpackHandle) writeExt() bool { - return h.WriteExt +func (h *MsgpackHandle) newDecDriver(d *Decoder) decDriver { + return &msgpackDecDriver{d: d, h: h, r: d.r, br: d.bytes} } -func (h *MsgpackHandle) getBasicHandle() *BasicHandle { - return &h.BasicHandle +func (e *msgpackEncDriver) reset() { + e.w = e.e.w +} + +func (d *msgpackDecDriver) reset() { + d.r, d.br = d.d.r, d.d.bytes + d.bd, d.bdRead = 0, false } //-------------------------------------------------- @@ -721,7 +1029,7 @@ func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) err bodyArr = []interface{}{body} } r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr} - return c.write(r2, nil, false, true) + return c.write(r2, nil, false) } func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { @@ -733,7 +1041,7 @@ func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) e body = nil } r2 := []interface{}{1, uint32(r.Seq), moe, body} - return c.write(r2, nil, false, true) + return c.write(r2, nil, false) } func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error { @@ -753,8 +1061,7 @@ func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error { } func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) { - - if c.cls { + if cls := c.cls.load(); cls.closed { return io.EOF } @@ -767,28 +1074,34 @@ func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint // err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, bs1) // return // } - var b byte - b, err = c.br.ReadByte() - if err != nil { - return - } - if b != fia { - err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, b) - return + var ba [1]byte + var n int + for { + n, err = c.r.Read(ba[:]) + if err != nil { + return + } + if n == 1 { + break + } } - if err = c.read(&b); err != nil { - return - } - if b != expectTypeByte { - err = fmt.Errorf("Unexpected byte descriptor in header. Expecting %v. 
Received %v", expectTypeByte, b) - return - } - if err = c.read(msgid); err != nil { - return - } - if err = c.read(methodOrError); err != nil { - return + var b = ba[0] + if b != fia { + err = fmt.Errorf("not array - %s %x/%s", msgBadDesc, b, mpdesc(b)) + } else { + err = c.read(&b) + if err == nil { + if b != expectTypeByte { + err = fmt.Errorf("%s - expecting %v but got %x/%s", + msgBadDesc, expectTypeByte, b, mpdesc(b)) + } else { + err = c.read(msgid) + if err == nil { + err = c.read(methodOrError) + } + } + } } return } @@ -801,7 +1114,8 @@ type msgpackSpecRpc struct{} // MsgpackSpecRpc implements Rpc using the communication protocol defined in // the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md . -// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. +// +// See GoRpc documentation, for information on buffering for better performance. var MsgpackSpecRpc msgpackSpecRpc func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/rpc.go b/vendor/github.com/hashicorp/go-msgpack/codec/rpc.go index d014dbdcc7d0a..3925088152dc1 100644 --- a/vendor/github.com/hashicorp/go-msgpack/codec/rpc.go +++ b/vendor/github.com/hashicorp/go-msgpack/codec/rpc.go @@ -1,99 +1,150 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. package codec import ( "bufio" + "errors" "io" "net/rpc" - "sync" ) +var errRpcJsonNeedsTermWhitespace = errors.New("rpc requires JsonHandle with TermWhitespace=true") + // Rpc provides a rpc Server or Client Codec for rpc communication. type Rpc interface { ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec } -// RpcCodecBuffered allows access to the underlying bufio.Reader/Writer -// used by the rpc connection. It accomodates use-cases where the connection -// should be used by rpc and non-rpc functions, e.g. streaming a file after -// sending an rpc response. -type RpcCodecBuffered interface { - BufferedReader() *bufio.Reader - BufferedWriter() *bufio.Writer +// RPCOptions holds options specific to rpc functionality +type RPCOptions struct { + // RPCNoBuffer configures whether we attempt to buffer reads and writes during RPC calls. + // + // Set RPCNoBuffer=true to turn buffering off. + // Buffering can still be done if buffered connections are passed in, or + // buffering is configured on the handle. + RPCNoBuffer bool } -// ------------------------------------- - // rpcCodec defines the struct members and common methods. 
type rpcCodec struct { - rwc io.ReadWriteCloser + c io.Closer + r io.Reader + w io.Writer + f ioFlusher + dec *Decoder enc *Encoder - bw *bufio.Writer - br *bufio.Reader - mu sync.Mutex - cls bool -} + // bw *bufio.Writer + // br *bufio.Reader + h Handle -func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec { - bw := bufio.NewWriter(conn) - br := bufio.NewReader(conn) - return rpcCodec{ - rwc: conn, - bw: bw, - br: br, - enc: NewEncoder(bw, h), - dec: NewDecoder(br, h), - } + cls atomicClsErr } -func (c *rpcCodec) BufferedReader() *bufio.Reader { - return c.br +func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec { + // return newRPCCodec2(bufio.NewReader(conn), bufio.NewWriter(conn), conn, h) + return newRPCCodec2(conn, conn, conn, h) } -func (c *rpcCodec) BufferedWriter() *bufio.Writer { - return c.bw +func newRPCCodec2(r io.Reader, w io.Writer, c io.Closer, h Handle) rpcCodec { + // defensive: ensure that jsonH has TermWhitespace turned on. + if jsonH, ok := h.(*JsonHandle); ok && !jsonH.TermWhitespace { + panic(errRpcJsonNeedsTermWhitespace) + } + // always ensure that we use a flusher, and always flush what was written to the connection. + // we lose nothing by using a buffered writer internally. + f, ok := w.(ioFlusher) + bh := basicHandle(h) + if !bh.RPCNoBuffer { + if bh.WriterBufferSize <= 0 { + if !ok { + bw := bufio.NewWriter(w) + f, w = bw, bw + } + } + if bh.ReaderBufferSize <= 0 { + if _, ok = w.(ioPeeker); !ok { + if _, ok = w.(ioBuffered); !ok { + br := bufio.NewReader(r) + r = br + } + } + } + } + return rpcCodec{ + c: c, + w: w, + r: r, + f: f, + h: h, + enc: NewEncoder(w, h), + dec: NewDecoder(r, h), + } } -func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2, doFlush bool) (err error) { - if c.cls { - return io.EOF - } - if err = c.enc.Encode(obj1); err != nil { - return +func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2 bool) (err error) { + if c.c != nil { + cls := c.cls.load() + if cls.closed { + return cls.errClosed + } } - if writeObj2 { - if err = c.enc.Encode(obj2); err != nil { - return + err = c.enc.Encode(obj1) + if err == nil { + if writeObj2 { + err = c.enc.Encode(obj2) } + // if err == nil && c.f != nil { + // err = c.f.Flush() + // } } - if doFlush && c.bw != nil { - return c.bw.Flush() + if c.f != nil { + if err == nil { + err = c.f.Flush() + } else { + _ = c.f.Flush() // swallow flush error, so we maintain prior error on write + } } return } +func (c *rpcCodec) swallow(err *error) { + defer panicToErr(c.dec, err) + c.dec.swallow() +} + func (c *rpcCodec) read(obj interface{}) (err error) { - if c.cls { - return io.EOF + if c.c != nil { + cls := c.cls.load() + if cls.closed { + return cls.errClosed + } } - //If nil is passed in, we should still attempt to read content to nowhere. 
+ //If nil is passed in, we should read and discard if obj == nil { - var obj2 interface{} - return c.dec.Decode(&obj2) + // var obj2 interface{} + // return c.dec.Decode(&obj2) + c.swallow(&err) + return } return c.dec.Decode(obj) } func (c *rpcCodec) Close() error { - if c.cls { - return io.EOF + if c.c == nil { + return nil + } + cls := c.cls.load() + if cls.closed { + return cls.errClosed } - c.cls = true - return c.rwc.Close() + cls.errClosed = c.c.Close() + cls.closed = true + c.cls.store(cls) + return cls.errClosed } func (c *rpcCodec) ReadResponseBody(body interface{}) error { @@ -107,16 +158,11 @@ type goRpcCodec struct { } func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { - // Must protect for concurrent access as per API - c.mu.Lock() - defer c.mu.Unlock() - return c.write(r, body, true, true) + return c.write(r, body, true) } func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { - c.mu.Lock() - defer c.mu.Unlock() - return c.write(r, body, true, true) + return c.write(r, body, true) } func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error { @@ -138,7 +184,36 @@ func (c *goRpcCodec) ReadRequestBody(body interface{}) error { type goRpc struct{} // GoRpc implements Rpc using the communication protocol defined in net/rpc package. -// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. +// +// Note: network connection (from net.Dial, of type io.ReadWriteCloser) is not buffered. +// +// For performance, you should configure WriterBufferSize and ReaderBufferSize on the handle. +// This ensures we use an adequate buffer during reading and writing. +// If not configured, we will internally initialize and use a buffer during reads and writes. +// This can be turned off via the RPCNoBuffer option on the Handle. +// var handle codec.JsonHandle +// handle.RPCNoBuffer = true // turns off attempt by rpc module to initialize a buffer +// +// Example 1: one way of configuring buffering explicitly: +// var handle codec.JsonHandle // codec handle +// handle.ReaderBufferSize = 1024 +// handle.WriterBufferSize = 1024 +// var conn io.ReadWriteCloser // connection got from a socket +// var serverCodec = GoRpc.ServerCodec(conn, handle) +// var clientCodec = GoRpc.ClientCodec(conn, handle) +// +// Example 2: you can also explicitly create a buffered connection yourself, +// and not worry about configuring the buffer sizes in the Handle. 
+// var handle codec.Handle // codec handle +// var conn io.ReadWriteCloser // connection got from a socket +// var bufconn = struct { // bufconn here is a buffered io.ReadWriteCloser +// io.Closer +// *bufio.Reader +// *bufio.Writer +// }{conn, bufio.NewReader(conn), bufio.NewWriter(conn)} +// var serverCodec = GoRpc.ServerCodec(bufconn, handle) +// var clientCodec = GoRpc.ClientCodec(bufconn, handle) +// var GoRpc goRpc func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { @@ -148,5 +223,3 @@ func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { return &goRpcCodec{newRPCCodec(conn, h)} } - -var _ RpcCodecBuffered = (*rpcCodec)(nil) // ensure *rpcCodec implements RpcCodecBuffered diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/simple.go b/vendor/github.com/hashicorp/go-msgpack/codec/simple.go index 9e4d148a2a179..a3257c1a7bdd3 100644 --- a/vendor/github.com/hashicorp/go-msgpack/codec/simple.go +++ b/vendor/github.com/hashicorp/go-msgpack/codec/simple.go @@ -1,9 +1,13 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. package codec -import "math" +import ( + "math" + "reflect" + "time" +) const ( _ uint8 = iota @@ -17,6 +21,8 @@ const ( simpleVdPosInt = 8 simpleVdNegInt = 12 + simpleVdTime = 24 + // containers: each lasts for 4 (ie n, n+1, n+2, ... n+7) simpleVdString = 216 simpleVdByteArray = 224 @@ -26,23 +32,27 @@ const ( ) type simpleEncDriver struct { + noBuiltInTypes + // encNoSeparator + e *Encoder h *SimpleHandle - w encWriter - //b [8]byte -} - -func (e *simpleEncDriver) isBuiltinType(rt uintptr) bool { - return false + w *encWriterSwitch + b [8]byte + // c containerState + encDriverTrackContainerWriter + // encDriverNoopContainerWriter + _ [3]uint64 // padding } -func (e *simpleEncDriver) encodeBuiltin(rt uintptr, v interface{}) { -} - -func (e *simpleEncDriver) encodeNil() { +func (e *simpleEncDriver) EncodeNil() { e.w.writen1(simpleVdNil) } -func (e *simpleEncDriver) encodeBool(b bool) { +func (e *simpleEncDriver) EncodeBool(b bool) { + if e.h.EncZeroValuesAsNil && e.c != containerMapKey && !b { + e.EncodeNil() + return + } if b { e.w.writen1(simpleVdTrue) } else { @@ -50,17 +60,25 @@ func (e *simpleEncDriver) encodeBool(b bool) { } } -func (e *simpleEncDriver) encodeFloat32(f float32) { +func (e *simpleEncDriver) EncodeFloat32(f float32) { + if e.h.EncZeroValuesAsNil && e.c != containerMapKey && f == 0.0 { + e.EncodeNil() + return + } e.w.writen1(simpleVdFloat32) - e.w.writeUint32(math.Float32bits(f)) + bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f)) } -func (e *simpleEncDriver) encodeFloat64(f float64) { +func (e *simpleEncDriver) EncodeFloat64(f float64) { + if e.h.EncZeroValuesAsNil && e.c != containerMapKey && f == 0.0 { + e.EncodeNil() + return + } e.w.writen1(simpleVdFloat64) - e.w.writeUint64(math.Float64bits(f)) + bigenHelper{e.b[:8], e.w}.writeUint64(math.Float64bits(f)) } -func (e *simpleEncDriver) encodeInt(v int64) { +func (e *simpleEncDriver) EncodeInt(v int64) { if v < 0 { e.encUint(uint64(-v), simpleVdNegInt) } else { @@ -68,236 +86,323 @@ func (e *simpleEncDriver) encodeInt(v int64) { } } -func (e *simpleEncDriver) encodeUint(v uint64) { +func (e 
*simpleEncDriver) EncodeUint(v uint64) { e.encUint(v, simpleVdPosInt) } func (e *simpleEncDriver) encUint(v uint64, bd uint8) { - switch { - case v <= math.MaxUint8: + if e.h.EncZeroValuesAsNil && e.c != containerMapKey && v == 0 { + e.EncodeNil() + return + } + if v <= math.MaxUint8 { e.w.writen2(bd, uint8(v)) - case v <= math.MaxUint16: + } else if v <= math.MaxUint16 { e.w.writen1(bd + 1) - e.w.writeUint16(uint16(v)) - case v <= math.MaxUint32: + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) + } else if v <= math.MaxUint32 { e.w.writen1(bd + 2) - e.w.writeUint32(uint32(v)) - case v <= math.MaxUint64: + bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v)) + } else { // if v <= math.MaxUint64 { e.w.writen1(bd + 3) - e.w.writeUint64(v) + bigenHelper{e.b[:8], e.w}.writeUint64(v) } } func (e *simpleEncDriver) encLen(bd byte, length int) { - switch { - case length == 0: + if length == 0 { e.w.writen1(bd) - case length <= math.MaxUint8: + } else if length <= math.MaxUint8 { e.w.writen1(bd + 1) e.w.writen1(uint8(length)) - case length <= math.MaxUint16: + } else if length <= math.MaxUint16 { e.w.writen1(bd + 2) - e.w.writeUint16(uint16(length)) - case int64(length) <= math.MaxUint32: + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(length)) + } else if int64(length) <= math.MaxUint32 { e.w.writen1(bd + 3) - e.w.writeUint32(uint32(length)) - default: + bigenHelper{e.b[:4], e.w}.writeUint32(uint32(length)) + } else { e.w.writen1(bd + 4) - e.w.writeUint64(uint64(length)) + bigenHelper{e.b[:8], e.w}.writeUint64(uint64(length)) } } +func (e *simpleEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) { + bs := ext.WriteExt(rv) + if bs == nil { + e.EncodeNil() + return + } + e.encodeExtPreamble(uint8(xtag), len(bs)) + e.w.writeb(bs) +} + +func (e *simpleEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { + e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) + e.w.writeb(re.Data) +} + func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) { e.encLen(simpleVdExt, length) e.w.writen1(xtag) } -func (e *simpleEncDriver) encodeArrayPreamble(length int) { +func (e *simpleEncDriver) WriteArrayStart(length int) { + e.c = containerArrayStart e.encLen(simpleVdArray, length) } -func (e *simpleEncDriver) encodeMapPreamble(length int) { +func (e *simpleEncDriver) WriteMapStart(length int) { + e.c = containerMapStart e.encLen(simpleVdMap, length) } -func (e *simpleEncDriver) encodeString(c charEncoding, v string) { +// func (e *simpleEncDriver) EncodeSymbol(v string) { +// e.EncodeStringEnc(cUTF8, v) +// } + +func (e *simpleEncDriver) EncodeStringEnc(c charEncoding, v string) { + if false && e.h.EncZeroValuesAsNil && e.c != containerMapKey && v == "" { + e.EncodeNil() + return + } e.encLen(simpleVdString, len(v)) e.w.writestr(v) } -func (e *simpleEncDriver) encodeSymbol(v string) { - e.encodeString(c_UTF8, v) +func (e *simpleEncDriver) EncodeString(c charEncoding, v string) { + e.EncodeStringEnc(c, v) } -func (e *simpleEncDriver) encodeStringBytes(c charEncoding, v []byte) { +func (e *simpleEncDriver) EncodeStringBytes(c charEncoding, v []byte) { + e.EncodeStringBytesRaw(v) +} + +func (e *simpleEncDriver) EncodeStringBytesRaw(v []byte) { + // if e.h.EncZeroValuesAsNil && e.c != containerMapKey && v == nil { + if v == nil { + e.EncodeNil() + return + } e.encLen(simpleVdByteArray, len(v)) e.w.writeb(v) } +func (e *simpleEncDriver) EncodeTime(t time.Time) { + // if e.h.EncZeroValuesAsNil && e.c != containerMapKey && t.IsZero() { + if t.IsZero() { + e.EncodeNil() + return + } + v, err := 
t.MarshalBinary() + if err != nil { + e.e.errorv(err) + return + } + // time.Time marshalbinary takes about 14 bytes. + e.w.writen2(simpleVdTime, uint8(len(v))) + e.w.writeb(v) +} + //------------------------------------ type simpleDecDriver struct { + d *Decoder h *SimpleHandle - r decReader + r *decReaderSwitch bdRead bool - bdType valueType bd byte - //b [8]byte + br bool // a bytes reader? + c containerState + // b [scratchByteArrayLen]byte + noBuiltInTypes + // noStreamingCodec + decDriverNoopContainerReader + // _ [3]uint64 // padding } -func (d *simpleDecDriver) initReadNext() { - if d.bdRead { - return - } +func (d *simpleDecDriver) readNextBd() { d.bd = d.r.readn1() d.bdRead = true - d.bdType = valueTypeUnset -} - -func (d *simpleDecDriver) currentEncodedType() valueType { - if d.bdType == valueTypeUnset { - switch d.bd { - case simpleVdNil: - d.bdType = valueTypeNil - case simpleVdTrue, simpleVdFalse: - d.bdType = valueTypeBool - case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: - d.bdType = valueTypeUint - case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: - d.bdType = valueTypeInt - case simpleVdFloat32, simpleVdFloat64: - d.bdType = valueTypeFloat - case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: - d.bdType = valueTypeString - case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: - d.bdType = valueTypeBytes - case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: - d.bdType = valueTypeExt - case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: - d.bdType = valueTypeArray - case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: - d.bdType = valueTypeMap - default: - decErr("currentEncodedType: Unrecognized d.vd: 0x%x", d.bd) - } - } - return d.bdType } -func (d *simpleDecDriver) tryDecodeAsNil() bool { - if d.bd == simpleVdNil { +func (d *simpleDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() d.bdRead = false - return true } - return false } -func (d *simpleDecDriver) isBuiltinType(rt uintptr) bool { - return false +func (d *simpleDecDriver) ContainerType() (vt valueType) { + if !d.bdRead { + d.readNextBd() + } + switch d.bd { + case simpleVdNil: + return valueTypeNil + case simpleVdByteArray, simpleVdByteArray + 1, + simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + return valueTypeBytes + case simpleVdString, simpleVdString + 1, + simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: + return valueTypeString + case simpleVdArray, simpleVdArray + 1, + simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: + return valueTypeArray + case simpleVdMap, simpleVdMap + 1, + simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: + return valueTypeMap + // case simpleVdTime: + // return valueTypeTime + } + // else { + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + // } + return valueTypeUnset } -func (d *simpleDecDriver) decodeBuiltin(rt uintptr, v interface{}) { +func (d *simpleDecDriver) TryDecodeAsNil() bool { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdNil { + d.bdRead = false + return true + } + return false } -func (d *simpleDecDriver) decIntAny() (ui uint64, i int64, neg bool) { +func (d *simpleDecDriver) decCheckInteger() (ui uint64, neg bool) { + if !d.bdRead { + d.readNextBd() + } switch d.bd { case simpleVdPosInt: ui = 
uint64(d.r.readn1()) - i = int64(ui) case simpleVdPosInt + 1: - ui = uint64(d.r.readUint16()) - i = int64(ui) + ui = uint64(bigen.Uint16(d.r.readx(2))) case simpleVdPosInt + 2: - ui = uint64(d.r.readUint32()) - i = int64(ui) + ui = uint64(bigen.Uint32(d.r.readx(4))) case simpleVdPosInt + 3: - ui = uint64(d.r.readUint64()) - i = int64(ui) + ui = uint64(bigen.Uint64(d.r.readx(8))) case simpleVdNegInt: ui = uint64(d.r.readn1()) - i = -(int64(ui)) neg = true case simpleVdNegInt + 1: - ui = uint64(d.r.readUint16()) - i = -(int64(ui)) + ui = uint64(bigen.Uint16(d.r.readx(2))) neg = true case simpleVdNegInt + 2: - ui = uint64(d.r.readUint32()) - i = -(int64(ui)) + ui = uint64(bigen.Uint32(d.r.readx(4))) neg = true case simpleVdNegInt + 3: - ui = uint64(d.r.readUint64()) - i = -(int64(ui)) + ui = uint64(bigen.Uint64(d.r.readx(8))) neg = true default: - decErr("decIntAny: Integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd) + d.d.errorf("integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd) + return } // don't do this check, because callers may only want the unsigned value. // if ui > math.MaxInt64 { - // decErr("decIntAny: Integer out of range for signed int64: %v", ui) + // d.d.errorf("decIntAny: Integer out of range for signed int64: %v", ui) + // return // } return } -func (d *simpleDecDriver) decodeInt(bitsize uint8) (i int64) { - _, i, _ = d.decIntAny() - checkOverflow(0, i, bitsize) +func (d *simpleDecDriver) DecodeInt64() (i int64) { + ui, neg := d.decCheckInteger() + i = chkOvf.SignedIntV(ui) + if neg { + i = -i + } d.bdRead = false return } -func (d *simpleDecDriver) decodeUint(bitsize uint8) (ui uint64) { - ui, i, neg := d.decIntAny() +func (d *simpleDecDriver) DecodeUint64() (ui uint64) { + ui, neg := d.decCheckInteger() if neg { - decErr("Assigning negative signed value: %v, to unsigned type", i) + d.d.errorf("assigning negative signed value to unsigned type") + return } - checkOverflow(ui, 0, bitsize) d.bdRead = false return } -func (d *simpleDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { - switch d.bd { - case simpleVdFloat32: - f = float64(math.Float32frombits(d.r.readUint32())) - case simpleVdFloat64: - f = math.Float64frombits(d.r.readUint64()) - default: +func (d *simpleDecDriver) DecodeFloat64() (f float64) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdFloat32 { + f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + } else if d.bd == simpleVdFloat64 { + f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + } else { if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 { - _, i, _ := d.decIntAny() - f = float64(i) + f = float64(d.DecodeInt64()) } else { - decErr("Float only valid from float32/64: Invalid descriptor: %v", d.bd) + d.d.errorf("float only valid from float32/64: Invalid descriptor: %v", d.bd) + return } } - checkOverflowFloat32(f, chkOverflow32) d.bdRead = false return } // bool can be decoded from bool only (single byte). 
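// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the vendored patch: in the simple format,
// one descriptor byte is written first, and bd+0..bd+3 select a 1/2/4/8 byte
// big-endian payload (see encUint and decCheckInteger above). Encoding 300
// therefore takes three bytes. NewEncoderBytes is the package's public API;
// the function name is hypothetical.
func simpleWireLayout() {
	var h SimpleHandle
	var buf []byte
	_ = NewEncoderBytes(&buf, &h).Encode(uint64(300))
	// buf == []byte{simpleVdPosInt + 1, 0x01, 0x2c}:
	// descriptor 8+1 (two-byte positive int), then 0x012c == 300 big-endian.
}
// ---------------------------------------------------------------------------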
-func (d *simpleDecDriver) decodeBool() (b bool) { - switch d.bd { - case simpleVdTrue: +func (d *simpleDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdTrue { b = true - case simpleVdFalse: - default: - decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + } else if d.bd == simpleVdFalse { + } else { + d.d.errorf("cannot decode bool - %s: %x", msgBadDesc, d.bd) + return } d.bdRead = false return } -func (d *simpleDecDriver) readMapLen() (length int) { +func (d *simpleDecDriver) ReadMapStart() (length int) { + if !d.bdRead { + d.readNextBd() + } d.bdRead = false + d.c = containerMapStart return d.decLen() } -func (d *simpleDecDriver) readArrayLen() (length int) { +func (d *simpleDecDriver) ReadArrayStart() (length int) { + if !d.bdRead { + d.readNextBd() + } d.bdRead = false + d.c = containerArrayStart return d.decLen() } +func (d *simpleDecDriver) ReadArrayElem() { + d.c = containerArrayElem +} + +func (d *simpleDecDriver) ReadArrayEnd() { + d.c = containerArrayEnd +} + +func (d *simpleDecDriver) ReadMapElemKey() { + d.c = containerMapKey +} + +func (d *simpleDecDriver) ReadMapElemValue() { + d.c = containerMapValue +} + +func (d *simpleDecDriver) ReadMapEnd() { + d.c = containerMapEnd +} + func (d *simpleDecDriver) decLen() int { switch d.bd % 8 { case 0: @@ -305,116 +410,196 @@ func (d *simpleDecDriver) decLen() int { case 1: return int(d.r.readn1()) case 2: - return int(d.r.readUint16()) + return int(bigen.Uint16(d.r.readx(2))) case 3: - ui := uint64(d.r.readUint32()) - checkOverflow(ui, 0, intBitsize) + ui := uint64(bigen.Uint32(d.r.readx(4))) + if chkOvf.Uint(ui, intBitsize) { + d.d.errorf("overflow integer: %v", ui) + return 0 + } return int(ui) case 4: - ui := d.r.readUint64() - checkOverflow(ui, 0, intBitsize) + ui := bigen.Uint64(d.r.readx(8)) + if chkOvf.Uint(ui, intBitsize) { + d.d.errorf("overflow integer: %v", ui) + return 0 + } return int(ui) } - decErr("decLen: Cannot read length: bd%8 must be in range 0..4. Got: %d", d.bd%8) + d.d.errorf("cannot read length: bd%%8 must be in range 0..4. 
Got: %d", d.bd%8) return -1 } -func (d *simpleDecDriver) decodeString() (s string) { - s = string(d.r.readn(d.decLen())) - d.bdRead = false - return +func (d *simpleDecDriver) DecodeString() (s string) { + return string(d.DecodeBytes(d.d.b[:], true)) +} + +func (d *simpleDecDriver) DecodeStringAsBytes() (s []byte) { + return d.DecodeBytes(d.d.b[:], true) } -func (d *simpleDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { - if clen := d.decLen(); clen > 0 { - // if no contents in stream, don't update the passed byteslice - if len(bs) != clen { - if len(bs) > clen { - bs = bs[:clen] - } else { - bs = make([]byte, clen) - } - bsOut = bs - changed = true +func (d *simpleDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdNil { + d.bdRead = false + return + } + // check if an "array" of uint8's (see ContainerType for how to infer if an array) + if d.bd >= simpleVdArray && d.bd <= simpleVdMap+4 { + if len(bs) == 0 && zerocopy { + bs = d.d.b[:] + } + bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d) + return + } + + clen := d.decLen() + d.bdRead = false + if zerocopy { + if d.br { + return d.r.readx(uint(clen)) + } else if len(bs) == 0 { + bs = d.d.b[:] } - d.r.readb(bs) + } + return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs) +} + +func (d *simpleDecDriver) DecodeTime() (t time.Time) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdNil { + d.bdRead = false + return + } + if d.bd != simpleVdTime { + d.d.errorf("invalid descriptor for time.Time - expect 0x%x, received 0x%x", simpleVdTime, d.bd) + return } d.bdRead = false + clen := int(d.r.readn1()) + b := d.r.readx(uint(clen)) + if err := (&t).UnmarshalBinary(b); err != nil { + d.d.errorv(err) + } + return +} + +func (d *simpleDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if xtag > 0xff { + d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag) + return + } + realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) + realxtag = uint64(realxtag1) + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) + } else { + ext.ReadExt(rv, xbs) + } return } -func (d *simpleDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { +func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + if !d.bdRead { + d.readNextBd() + } switch d.bd { case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: l := d.decLen() xtag = d.r.readn1() if verifyTag && xtag != tag { - decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + d.d.errorf("wrong extension tag. Got %b. Expecting: %v", xtag, tag) + return } - xbs = d.r.readn(l) - case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: - xbs, _ = d.decodeBytes(nil) + if d.br { + xbs = d.r.readx(uint(l)) + } else { + xbs = decByteSlice(d.r, l, d.d.h.MaxInitLen, d.d.b[:]) + } + case simpleVdByteArray, simpleVdByteArray + 1, + simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + xbs = d.DecodeBytes(nil, true) default: - decErr("Invalid d.vd for extensions (Expecting extensions or byte array). 
Got: 0x%x", d.bd) + d.d.errorf("ext - %s - expecting extensions/bytearray, got: 0x%x", msgBadDesc, d.bd) + return } d.bdRead = false return } -func (d *simpleDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) { - d.initReadNext() +func (d *simpleDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + + n := d.d.naked() + var decodeFurther bool switch d.bd { case simpleVdNil: - vt = valueTypeNil + n.v = valueTypeNil case simpleVdFalse: - vt = valueTypeBool - v = false + n.v = valueTypeBool + n.b = false case simpleVdTrue: - vt = valueTypeBool - v = true + n.v = valueTypeBool + n.b = true case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: - vt = valueTypeUint - ui, _, _ := d.decIntAny() - v = ui + if d.h.SignedInteger { + n.v = valueTypeInt + n.i = d.DecodeInt64() + } else { + n.v = valueTypeUint + n.u = d.DecodeUint64() + } case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: - vt = valueTypeInt - _, i, _ := d.decIntAny() - v = i + n.v = valueTypeInt + n.i = d.DecodeInt64() case simpleVdFloat32: - vt = valueTypeFloat - v = d.decodeFloat(true) + n.v = valueTypeFloat + n.f = d.DecodeFloat64() case simpleVdFloat64: - vt = valueTypeFloat - v = d.decodeFloat(false) - case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: - vt = valueTypeString - v = d.decodeString() - case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: - vt = valueTypeBytes - v, _ = d.decodeBytes(nil) + n.v = valueTypeFloat + n.f = d.DecodeFloat64() + case simpleVdTime: + n.v = valueTypeTime + n.t = d.DecodeTime() + case simpleVdString, simpleVdString + 1, + simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: + n.v = valueTypeString + n.s = d.DecodeString() + case simpleVdByteArray, simpleVdByteArray + 1, + simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + decNakedReadRawBytes(d, d.d, n, d.h.RawToString) case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: - vt = valueTypeExt + n.v = valueTypeExt l := d.decLen() - var re RawExt - re.Tag = d.r.readn1() - re.Data = d.r.readn(l) - v = &re - vt = valueTypeExt - case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: - vt = valueTypeArray + n.u = uint64(d.r.readn1()) + if d.br { + n.l = d.r.readx(uint(l)) + } else { + n.l = decByteSlice(d.r, l, d.d.h.MaxInitLen, d.d.b[:]) + } + case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, + simpleVdArray + 3, simpleVdArray + 4: + n.v = valueTypeArray decodeFurther = true case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: - vt = valueTypeMap + n.v = valueTypeMap decodeFurther = true default: - decErr("decodeNaked: Unrecognized d.vd: 0x%x", d.bd) + d.d.errorf("cannot infer value - %s 0x%x", msgBadDesc, d.bd) } if !decodeFurther { d.bdRead = false } - return } //------------------------------------ @@ -422,12 +607,12 @@ func (d *simpleDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurt // SimpleHandle is a Handle for a very simple encoding format. // // simple is a simplistic codec similar to binc, but not as compact. 
-// - Encoding of a value is always preceeded by the descriptor byte (bd) +// - Encoding of a value is always preceded by the descriptor byte (bd) // - True, false, nil are encoded fully in 1 byte (the descriptor) // - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte). // There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers. // - Floats are encoded in 4 or 8 bytes (plus a descriptor byte) -// - Lenght of containers (strings, bytes, array, map, extensions) +// - Length of containers (strings, bytes, array, map, extensions) // are encoded in 0, 1, 2, 4 or 8 bytes. // Zero-length containers have no length encoded. // For others, the number of bytes is given by pow(2, bd%3) @@ -435,26 +620,46 @@ func (d *simpleDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurt // - arrays are encoded as [bd] [length] [value]... // - extensions are encoded as [bd] [length] [tag] [byte]... // - strings/bytearrays are encoded as [bd] [length] [byte]... +// - time.Time are encoded as [bd] [length] [byte]... // // The full spec will be published soon. type SimpleHandle struct { BasicHandle + binaryEncodingType + noElemSeparators + // EncZeroValuesAsNil says to encode zero values for numbers, bool, string, etc as nil + EncZeroValuesAsNil bool + + // _ [1]uint64 // padding } -func (h *SimpleHandle) newEncDriver(w encWriter) encDriver { - return &simpleEncDriver{w: w, h: h} +// Name returns the name of the handle: simple +func (h *SimpleHandle) Name() string { return "simple" } + +// SetBytesExt sets an extension +func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { + return h.SetExt(rt, tag, &extWrapper{ext, interfaceExtFailer{}}) +} + +func (h *SimpleHandle) hasElemSeparators() bool { return true } // as it implements Write(Map|Array)XXX + +func (h *SimpleHandle) newEncDriver(e *Encoder) encDriver { + return &simpleEncDriver{e: e, w: e.w, h: h} } -func (h *SimpleHandle) newDecDriver(r decReader) decDriver { - return &simpleDecDriver{r: r, h: h} +func (h *SimpleHandle) newDecDriver(d *Decoder) decDriver { + return &simpleDecDriver{d: d, h: h, r: d.r, br: d.bytes} } -func (_ *SimpleHandle) writeExt() bool { - return true +func (e *simpleEncDriver) reset() { + e.c = 0 + e.w = e.e.w } -func (h *SimpleHandle) getBasicHandle() *BasicHandle { - return &h.BasicHandle +func (d *simpleDecDriver) reset() { + d.c = 0 + d.r, d.br = d.d.r, d.d.bytes + d.bd, d.bdRead = 0, false } var _ decDriver = (*simpleDecDriver)(nil) diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/test-cbor-goldens.json b/vendor/github.com/hashicorp/go-msgpack/codec/test-cbor-goldens.json new file mode 100644 index 0000000000000..9028586711e44 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/test-cbor-goldens.json @@ -0,0 +1,639 @@ +[ + { + "cbor": "AA==", + "hex": "00", + "roundtrip": true, + "decoded": 0 + }, + { + "cbor": "AQ==", + "hex": "01", + "roundtrip": true, + "decoded": 1 + }, + { + "cbor": "Cg==", + "hex": "0a", + "roundtrip": true, + "decoded": 10 + }, + { + "cbor": "Fw==", + "hex": "17", + "roundtrip": true, + "decoded": 23 + }, + { + "cbor": "GBg=", + "hex": "1818", + "roundtrip": true, + "decoded": 24 + }, + { + "cbor": "GBk=", + "hex": "1819", + "roundtrip": true, + "decoded": 25 + }, + { + "cbor": "GGQ=", + "hex": "1864", + "roundtrip": true, + "decoded": 100 + }, + { + "cbor": "GQPo", + "hex": "1903e8", + "roundtrip": true, + "decoded": 1000 + }, + { + "cbor": "GgAPQkA=", + "hex": "1a000f4240", + 
"roundtrip": true, + "decoded": 1000000 + }, + { + "cbor": "GwAAAOjUpRAA", + "hex": "1b000000e8d4a51000", + "roundtrip": true, + "decoded": 1000000000000 + }, + { + "cbor": "G///////////", + "hex": "1bffffffffffffffff", + "roundtrip": true, + "decoded": 18446744073709551615 + }, + { + "cbor": "wkkBAAAAAAAAAAA=", + "hex": "c249010000000000000000", + "roundtrip": true, + "decoded": 18446744073709551616 + }, + { + "cbor": "O///////////", + "hex": "3bffffffffffffffff", + "roundtrip": true, + "decoded": -18446744073709551616, + "skip": true + }, + { + "cbor": "w0kBAAAAAAAAAAA=", + "hex": "c349010000000000000000", + "roundtrip": true, + "decoded": -18446744073709551617 + }, + { + "cbor": "IA==", + "hex": "20", + "roundtrip": true, + "decoded": -1 + }, + { + "cbor": "KQ==", + "hex": "29", + "roundtrip": true, + "decoded": -10 + }, + { + "cbor": "OGM=", + "hex": "3863", + "roundtrip": true, + "decoded": -100 + }, + { + "cbor": "OQPn", + "hex": "3903e7", + "roundtrip": true, + "decoded": -1000 + }, + { + "cbor": "+QAA", + "hex": "f90000", + "roundtrip": true, + "decoded": 0.0 + }, + { + "cbor": "+YAA", + "hex": "f98000", + "roundtrip": true, + "decoded": -0.0 + }, + { + "cbor": "+TwA", + "hex": "f93c00", + "roundtrip": true, + "decoded": 1.0 + }, + { + "cbor": "+z/xmZmZmZma", + "hex": "fb3ff199999999999a", + "roundtrip": true, + "decoded": 1.1 + }, + { + "cbor": "+T4A", + "hex": "f93e00", + "roundtrip": true, + "decoded": 1.5 + }, + { + "cbor": "+Xv/", + "hex": "f97bff", + "roundtrip": true, + "decoded": 65504.0 + }, + { + "cbor": "+kfDUAA=", + "hex": "fa47c35000", + "roundtrip": true, + "decoded": 100000.0 + }, + { + "cbor": "+n9///8=", + "hex": "fa7f7fffff", + "roundtrip": true, + "decoded": 3.4028234663852886e+38 + }, + { + "cbor": "+3435DyIAHWc", + "hex": "fb7e37e43c8800759c", + "roundtrip": true, + "decoded": 1.0e+300 + }, + { + "cbor": "+QAB", + "hex": "f90001", + "roundtrip": true, + "decoded": 5.960464477539063e-08 + }, + { + "cbor": "+QQA", + "hex": "f90400", + "roundtrip": true, + "decoded": 6.103515625e-05 + }, + { + "cbor": "+cQA", + "hex": "f9c400", + "roundtrip": true, + "decoded": -4.0 + }, + { + "cbor": "+8AQZmZmZmZm", + "hex": "fbc010666666666666", + "roundtrip": true, + "decoded": -4.1 + }, + { + "cbor": "+XwA", + "hex": "f97c00", + "roundtrip": true, + "diagnostic": "Infinity" + }, + { + "cbor": "+X4A", + "hex": "f97e00", + "roundtrip": true, + "diagnostic": "NaN" + }, + { + "cbor": "+fwA", + "hex": "f9fc00", + "roundtrip": true, + "diagnostic": "-Infinity" + }, + { + "cbor": "+n+AAAA=", + "hex": "fa7f800000", + "roundtrip": false, + "diagnostic": "Infinity" + }, + { + "cbor": "+n/AAAA=", + "hex": "fa7fc00000", + "roundtrip": false, + "diagnostic": "NaN" + }, + { + "cbor": "+v+AAAA=", + "hex": "faff800000", + "roundtrip": false, + "diagnostic": "-Infinity" + }, + { + "cbor": "+3/wAAAAAAAA", + "hex": "fb7ff0000000000000", + "roundtrip": false, + "diagnostic": "Infinity" + }, + { + "cbor": "+3/4AAAAAAAA", + "hex": "fb7ff8000000000000", + "roundtrip": false, + "diagnostic": "NaN" + }, + { + "cbor": "+//wAAAAAAAA", + "hex": "fbfff0000000000000", + "roundtrip": false, + "diagnostic": "-Infinity" + }, + { + "cbor": "9A==", + "hex": "f4", + "roundtrip": true, + "decoded": false + }, + { + "cbor": "9Q==", + "hex": "f5", + "roundtrip": true, + "decoded": true + }, + { + "cbor": "9g==", + "hex": "f6", + "roundtrip": true, + "decoded": null + }, + { + "cbor": "9w==", + "hex": "f7", + "roundtrip": true, + "diagnostic": "undefined" + }, + { + "cbor": "8A==", + "hex": "f0", + "roundtrip": 
true, + "diagnostic": "simple(16)" + }, + { + "cbor": "+Bg=", + "hex": "f818", + "roundtrip": true, + "diagnostic": "simple(24)" + }, + { + "cbor": "+P8=", + "hex": "f8ff", + "roundtrip": true, + "diagnostic": "simple(255)" + }, + { + "cbor": "wHQyMDEzLTAzLTIxVDIwOjA0OjAwWg==", + "hex": "c074323031332d30332d32315432303a30343a30305a", + "roundtrip": true, + "diagnostic": "0(\"2013-03-21T20:04:00Z\")" + }, + { + "cbor": "wRpRS2ew", + "hex": "c11a514b67b0", + "roundtrip": true, + "diagnostic": "1(1363896240)" + }, + { + "cbor": "wftB1FLZ7CAAAA==", + "hex": "c1fb41d452d9ec200000", + "roundtrip": true, + "diagnostic": "1(1363896240.5)" + }, + { + "cbor": "10QBAgME", + "hex": "d74401020304", + "roundtrip": true, + "diagnostic": "23(h'01020304')" + }, + { + "cbor": "2BhFZElFVEY=", + "hex": "d818456449455446", + "roundtrip": true, + "diagnostic": "24(h'6449455446')" + }, + { + "cbor": "2CB2aHR0cDovL3d3dy5leGFtcGxlLmNvbQ==", + "hex": "d82076687474703a2f2f7777772e6578616d706c652e636f6d", + "roundtrip": true, + "diagnostic": "32(\"http://www.example.com\")" + }, + { + "cbor": "QA==", + "hex": "40", + "roundtrip": true, + "diagnostic": "h''" + }, + { + "cbor": "RAECAwQ=", + "hex": "4401020304", + "roundtrip": true, + "diagnostic": "h'01020304'" + }, + { + "cbor": "YA==", + "hex": "60", + "roundtrip": true, + "decoded": "" + }, + { + "cbor": "YWE=", + "hex": "6161", + "roundtrip": true, + "decoded": "a" + }, + { + "cbor": "ZElFVEY=", + "hex": "6449455446", + "roundtrip": true, + "decoded": "IETF" + }, + { + "cbor": "YiJc", + "hex": "62225c", + "roundtrip": true, + "decoded": "\"\\" + }, + { + "cbor": "YsO8", + "hex": "62c3bc", + "roundtrip": true, + "decoded": "ü" + }, + { + "cbor": "Y+awtA==", + "hex": "63e6b0b4", + "roundtrip": true, + "decoded": "水" + }, + { + "cbor": "ZPCQhZE=", + "hex": "64f0908591", + "roundtrip": true, + "decoded": "𐅑" + }, + { + "cbor": "gA==", + "hex": "80", + "roundtrip": true, + "decoded": [ + + ] + }, + { + "cbor": "gwECAw==", + "hex": "83010203", + "roundtrip": true, + "decoded": [ + 1, + 2, + 3 + ] + }, + { + "cbor": "gwGCAgOCBAU=", + "hex": "8301820203820405", + "roundtrip": true, + "decoded": [ + 1, + [ + 2, + 3 + ], + [ + 4, + 5 + ] + ] + }, + { + "cbor": "mBkBAgMEBQYHCAkKCwwNDg8QERITFBUWFxgYGBk=", + "hex": "98190102030405060708090a0b0c0d0e0f101112131415161718181819", + "roundtrip": true, + "decoded": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25 + ] + }, + { + "cbor": "oA==", + "hex": "a0", + "roundtrip": true, + "decoded": { + } + }, + { + "cbor": "ogECAwQ=", + "hex": "a201020304", + "roundtrip": true, + "skip": true, + "diagnostic": "{1: 2, 3: 4}" + }, + { + "cbor": "omFhAWFiggID", + "hex": "a26161016162820203", + "roundtrip": true, + "decoded": { + "a": 1, + "b": [ + 2, + 3 + ] + } + }, + { + "cbor": "gmFhoWFiYWM=", + "hex": "826161a161626163", + "roundtrip": true, + "decoded": [ + "a", + { + "b": "c" + } + ] + }, + { + "cbor": "pWFhYUFhYmFCYWNhQ2FkYURhZWFF", + "hex": "a56161614161626142616361436164614461656145", + "roundtrip": true, + "decoded": { + "a": "A", + "b": "B", + "c": "C", + "d": "D", + "e": "E" + } + }, + { + "cbor": "X0IBAkMDBAX/", + "hex": "5f42010243030405ff", + "roundtrip": false, + "skip": true, + "diagnostic": "(_ h'0102', h'030405')" + }, + { + "cbor": "f2VzdHJlYWRtaW5n/w==", + "hex": "7f657374726561646d696e67ff", + "roundtrip": false, + "decoded": "streaming" + }, + { + "cbor": "n/8=", + "hex": "9fff", + "roundtrip": false, + "decoded": [ + + ] + }, + 
{ + "cbor": "nwGCAgOfBAX//w==", + "hex": "9f018202039f0405ffff", + "roundtrip": false, + "decoded": [ + 1, + [ + 2, + 3 + ], + [ + 4, + 5 + ] + ] + }, + { + "cbor": "nwGCAgOCBAX/", + "hex": "9f01820203820405ff", + "roundtrip": false, + "decoded": [ + 1, + [ + 2, + 3 + ], + [ + 4, + 5 + ] + ] + }, + { + "cbor": "gwGCAgOfBAX/", + "hex": "83018202039f0405ff", + "roundtrip": false, + "decoded": [ + 1, + [ + 2, + 3 + ], + [ + 4, + 5 + ] + ] + }, + { + "cbor": "gwGfAgP/ggQF", + "hex": "83019f0203ff820405", + "roundtrip": false, + "decoded": [ + 1, + [ + 2, + 3 + ], + [ + 4, + 5 + ] + ] + }, + { + "cbor": "nwECAwQFBgcICQoLDA0ODxAREhMUFRYXGBgYGf8=", + "hex": "9f0102030405060708090a0b0c0d0e0f101112131415161718181819ff", + "roundtrip": false, + "decoded": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25 + ] + }, + { + "cbor": "v2FhAWFinwID//8=", + "hex": "bf61610161629f0203ffff", + "roundtrip": false, + "decoded": { + "a": 1, + "b": [ + 2, + 3 + ] + } + }, + { + "cbor": "gmFhv2FiYWP/", + "hex": "826161bf61626163ff", + "roundtrip": false, + "decoded": [ + "a", + { + "b": "c" + } + ] + }, + { + "cbor": "v2NGdW71Y0FtdCH/", + "hex": "bf6346756ef563416d7421ff", + "roundtrip": false, + "decoded": { + "Fun": true, + "Amt": -2 + } + } +] diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/msgpack_test.py b/vendor/github.com/hashicorp/go-msgpack/codec/test.py similarity index 69% rename from vendor/github.com/hashicorp/go-msgpack/codec/msgpack_test.py rename to vendor/github.com/hashicorp/go-msgpack/codec/test.py index e933838c56a89..800376f6841b4 100644 --- a/vendor/github.com/hashicorp/go-msgpack/codec/msgpack_test.py +++ b/vendor/github.com/hashicorp/go-msgpack/codec/test.py @@ -4,7 +4,14 @@ # A Test calls this internally to create the golden files # So it can process them (so we don't have to checkin the files). 
-import msgpack, msgpackrpc, sys, os, threading +# Ensure msgpack-python and cbor are installed first, using: +# sudo apt-get install python-dev +# sudo apt-get install python-pip +# pip install --user msgpack-python msgpack-rpc-python cbor + +# Ensure all "string" keys are utf strings (else encoded as bytes) + +import cbor, msgpack, msgpackrpc, sys, os, threading def get_test_data_list(): # get list with all primitive types, and a combo type @@ -21,43 +28,52 @@ def get_test_data_list(): -3232.0, -6464646464.0, 3232.0, + 6464.0, 6464646464.0, False, True, + u"null", None, - "someday", - "", - "bytestring", + u"some&day>some 0 @@ -80,14 +96,14 @@ def myStopRpcServer(): server.start() def doRpcClientToPythonSvc(port): - address = msgpackrpc.Address('localhost', port) + address = msgpackrpc.Address('127.0.0.1', port) client = msgpackrpc.Client(address, unpack_encoding='utf-8') print client.call("Echo123", "A1", "B2", "C3") print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) def doRpcClientToGoSvc(port): # print ">>>> port: ", port, " <<<<<" - address = msgpackrpc.Address('localhost', port) + address = msgpackrpc.Address('127.0.0.1', port) client = msgpackrpc.Client(address, unpack_encoding='utf-8') print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"]) print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) @@ -102,7 +118,7 @@ def doMain(args): elif len(args) == 2 and args[0] == "rpc-client-go-service": doRpcClientToGoSvc(int(args[1])) else: - print("Usage: msgpack_test.py " + + print("Usage: test.py " + "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...") if __name__ == "__main__": diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/time.go b/vendor/github.com/hashicorp/go-msgpack/codec/time.go deleted file mode 100644 index c86d65328d76a..0000000000000 --- a/vendor/github.com/hashicorp/go-msgpack/codec/time.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -import ( - "time" -) - -var ( - timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'} -) - -// EncodeTime encodes a time.Time as a []byte, including -// information on the instant in time and UTC offset. -// -// Format Description -// -// A timestamp is composed of 3 components: -// -// - secs: signed integer representing seconds since unix epoch -// - nsces: unsigned integer representing fractional seconds as a -// nanosecond offset within secs, in the range 0 <= nsecs < 1e9 -// - tz: signed integer representing timezone offset in minutes east of UTC, -// and a dst (daylight savings time) flag -// -// When encoding a timestamp, the first byte is the descriptor, which -// defines which components are encoded and how many bytes are used to -// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it -// is not encoded in the byte array explicitly*. -// -// Descriptor 8 bits are of the form `A B C DDD EE`: -// A: Is secs component encoded? 1 = true -// B: Is nsecs component encoded? 1 = true -// C: Is tz component encoded? 1 = true -// DDD: Number of extra bytes for secs (range 0-7). -// If A = 1, secs encoded in DDD+1 bytes. -// If A = 0, secs is not encoded, and is assumed to be 0. -// If A = 1, then we need at least 1 byte to encode secs. -// DDD says the number of extra bytes beyond that 1. -// E.g. if DDD=0, then secs is represented in 1 byte. 
-// if DDD=2, then secs is represented in 3 bytes. -// EE: Number of extra bytes for nsecs (range 0-3). -// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above) -// -// Following the descriptor bytes, subsequent bytes are: -// -// secs component encoded in `DDD + 1` bytes (if A == 1) -// nsecs component encoded in `EE + 1` bytes (if B == 1) -// tz component encoded in 2 bytes (if C == 1) -// -// secs and nsecs components are integers encoded in a BigEndian -// 2-complement encoding format. -// -// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to -// Least significant bit 0 are described below: -// -// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes). -// Bit 15 = have\_dst: set to 1 if we set the dst flag. -// Bit 14 = dst\_on: set to 1 if dst is in effect at the time, or 0 if not. -// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format. -// -func encodeTime(t time.Time) []byte { - //t := rv.Interface().(time.Time) - tsecs, tnsecs := t.Unix(), t.Nanosecond() - var ( - bd byte - btmp [8]byte - bs [16]byte - i int = 1 - ) - l := t.Location() - if l == time.UTC { - l = nil - } - if tsecs != 0 { - bd = bd | 0x80 - bigen.PutUint64(btmp[:], uint64(tsecs)) - f := pruneSignExt(btmp[:], tsecs >= 0) - bd = bd | (byte(7-f) << 2) - copy(bs[i:], btmp[f:]) - i = i + (8 - f) - } - if tnsecs != 0 { - bd = bd | 0x40 - bigen.PutUint32(btmp[:4], uint32(tnsecs)) - f := pruneSignExt(btmp[:4], true) - bd = bd | byte(3-f) - copy(bs[i:], btmp[f:4]) - i = i + (4 - f) - } - if l != nil { - bd = bd | 0x20 - // Note that Go Libs do not give access to dst flag. - _, zoneOffset := t.Zone() - //zoneName, zoneOffset := t.Zone() - zoneOffset /= 60 - z := uint16(zoneOffset) - bigen.PutUint16(btmp[:2], z) - // clear dst flags - bs[i] = btmp[0] & 0x3f - bs[i+1] = btmp[1] - i = i + 2 - } - bs[0] = bd - return bs[0:i] -} - -// DecodeTime decodes a []byte into a time.Time. -func decodeTime(bs []byte) (tt time.Time, err error) { - bd := bs[0] - var ( - tsec int64 - tnsec uint32 - tz uint16 - i byte = 1 - i2 byte - n byte - ) - if bd&(1<<7) != 0 { - var btmp [8]byte - n = ((bd >> 2) & 0x7) + 1 - i2 = i + n - copy(btmp[8-n:], bs[i:i2]) - //if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it) - if bs[i]&(1<<7) != 0 { - copy(btmp[0:8-n], bsAll0xff) - //for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff } - } - i = i2 - tsec = int64(bigen.Uint64(btmp[:])) - } - if bd&(1<<6) != 0 { - var btmp [4]byte - n = (bd & 0x3) + 1 - i2 = i + n - copy(btmp[4-n:], bs[i:i2]) - i = i2 - tnsec = bigen.Uint32(btmp[:]) - } - if bd&(1<<5) == 0 { - tt = time.Unix(tsec, int64(tnsec)).UTC() - return - } - // In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name. - // However, we need name here, so it can be shown when time is printed. - // Zone name is in form: UTC-08:00. - // Note that Go Libs do not give access to dst flag, so we ignore dst bits - - i2 = i + 2 - tz = bigen.Uint16(bs[i:i2]) - i = i2 - // sign extend sign bit into top 2 MSB (which were dst bits): - if tz&(1<<13) == 0 { // positive - tz = tz & 0x3fff //clear 2 MSBs: dst bits - } else { // negative - tz = tz | 0xc000 //set 2 MSBs: dst bits - //tzname[3] = '-' (TODO: verify. this works here) - } - tzint := int16(tz) - if tzint == 0 { - tt = time.Unix(tsec, int64(tnsec)).UTC() - } else { - // For Go Time, do not use a descriptive timezone. - // It's unnecessary, and makes it harder to do a reflect.DeepEqual. 
- // The Offset already tells what the offset should be, if not on UTC and unknown zone name. - // var zoneName = timeLocUTCName(tzint) - tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60)) - } - return -} - -func timeLocUTCName(tzint int16) string { - if tzint == 0 { - return "UTC" - } - var tzname = []byte("UTC+00:00") - //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below. - //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first - var tzhr, tzmin int16 - if tzint < 0 { - tzname[3] = '-' // (TODO: verify. this works here) - tzhr, tzmin = -tzint/60, (-tzint)%60 - } else { - tzhr, tzmin = tzint/60, tzint%60 - } - tzname[4] = timeDigits[tzhr/10] - tzname[5] = timeDigits[tzhr%10] - tzname[7] = timeDigits[tzmin/10] - tzname[8] = timeDigits[tzmin%10] - return string(tzname) - //return time.FixedZone(string(tzname), int(tzint)*60) -} diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/LICENSE b/vendor/github.com/hashicorp/go-msgpack/v2/LICENSE new file mode 100644 index 0000000000000..95a0f0541cdaa --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2012-2015 Ugorji Nwoke. +All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/build.sh b/vendor/github.com/hashicorp/go-msgpack/v2/codec/build.sh new file mode 100644 index 0000000000000..831bd86442219 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/build.sh @@ -0,0 +1,263 @@ +#!/bin/bash + +# Run all the different permutations of all the tests and other things +# This helps ensure that nothing gets broken. + +_tests() { + local gover=$( go version | cut -f 3 -d ' ' ) + local a=( "" "codecgen" ) + for i in "${a[@]}" + do + echo ">>>> TAGS: $i" + local i2=${i:-default} + case $gover in + go1.[0-6]*) go vet -printfuncs "errorf" "$@" && + go test ${zargs[*]} -vet off -tags "$i" "$@" ;; + *) go vet -printfuncs "errorf" "$@" && + go test ${zargs[*]} -vet off -tags "alltests $i" -run "Suite" -coverprofile "${i2// /-}.cov.out" "$@" ;; + esac + if [[ "$?" != 0 ]]; then return 1; fi + done + echo "++++++++ TEST SUITES ALL PASSED ++++++++" +} + + +# is a generation needed? +_ng() { + local a="$1" + if [[ ! 
-e "$a" ]]; then echo 1; return; fi + for i in `ls -1 *.go.tmpl gen.go values_test.go` + do + if [[ "$a" -ot "$i" ]]; then echo 1; return; fi + done +} + +_prependbt() { + cat > ${2} <> ${2} + rm -f ${1} +} + +# _build generates gen-helper.go. +_build() { + if ! [[ "${zforce}" || $(_ng "gen-helper.generated.go") || $(_ng "gen.generated.go") ]]; then return 0; fi + + if [ "${zbak}" ]; then + _zts=`date '+%m%d%Y_%H%M%S'` + _gg=".generated.go" + [ -e "gen-helper${_gg}" ] && mv gen-helper${_gg} gen-helper${_gg}__${_zts}.bak + [ -e "gen${_gg}" ] && mv gen${_gg} gen${_gg}__${_zts}.bak + fi + rm -f gen-helper.generated.go gen.generated.go \ + *_generated_test.go *.generated_ffjson_expose.go + + cat > gen.generated.go <> gen.generated.go < gen-dec-map.go.tmpl + cat >> gen.generated.go <> gen.generated.go < gen-dec-array.go.tmpl + cat >> gen.generated.go <> gen.generated.go < gen-enc-chan.go.tmpl + cat >> gen.generated.go < gen-from-tmpl.codec.generated.go < gen-from-tmpl.generated.go < " + fnameOut + " ______") +fin, err := os.Open(fnameIn) +if err != nil { panic(err) } +defer fin.Close() +fout, err := os.Create(fnameOut) +if err != nil { panic(err) } +defer fout.Close() +err = codec.GenInternalGoFile(fin, fout) +if err != nil { panic(err) } +} + +func main() { +run("gen-helper.go.tmpl", "gen-helper.generated.go") +run("mammoth-test.go.tmpl", "mammoth_generated_test.go") +run("mammoth2-test.go.tmpl", "mammoth2_generated_test.go") +} +EOF + + sed -e 's+// __DO_NOT_REMOVE__NEEDED_FOR_REPLACING__IMPORT_PATH__FOR_CODEC_BENCH__+import . "github.com/hashicorp/go-msgpack/v2/codec"+' \ + shared_test.go > bench/shared_test.go + + # explicitly return 0 if this passes, else return 1 + go run -tags "codecgen.exec" gen-from-tmpl.generated.go && + rm -f gen-from-tmpl.*generated.go && + return 0 + return 1 +} + +_codegenerators() { + local c5="_generated_test.go" + local c7="$PWD/codecgen" + local c8="$c7/__codecgen" + local c9="codecgen-scratch.go" + + if ! [[ $zforce || $(_ng "values_codecgen${c5}") ]]; then return 0; fi + + # Note: ensure you run the codecgen for this codebase/directory i.e. ./codecgen/codecgen + true && + echo "codecgen ... " && + if [[ $zforce || ! -f "$c8" || "$c7/gen.go" -nt "$c8" ]]; then + echo "rebuilding codecgen ... " && ( cd codecgen && go build -o $c8 ${zargs[*]} . ) + fi && + $c8 -rt codecgen -t 'codecgen generated' -o values_codecgen${c5} -d 19780 $zfin $zfin2 && + cp mammoth2_generated_test.go $c9 && + $c8 -o mammoth2_codecgen${c5} -d 19781 mammoth2_generated_test.go && + rm -f $c9 && + echo "generators done!" +} + +_prebuild() { + echo "prebuild: zforce: $zforce" + local d="$PWD" + zfin="test_values.generated.go" + zfin2="test_values_flex.generated.go" + zpkg="github.com/hashicorp/go-msgpack/v2/codec" + # zpkg=${d##*/src/} + # zgobase=${d%%/src/*} + # rm -f *_generated_test.go + rm -f codecgen-*.go && + _build && + cp $d/values_test.go $d/$zfin && + cp $d/values_flex_test.go $d/$zfin2 && + _codegenerators && + if [[ "$(type -t _codegenerators_external )" = "function" ]]; then _codegenerators_external ; fi && + if [[ $zforce ]]; then go install ${zargs[*]} .; fi && + echo "prebuild done successfully" + rm -f $d/$zfin $d/$zfin2 + unset zfin zfin2 zpkg +} + +_make() { + zforce=1 + (cd codecgen && go install ${zargs[*]} .) && _prebuild && go install ${zargs[*]} . 
+ unset zforce +} + +_clean() { + rm -f gen-from-tmpl.*generated.go \ + codecgen-*.go \ + test_values.generated.go test_values_flex.generated.go +} + +_release() { + local reply + read -p "Pre-release validation takes a few minutes and MUST be run from within GOPATH/src. Confirm y/n? " -n 1 -r reply + echo + if [[ ! $reply =~ ^[Yy]$ ]]; then return 1; fi + + # expects GOROOT, GOROOT_BOOTSTRAP to have been set. + if [[ -z "${GOROOT// }" || -z "${GOROOT_BOOTSTRAP// }" ]]; then return 1; fi + # (cd $GOROOT && git checkout -f master && git pull && git reset --hard) + (cd $GOROOT && git pull) + local f=`pwd`/make.release.out + cat > $f <>$f + if [[ "$i" != "master" ]]; then i="release-branch.go$i"; fi + (false || + (echo "===== BUILDING GO SDK for branch: $i ... =====" && + cd $GOROOT && + git checkout -f $i && git reset --hard && git clean -f . && + cd src && ./make.bash >>$f 2>&1 && sleep 1 ) ) && + echo "===== GO SDK BUILD DONE =====" && + _prebuild && + echo "===== PREBUILD DONE with exit: $? =====" && + _tests "$@" + if [[ "$?" != 0 ]]; then return 1; fi + done + unset zforce + echo "++++++++ RELEASE TEST SUITES ALL PASSED ++++++++" +} + +_usage() { + cat < [tests, make, prebuild (force) (external), inlining diagnostics, mid-stack inlining, race detector] + -v -> verbose +EOF + if [[ "$(type -t _usage_run)" = "function" ]]; then _usage_run ; fi +} + +_main() { + if [[ -z "$1" ]]; then _usage; return 1; fi + local x + unset zforce + zargs=() + zbenchflags="" + OPTIND=1 + while getopts ":ctmnrgpfvlzdb:" flag + do + case "x$flag" in + 'xf') zforce=1 ;; + 'xv') zverbose=1 ;; + 'xl') zargs+=("-gcflags"); zargs+=("-l=4") ;; + 'xn') zargs+=("-gcflags"); zargs+=("-m=2") ;; + 'xd') zargs+=("-race") ;; + 'xb') x='b'; zbenchflags=${OPTARG} ;; + x\?) _usage; return 1 ;; + *) x=$flag ;; + esac + done + shift $((OPTIND-1)) + # echo ">>>> _main: extra args: $@" + case "x$x" in + 'xt') _tests "$@" ;; + 'xm') _make "$@" ;; + 'xr') _release "$@" ;; + 'xg') _go ;; + 'xp') _prebuild "$@" ;; + 'xc') _clean "$@" ;; + 'xz') _analyze "$@" ;; + 'xb') _bench "$@" ;; + esac + unset zforce zargs zbenchflags +} + +[ "." = `dirname $0` ] && _main "$@" \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/codecgen.go b/vendor/github.com/hashicorp/go-msgpack/v2/codec/codecgen.go new file mode 100644 index 0000000000000..28fa810593d4d --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/codecgen.go @@ -0,0 +1,14 @@ +//go:build codecgen || generated +// +build codecgen generated + +package codec + +// this file is here, to set the codecgen variable to true +// when the build tag codecgen is set. +// +// this allows us do specific things e.g. skip missing fields tests, +// when running in codecgen mode. + +func init() { + codecgen = true +} diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/decode.go b/vendor/github.com/hashicorp/go-msgpack/v2/codec/decode.go new file mode 100644 index 0000000000000..e0fb62df4929c --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/decode.go @@ -0,0 +1,3111 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "encoding" + "errors" + "fmt" + "io" + "reflect" + "runtime" + "strconv" + "time" +) + +// Some tagging information for error messages. 
+const ( + msgBadDesc = "unrecognized descriptor byte" + // msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v" +) + +const ( + decDefMaxDepth = 1024 // maximum depth + decDefSliceCap = 8 + decDefChanCap = 64 // should be large, as cap cannot be expanded + decScratchByteArrayLen = cacheLineSize // + (8 * 2) // - (8 * 1) +) + +var ( + errstrOnlyMapOrArrayCanDecodeIntoStruct = "only encoded map or array can be decoded into a struct" + errstrCannotDecodeIntoNil = "cannot decode into nil" + + errmsgExpandSliceOverflow = "expand slice: slice overflow" + errmsgExpandSliceCannotChange = "expand slice: cannot change" + + errDecoderNotInitialized = errors.New("Decoder not initialized") + + errDecUnreadByteNothingToRead = errors.New("cannot unread - nothing has been read") + errDecUnreadByteLastByteNotRead = errors.New("cannot unread - last byte has not been read") + errDecUnreadByteUnknown = errors.New("cannot unread - reason unknown") + errMaxDepthExceeded = errors.New("maximum decoding depth exceeded") +) + +/* + +// decReader abstracts the reading source, allowing implementations that can +// read from an io.Reader or directly off a byte slice with zero-copying. +// +// Deprecated: Use decReaderSwitch instead. +type decReader interface { + unreadn1() + // readx will use the implementation scratch buffer if possible i.e. n < len(scratchbuf), OR + // just return a view of the []byte being decoded from. + // Ensure you call detachZeroCopyBytes later if this needs to be sent outside codec control. + readx(n int) []byte + readb([]byte) + readn1() uint8 + numread() uint // number of bytes read + track() + stopTrack() []byte + + // skip will skip any byte that matches, and return the first non-matching byte + skip(accept *bitset256) (token byte) + // readTo will read any byte that matches, stopping once no-longer matching. + readTo(in []byte, accept *bitset256) (out []byte) + // readUntil will read, only stopping once it matches the 'stop' byte. + readUntil(in []byte, stop byte) (out []byte) +} + +*/ + +type decDriver interface { + // this will check if the next token is a break. + CheckBreak() bool + // TryDecodeAsNil tries to decode as nil. + // Note: TryDecodeAsNil should be careful not to share any temporary []byte with + // the rest of the decDriver. This is because sometimes, we optimize by holding onto + // a transient []byte, and ensuring the only other call we make to the decDriver + // during that time is maybe a TryDecodeAsNil() call. + TryDecodeAsNil() bool + // ContainerType returns one of: Bytes, String, Nil, Slice or Map. Return unSet if not known. + ContainerType() (vt valueType) + // IsBuiltinType(rt uintptr) bool + + // DecodeNaked will decode primitives (number, bool, string, []byte) and RawExt. + // For maps and arrays, it will not do the decoding in-band, but will signal + // the decoder, so that is done later, by setting the decNaked.valueType field. + // + // Note: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types). + // for extensions, DecodeNaked must read the tag and the []byte if it exists. + // if the []byte is not read, then kInterfaceNaked will treat it as a Handle + // that stores the subsequent value in-band, and complete reading the RawExt. + // + // extensions should also use readx to decode them, for efficiency. + // kInterface will extract the detached byte slice if it has to pass it outside its realm. 
+	DecodeNaked()
+
+	// Deprecated: use DecodeInt64 and DecodeUint64 instead
+	// DecodeInt(bitsize uint8) (i int64)
+	// DecodeUint(bitsize uint8) (ui uint64)
+
+	DecodeInt64() (i int64)
+	DecodeUint64() (ui uint64)
+
+	DecodeFloat64() (f float64)
+	DecodeBool() (b bool)
+	// DecodeString can also decode symbols.
+	// It looks redundant as DecodeBytes is available.
+	// However, some codecs (e.g. binc) support symbols and can
+	// return a pre-stored string value, meaning that it can bypass
+	// the cost of []byte->string conversion.
+	DecodeString() (s string)
+	DecodeStringAsBytes() (v []byte)
+
+	// DecodeBytes may be called directly, without going through reflection.
+	// Consequently, it must be designed to handle possible nil.
+	DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte)
+	// DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte)
+
+	// decodeExt will decode into a *RawExt or into an extension.
+	DecodeExt(v interface{}, xtag uint64, ext Ext) (realxtag uint64)
+	// decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte)
+
+	DecodeTime() (t time.Time)
+
+	ReadArrayStart() int
+	ReadArrayElem()
+	ReadArrayEnd()
+	ReadMapStart() int
+	ReadMapElemKey()
+	ReadMapElemValue()
+	ReadMapEnd()
+
+	reset()
+	uncacheRead()
+}
+
+type decodeError struct {
+	codecError
+	pos int
+}
+
+func (d decodeError) Error() string {
+	return fmt.Sprintf("%s decode error [pos %d]: %v", d.name, d.pos, d.err)
+}
+
+type decDriverNoopContainerReader struct{}
+
+func (x decDriverNoopContainerReader) ReadArrayStart() (v int) { return }
+func (x decDriverNoopContainerReader) ReadArrayElem() {}
+func (x decDriverNoopContainerReader) ReadArrayEnd() {}
+func (x decDriverNoopContainerReader) ReadMapStart() (v int) { return }
+func (x decDriverNoopContainerReader) ReadMapElemKey() {}
+func (x decDriverNoopContainerReader) ReadMapElemValue() {}
+func (x decDriverNoopContainerReader) ReadMapEnd() {}
+func (x decDriverNoopContainerReader) CheckBreak() (v bool) { return }
+
+// func (x decNoSeparator) uncacheRead() {}
+
+// DecodeOptions captures configuration options during decode.
+type DecodeOptions struct {
+	// MapType specifies type to use during schema-less decoding of a map in the stream.
+	// If nil (unset), we default to map[string]interface{} iff json handle and MapStringAsKey=true,
+	// else map[interface{}]interface{}.
+	MapType reflect.Type
+
+	// SliceType specifies type to use during schema-less decoding of an array in the stream.
+	// If nil (unset), we default to []interface{} for all formats.
+	SliceType reflect.Type
+
+	// MaxInitLen defines the maximum initial length that we "make" a collection
+	// (string, slice, map, chan). If 0 or negative, we default to a sensible value
+	// based on the size of an element in the collection.
+	//
+	// For example, when decoding, a stream may say that it has 2^64 elements.
+	// We should not automatically provision a slice of that size, to prevent Out-Of-Memory crash.
+	// Instead, we provision up to MaxInitLen, fill that up, and start appending after that.
+	MaxInitLen int
+
+	// ReaderBufferSize is the size of the buffer used when reading.
+	//
+	// if > 0, we use a smart buffer internally for performance purposes.
+	ReaderBufferSize int
+
+	// MaxDepth defines the maximum depth when decoding nested
+	// maps and slices. If 0 or negative, we default to a suitably large number (currently 1024).
+ MaxDepth int16 + + // If ErrorIfNoField, return an error when decoding a map + // from a codec stream into a struct, and no matching struct field is found. + ErrorIfNoField bool + + // If ErrorIfNoArrayExpand, return an error when decoding a slice/array that cannot be expanded. + // For example, the stream contains an array of 8 items, but you are decoding into a [4]T array, + // or you are decoding into a slice of length 4 which is non-addressable (and so cannot be set). + ErrorIfNoArrayExpand bool + + // If SignedInteger, use the int64 during schema-less decoding of unsigned values (not uint64). + SignedInteger bool + + // MapValueReset controls how we decode into a map value. + // + // By default, we MAY retrieve the mapping for a key, and then decode into that. + // However, especially with big maps, that retrieval may be expensive and unnecessary + // if the stream already contains all that is necessary to recreate the value. + // + // If true, we will never retrieve the previous mapping, + // but rather decode into a new value and set that in the map. + // + // If false, we will retrieve the previous mapping if necessary e.g. + // the previous mapping is a pointer, or is a struct or array with pre-set state, + // or is an interface. + MapValueReset bool + + // SliceElementReset: on decoding a slice, reset the element to a zero value first. + // + // concern: if the slice already contained some garbage, we will decode into that garbage. + SliceElementReset bool + + // InterfaceReset controls how we decode into an interface. + // + // By default, when we see a field that is an interface{...}, + // or a map with interface{...} value, we will attempt decoding into the + // "contained" value. + // + // However, this prevents us from reading a string into an interface{} + // that formerly contained a number. + // + // If true, we will decode into a new "blank" value, and set that in the interface. + // If false, we will decode into whatever is contained in the interface. + InterfaceReset bool + + // InternString controls interning of strings during decoding. + // + // Some handles, e.g. json, typically will read map keys as strings. + // If the set of keys are finite, it may help reduce allocation to + // look them up from a map (than to allocate them afresh). + // + // Note: Handles will be smart when using the intern functionality. + // Every string should not be interned. + // An excellent use-case for interning is struct field names, + // or map keys where key type is string. + InternString bool + + // PreferArrayOverSlice controls whether to decode to an array or a slice. + // + // This only impacts decoding into a nil interface{}. + // Consequently, it has no effect on codecgen. + // + // *Note*: This only applies if using go1.5 and above, + // as it requires reflect.ArrayOf support which was absent before go1.5. + PreferArrayOverSlice bool + + // DeleteOnNilMapValue controls how to decode a nil value in the stream. + // + // If true, we will delete the mapping of the key. + // Else, just set the mapping to the zero value of the type. + DeleteOnNilMapValue bool + + // RawToString controls how raw bytes in a stream are decoded into a nil interface{}. + // By default, they are decoded as []byte, but can be decoded as string (if configured). + RawToString bool +} + +// ------------------------------------------------ + +type unreadByteStatus uint8 + +// unreadByteStatus goes from +// undefined (when initialized) -- (read) --> canUnread -- (unread) --> canRead ... 
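// Editorial aside (not part of the vendored diff): a minimal sketch of how the
// DecodeOptions above might be applied, assuming (as in the upstream codec
// package) that BasicHandle embeds DecodeOptions, so a handle such as
// SimpleHandle exposes these fields directly. decodeWithLimits is a
// hypothetical helper, not part of the package:
func decodeWithLimits(in []byte) (out map[string]interface{}, err error) {
	var h SimpleHandle
	h.MaxInitLen = 1 << 20 // cap initial allocations even if the stream claims 2^64 elements
	h.MaxDepth = 64        // tighten the nesting limit (default 1024) for untrusted input
	err = NewDecoderBytes(in, &h).Decode(&out)
	return
}
// (The unreadByteStatus constants for the io reader follow.)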
+const ( + unreadByteUndefined unreadByteStatus = iota + unreadByteCanRead + unreadByteCanUnread +) + +type ioDecReaderCommon struct { + r io.Reader // the reader passed in + + n uint // num read + + l byte // last byte + ls unreadByteStatus // last byte status + trb bool // tracking bytes turned on + _ bool + b [4]byte // tiny buffer for reading single bytes + + tr []byte // tracking bytes read +} + +func (z *ioDecReaderCommon) reset(r io.Reader) { + z.r = r + z.ls = unreadByteUndefined + z.l, z.n = 0, 0 + z.trb = false + if z.tr != nil { + z.tr = z.tr[:0] + } +} + +func (z *ioDecReaderCommon) numread() uint { + return z.n +} + +func (z *ioDecReaderCommon) track() { + if z.tr != nil { + z.tr = z.tr[:0] + } + z.trb = true +} + +func (z *ioDecReaderCommon) stopTrack() (bs []byte) { + z.trb = false + return z.tr +} + +// ------------------------------------------ + +// ioDecReader is a decReader that reads off an io.Reader. +// +// It also has a fallback implementation of ByteScanner if needed. +type ioDecReader struct { + ioDecReaderCommon + + rr io.Reader + br io.ByteScanner + + x [scratchByteArrayLen]byte // for: get struct field name, swallow valueTypeBytes, etc + _ [1]uint64 // padding +} + +func (z *ioDecReader) reset(r io.Reader) { + z.ioDecReaderCommon.reset(r) + + var ok bool + z.rr = r + z.br, ok = r.(io.ByteScanner) + if !ok { + z.br = z + z.rr = z + } +} + +func (z *ioDecReader) Read(p []byte) (n int, err error) { + if len(p) == 0 { + return + } + var firstByte bool + if z.ls == unreadByteCanRead { + z.ls = unreadByteCanUnread + p[0] = z.l + if len(p) == 1 { + n = 1 + return + } + firstByte = true + p = p[1:] + } + n, err = z.r.Read(p) + if n > 0 { + if err == io.EOF && n == len(p) { + err = nil // read was successful, so postpone EOF (till next time) + } + z.l = p[n-1] + z.ls = unreadByteCanUnread + } + if firstByte { + n++ + } + return +} + +func (z *ioDecReader) ReadByte() (c byte, err error) { + n, err := z.Read(z.b[:1]) + if n == 1 { + c = z.b[0] + if err == io.EOF { + err = nil // read was successful, so postpone EOF (till next time) + } + } + return +} + +func (z *ioDecReader) UnreadByte() (err error) { + switch z.ls { + case unreadByteCanUnread: + z.ls = unreadByteCanRead + case unreadByteCanRead: + err = errDecUnreadByteLastByteNotRead + case unreadByteUndefined: + err = errDecUnreadByteNothingToRead + default: + err = errDecUnreadByteUnknown + } + return +} + +func (z *ioDecReader) readx(n uint) (bs []byte) { + if n == 0 { + return + } + if n < uint(len(z.x)) { + bs = z.x[:n] + } else { + bs = make([]byte, n) + } + if _, err := decReadFull(z.rr, bs); err != nil { + panic(err) + } + z.n += uint(len(bs)) + if z.trb { + z.tr = append(z.tr, bs...) + } + return +} + +func (z *ioDecReader) readb(bs []byte) { + if len(bs) == 0 { + return + } + if _, err := decReadFull(z.rr, bs); err != nil { + panic(err) + } + z.n += uint(len(bs)) + if z.trb { + z.tr = append(z.tr, bs...) 
+ } +} + +func (z *ioDecReader) readn1eof() (b uint8, eof bool) { + b, err := z.br.ReadByte() + if err == nil { + z.n++ + if z.trb { + z.tr = append(z.tr, b) + } + } else if err == io.EOF { + eof = true + } else { + panic(err) + } + return +} + +func (z *ioDecReader) readn1() (b uint8) { + b, err := z.br.ReadByte() + if err == nil { + z.n++ + if z.trb { + z.tr = append(z.tr, b) + } + return + } + panic(err) +} + +func (z *ioDecReader) skip(accept *bitset256) (token byte) { + var eof bool + // for { + // token, eof = z.readn1eof() + // if eof { + // return + // } + // if accept.isset(token) { + // continue + // } + // return + // } +LOOP: + token, eof = z.readn1eof() + if eof { + return + } + if accept.isset(token) { + goto LOOP + } + return +} + +func (z *ioDecReader) readTo(in []byte, accept *bitset256) []byte { + // out = in + + // for { + // token, eof := z.readn1eof() + // if eof { + // return + // } + // if accept.isset(token) { + // out = append(out, token) + // } else { + // z.unreadn1() + // return + // } + // } +LOOP: + token, eof := z.readn1eof() + if eof { + return in + } + if accept.isset(token) { + // out = append(out, token) + in = append(in, token) + goto LOOP + } + z.unreadn1() + return in +} + +func (z *ioDecReader) readUntil(in []byte, stop byte) (out []byte) { + out = in + // for { + // token, eof := z.readn1eof() + // if eof { + // panic(io.EOF) + // } + // out = append(out, token) + // if token == stop { + // return + // } + // } +LOOP: + token, eof := z.readn1eof() + if eof { + panic(io.EOF) + } + out = append(out, token) + if token == stop { + return + } + goto LOOP +} + +//go:noinline +func (z *ioDecReader) unreadn1() { + err := z.br.UnreadByte() + if err != nil { + panic(err) + } + z.n-- + if z.trb { + if l := len(z.tr) - 1; l >= 0 { + z.tr = z.tr[:l] + } + } +} + +// ------------------------------------ + +type bufioDecReader struct { + ioDecReaderCommon + + c uint // cursor + buf []byte + + bytesBufPooler + + // err error + + // Extensions can call Decode() within a current Decode() call. + // We need to know when the top level Decode() call returns, + // so we can decide whether to Release() or not. + calls uint16 // what depth in mustDecode are we in now. + + _ [6]uint8 // padding + + _ [1]uint64 // padding +} + +func (z *bufioDecReader) reset(r io.Reader, bufsize int) { + z.ioDecReaderCommon.reset(r) + z.c = 0 + z.calls = 0 + if cap(z.buf) >= bufsize { + z.buf = z.buf[:0] + } else { + z.buf = z.bytesBufPooler.get(bufsize)[:0] + // z.buf = make([]byte, 0, bufsize) + } +} + +func (z *bufioDecReader) release() { + z.buf = nil + z.bytesBufPooler.end() +} + +func (z *bufioDecReader) readb(p []byte) { + var n = uint(copy(p, z.buf[z.c:])) + z.n += n + z.c += n + if len(p) == int(n) { + if z.trb { + z.tr = append(z.tr, p...) // cost=9 + } + } else { + z.readbFill(p, n) + } +} + +//go:noinline - fallback when z.buf is consumed +func (z *bufioDecReader) readbFill(p0 []byte, n uint) { + // at this point, there's nothing in z.buf to read (z.buf is fully consumed) + p := p0[n:] + var n2 uint + var err error + if len(p) > cap(z.buf) { + n2, err = decReadFull(z.r, p) + if err != nil { + panic(err) + } + n += n2 + z.n += n2 + // always keep last byte in z.buf + z.buf = z.buf[:1] + z.buf[0] = p[len(p)-1] + z.c = 1 + if z.trb { + z.tr = append(z.tr, p0[:n]...) 
+ } + return + } + // z.c is now 0, and len(p) <= cap(z.buf) +LOOP: + // for len(p) > 0 && z.err == nil { + if len(p) > 0 { + z.buf = z.buf[0:cap(z.buf)] + var n1 int + n1, err = z.r.Read(z.buf) + n2 = uint(n1) + if n2 == 0 && err != nil { + panic(err) + } + z.buf = z.buf[:n2] + n2 = uint(copy(p, z.buf)) + z.c = n2 + n += n2 + z.n += n2 + p = p[n2:] + goto LOOP + } + if z.c == 0 { + z.buf = z.buf[:1] + z.buf[0] = p[len(p)-1] + z.c = 1 + } + if z.trb { + z.tr = append(z.tr, p0[:n]...) + } +} + +func (z *bufioDecReader) readn1() (b byte) { + // fast-path, so we elide calling into Read() most of the time + if z.c < uint(len(z.buf)) { + b = z.buf[z.c] + z.c++ + z.n++ + if z.trb { + z.tr = append(z.tr, b) + } + } else { // meaning z.c == len(z.buf) or greater ... so need to fill + z.readbFill(z.b[:1], 0) + b = z.b[0] + } + return +} + +func (z *bufioDecReader) unreadn1() { + if z.c == 0 { + panic(errDecUnreadByteNothingToRead) + } + z.c-- + z.n-- + if z.trb { + z.tr = z.tr[:len(z.tr)-1] + } +} + +func (z *bufioDecReader) readx(n uint) (bs []byte) { + if n == 0 { + // return + } else if z.c+n <= uint(len(z.buf)) { + bs = z.buf[z.c : z.c+n] + z.n += n + z.c += n + if z.trb { + z.tr = append(z.tr, bs...) + } + } else { + bs = make([]byte, n) + // n no longer used - can reuse + n = uint(copy(bs, z.buf[z.c:])) + z.n += n + z.c += n + z.readbFill(bs, n) + } + return +} + +//go:noinline - track called by Decoder.nextValueBytes() (called by jsonUnmarshal,rawBytes) +func (z *bufioDecReader) doTrack(y uint) { + z.tr = append(z.tr, z.buf[z.c:y]...) // cost=14??? +} + +func (z *bufioDecReader) skipLoopFn(i uint) { + z.n += (i - z.c) - 1 + i++ + if z.trb { + // z.tr = append(z.tr, z.buf[z.c:i]...) + z.doTrack(i) + } + z.c = i +} + +func (z *bufioDecReader) skip(accept *bitset256) (token byte) { + // token, _ = z.search(nil, accept, 0, 1); return + + // for i := z.c; i < len(z.buf); i++ { + // if token = z.buf[i]; !accept.isset(token) { + // z.skipLoopFn(i) + // return + // } + // } + + i := z.c +LOOP: + if i < uint(len(z.buf)) { + // inline z.skipLoopFn(i) and refactor, so cost is within inline budget + token = z.buf[i] + i++ + if accept.isset(token) { + goto LOOP + } + z.n += i - 2 - z.c + if z.trb { + z.doTrack(i) + } + z.c = i + return + } + return z.skipFill(accept) +} + +func (z *bufioDecReader) skipFill(accept *bitset256) (token byte) { + z.n += uint(len(z.buf)) - z.c + if z.trb { + z.tr = append(z.tr, z.buf[z.c:]...) + } + var n2 int + var err error + for { + z.c = 0 + z.buf = z.buf[0:cap(z.buf)] + n2, err = z.r.Read(z.buf) + if n2 == 0 && err != nil { + panic(err) + } + z.buf = z.buf[:n2] + var i int + for i, token = range z.buf { + if !accept.isset(token) { + z.skipLoopFn(uint(i)) + return + } + } + // for i := 0; i < n2; i++ { + // if token = z.buf[i]; !accept.isset(token) { + // z.skipLoopFn(i) + // return + // } + // } + z.n += uint(n2) + if z.trb { + z.tr = append(z.tr, z.buf...) + } + } +} + +func (z *bufioDecReader) readToLoopFn(i uint, out0 []byte) (out []byte) { + // out0 is never nil + z.n += (i - z.c) - 1 + out = append(out0, z.buf[z.c:i]...) 
+ if z.trb { + z.doTrack(i) + } + z.c = i + return +} + +func (z *bufioDecReader) readTo(in []byte, accept *bitset256) (out []byte) { + // _, out = z.search(in, accept, 0, 2); return + + // for i := z.c; i < len(z.buf); i++ { + // if !accept.isset(z.buf[i]) { + // return z.readToLoopFn(i, nil) + // } + // } + + i := z.c +LOOP: + if i < uint(len(z.buf)) { + if !accept.isset(z.buf[i]) { + // return z.readToLoopFn(i, nil) + // inline readToLoopFn here (for performance) + z.n += (i - z.c) - 1 + out = z.buf[z.c:i] + if z.trb { + z.doTrack(i) + } + z.c = i + return + } + i++ + goto LOOP + } + return z.readToFill(in, accept) +} + +func (z *bufioDecReader) readToFill(in []byte, accept *bitset256) (out []byte) { + z.n += uint(len(z.buf)) - z.c + out = append(in, z.buf[z.c:]...) + if z.trb { + z.tr = append(z.tr, z.buf[z.c:]...) + } + var n2 int + var err error + for { + z.c = 0 + z.buf = z.buf[0:cap(z.buf)] + n2, err = z.r.Read(z.buf) + if n2 == 0 && err != nil { + if err == io.EOF { + return // readTo should read until it matches or end is reached + } + panic(err) + } + z.buf = z.buf[:n2] + for i, token := range z.buf { + if !accept.isset(token) { + return z.readToLoopFn(uint(i), out) + } + } + // for i := 0; i < n2; i++ { + // if !accept.isset(z.buf[i]) { + // return z.readToLoopFn(i, out) + // } + // } + out = append(out, z.buf...) + z.n += uint(n2) + if z.trb { + z.tr = append(z.tr, z.buf...) + } + } +} + +func (z *bufioDecReader) readUntilLoopFn(i uint, out0 []byte) (out []byte) { + z.n += (i - z.c) - 1 + i++ + out = append(out0, z.buf[z.c:i]...) + if z.trb { + // z.tr = append(z.tr, z.buf[z.c:i]...) + z.doTrack(i) + } + z.c = i + return +} + +func (z *bufioDecReader) readUntil(in []byte, stop byte) (out []byte) { + // _, out = z.search(in, nil, stop, 4); return + + // for i := z.c; i < len(z.buf); i++ { + // if z.buf[i] == stop { + // return z.readUntilLoopFn(i, nil) + // } + // } + + i := z.c +LOOP: + if i < uint(len(z.buf)) { + if z.buf[i] == stop { + // inline readUntilLoopFn + // return z.readUntilLoopFn(i, nil) + z.n += (i - z.c) - 1 + i++ + out = z.buf[z.c:i] + if z.trb { + z.doTrack(i) + } + z.c = i + return + } + i++ + goto LOOP + } + return z.readUntilFill(in, stop) +} + +func (z *bufioDecReader) readUntilFill(in []byte, stop byte) (out []byte) { + z.n += uint(len(z.buf)) - z.c + out = append(in, z.buf[z.c:]...) + if z.trb { + z.tr = append(z.tr, z.buf[z.c:]...) + } + var n1 int + var n2 uint + var err error + for { + z.c = 0 + z.buf = z.buf[0:cap(z.buf)] + n1, err = z.r.Read(z.buf) + n2 = uint(n1) + if n2 == 0 && err != nil { + panic(err) + } + z.buf = z.buf[:n2] + for i, token := range z.buf { + if token == stop { + return z.readUntilLoopFn(uint(i), out) + } + } + // for i := 0; i < n2; i++ { + // if z.buf[i] == stop { + // return z.readUntilLoopFn(i, out) + // } + // } + out = append(out, z.buf...) + z.n += n2 + if z.trb { + z.tr = append(z.tr, z.buf...) 
+ } + } +} + +// ------------------------------------ + +var errBytesDecReaderCannotUnread = errors.New("cannot unread last byte read") + +// bytesDecReader is a decReader that reads off a byte slice with zero copying +type bytesDecReader struct { + b []byte // data + c uint // cursor + t uint // track start + // a int // available +} + +func (z *bytesDecReader) reset(in []byte) { + z.b = in + // z.a = len(in) + z.c = 0 + z.t = 0 +} + +func (z *bytesDecReader) numread() uint { + return z.c +} + +func (z *bytesDecReader) unreadn1() { + if z.c == 0 || len(z.b) == 0 { + panic(errBytesDecReaderCannotUnread) + } + z.c-- + // z.a++ +} + +func (z *bytesDecReader) readx(n uint) (bs []byte) { + // slicing from a non-constant start position is more expensive, + // as more computation is required to decipher the pointer start position. + // However, we do it only once, and it's better than reslicing both z.b and return value. + + // if n <= 0 { + // } else if z.a == 0 { + // panic(io.EOF) + // } else if n > z.a { + // panic(io.ErrUnexpectedEOF) + // } else { + // c0 := z.c + // z.c = c0 + n + // z.a = z.a - n + // bs = z.b[c0:z.c] + // } + // return + + if n != 0 { + z.c += n + if z.c > uint(len(z.b)) { + z.c = uint(len(z.b)) + panic(io.EOF) + } + bs = z.b[z.c-n : z.c] + } + return + + // if n == 0 { + // } else if z.c+n > uint(len(z.b)) { + // z.c = uint(len(z.b)) + // panic(io.EOF) + // } else { + // z.c += n + // bs = z.b[z.c-n : z.c] + // } + // return + + // if n == 0 { + // return + // } + // if z.c == uint(len(z.b)) { + // panic(io.EOF) + // } + // if z.c+n > uint(len(z.b)) { + // panic(io.ErrUnexpectedEOF) + // } + // // z.a -= n + // z.c += n + // return z.b[z.c-n : z.c] +} + +func (z *bytesDecReader) readb(bs []byte) { + copy(bs, z.readx(uint(len(bs)))) +} + +func (z *bytesDecReader) readn1() (v uint8) { + if z.c == uint(len(z.b)) { + panic(io.EOF) + } + v = z.b[z.c] + z.c++ + // z.a-- + return +} + +// func (z *bytesDecReader) readn1eof() (v uint8, eof bool) { +// if z.a == 0 { +// eof = true +// return +// } +// v = z.b[z.c] +// z.c++ +// z.a-- +// return +// } + +func (z *bytesDecReader) skip(accept *bitset256) (token byte) { + i := z.c + // if i == len(z.b) { + // goto END + // // panic(io.EOF) + // } + + // Replace loop with goto construct, so that this can be inlined + // for i := z.c; i < blen; i++ { + // if !accept.isset(z.b[i]) { + // token = z.b[i] + // i++ + // z.a -= (i - z.c) + // z.c = i + // return + // } + // } + + // i := z.c +LOOP: + if i < uint(len(z.b)) { + token = z.b[i] + i++ + if accept.isset(token) { + goto LOOP + } + // z.a -= (i - z.c) + z.c = i + return + } + // END: + panic(io.EOF) + // // z.a = 0 + // z.c = blen + // return +} + +func (z *bytesDecReader) readTo(_ []byte, accept *bitset256) (out []byte) { + return z.readToNoInput(accept) +} + +func (z *bytesDecReader) readToNoInput(accept *bitset256) (out []byte) { + i := z.c + if i == uint(len(z.b)) { + panic(io.EOF) + } + + // Replace loop with goto construct, so that this can be inlined + // for i := z.c; i < blen; i++ { + // if !accept.isset(z.b[i]) { + // out = z.b[z.c:i] + // z.a -= (i - z.c) + // z.c = i + // return + // } + // } + // out = z.b[z.c:] + // z.a, z.c = 0, blen + // return + + // i := z.c + // LOOP: + // if i < blen { + // if accept.isset(z.b[i]) { + // i++ + // goto LOOP + // } + // out = z.b[z.c:i] + // z.a -= (i - z.c) + // z.c = i + // return + // } + // out = z.b[z.c:] + // // z.a, z.c = 0, blen + // z.a = 0 + // z.c = blen + // return + + // c := i +LOOP: + if i < uint(len(z.b)) { + if 
accept.isset(z.b[i]) { + i++ + goto LOOP + } + } + + out = z.b[z.c:i] + // z.a -= (i - z.c) + z.c = i + return // z.b[c:i] + // z.c, i = i, z.c + // return z.b[i:z.c] +} + +func (z *bytesDecReader) readUntil(_ []byte, stop byte) (out []byte) { + return z.readUntilNoInput(stop) +} + +func (z *bytesDecReader) readUntilNoInput(stop byte) (out []byte) { + i := z.c + // if i == len(z.b) { + // panic(io.EOF) + // } + + // Replace loop with goto construct, so that this can be inlined + // for i := z.c; i < blen; i++ { + // if z.b[i] == stop { + // i++ + // out = z.b[z.c:i] + // z.a -= (i - z.c) + // z.c = i + // return + // } + // } +LOOP: + if i < uint(len(z.b)) { + if z.b[i] == stop { + i++ + out = z.b[z.c:i] + // z.a -= (i - z.c) + z.c = i + return + } + i++ + goto LOOP + } + // z.a = 0 + // z.c = blen + panic(io.EOF) +} + +func (z *bytesDecReader) track() { + z.t = z.c +} + +func (z *bytesDecReader) stopTrack() (bs []byte) { + return z.b[z.t:z.c] +} + +// ---------------------------------------- + +// func (d *Decoder) builtin(f *codecFnInfo, rv reflect.Value) { +// d.d.DecodeBuiltin(f.ti.rtid, rv2i(rv)) +// } + +func (d *Decoder) rawExt(f *codecFnInfo, rv reflect.Value) { + d.d.DecodeExt(rv2i(rv), 0, nil) +} + +func (d *Decoder) ext(f *codecFnInfo, rv reflect.Value) { + d.d.DecodeExt(rv2i(rv), f.xfTag, f.xfFn) +} + +func (d *Decoder) selferUnmarshal(f *codecFnInfo, rv reflect.Value) { + rv2i(rv).(Selfer).CodecDecodeSelf(d) +} + +func (d *Decoder) binaryUnmarshal(f *codecFnInfo, rv reflect.Value) { + bm := rv2i(rv).(encoding.BinaryUnmarshaler) + xbs := d.d.DecodeBytes(nil, true) + if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil { + panic(fnerr) + } +} + +func (d *Decoder) textUnmarshal(f *codecFnInfo, rv reflect.Value) { + tm := rv2i(rv).(encoding.TextUnmarshaler) + fnerr := tm.UnmarshalText(d.d.DecodeStringAsBytes()) + if fnerr != nil { + panic(fnerr) + } +} + +func (d *Decoder) jsonUnmarshal(f *codecFnInfo, rv reflect.Value) { + tm := rv2i(rv).(jsonUnmarshaler) + // bs := d.d.DecodeBytes(d.b[:], true, true) + // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. + fnerr := tm.UnmarshalJSON(d.nextValueBytes()) + if fnerr != nil { + panic(fnerr) + } +} + +func (d *Decoder) kErr(f *codecFnInfo, rv reflect.Value) { + d.errorf("no decoding function defined for kind %v", rv.Kind()) +} + +// var kIntfCtr uint64 + +func (d *Decoder) kInterfaceNaked(f *codecFnInfo) (rvn reflect.Value) { + // nil interface: + // use some hieristics to decode it appropriately + // based on the detected next value in the stream. + n := d.naked() + d.d.DecodeNaked() + if n.v == valueTypeNil { + return + } + // We cannot decode non-nil stream value into nil interface with methods (e.g. io.Reader). 
+ if f.ti.numMeth > 0 { + d.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth) + return + } + // var useRvn bool + switch n.v { + case valueTypeMap: + // if json, default to a map type with string keys + mtid := d.mtid + if mtid == 0 { + if d.jsms { + mtid = mapStrIntfTypId + } else { + mtid = mapIntfIntfTypId + } + } + if mtid == mapIntfIntfTypId { + var v2 map[interface{}]interface{} + d.decode(&v2) + rvn = reflect.ValueOf(&v2).Elem() + } else if mtid == mapStrIntfTypId { // for json performance + var v2 map[string]interface{} + d.decode(&v2) + rvn = reflect.ValueOf(&v2).Elem() + } else { + if d.mtr { + rvn = reflect.New(d.h.MapType) + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + rvn = reflect.New(d.h.MapType).Elem() + d.decodeValue(rvn, nil, true) + } + } + case valueTypeArray: + if d.stid == 0 || d.stid == intfSliceTypId { + var v2 []interface{} + d.decode(&v2) + rvn = reflect.ValueOf(&v2).Elem() + if d.stid == 0 && d.h.PreferArrayOverSlice { + rvn2 := reflect.New(reflect.ArrayOf(rvn.Len(), intfTyp)).Elem() + reflect.Copy(rvn2, rvn) + rvn = rvn2 + } + } else { + if d.str { + rvn = reflect.New(d.h.SliceType) + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + rvn = reflect.New(d.h.SliceType).Elem() + d.decodeValue(rvn, nil, true) + } + } + case valueTypeExt: + var v interface{} + tag, bytes := n.u, n.l // calling decode below might taint the values + if bytes == nil { + d.decode(&v) + } + bfn := d.h.getExtForTag(tag) + if bfn == nil { + var re RawExt + re.Tag = tag + re.Data = detachZeroCopyBytes(d.bytes, nil, bytes) + re.Value = v + rvn = reflect.ValueOf(&re).Elem() + } else { + rvnA := reflect.New(bfn.rt) + if bytes != nil { + bfn.ext.ReadExt(rv2i(rvnA), bytes) + } else { + bfn.ext.UpdateExt(rv2i(rvnA), v) + } + rvn = rvnA.Elem() + } + case valueTypeNil: + // no-op + case valueTypeInt: + rvn = n.ri() + case valueTypeUint: + rvn = n.ru() + case valueTypeFloat: + rvn = n.rf() + case valueTypeBool: + rvn = n.rb() + case valueTypeString, valueTypeSymbol: + rvn = n.rs() + case valueTypeBytes: + rvn = n.rl() + case valueTypeTime: + rvn = n.rt() + default: + panicv.errorf("kInterfaceNaked: unexpected valueType: %d", n.v) + } + return +} + +func (d *Decoder) kInterface(f *codecFnInfo, rv reflect.Value) { + // Note: + // A consequence of how kInterface works, is that + // if an interface already contains something, we try + // to decode into what was there before. + // We do not replace with a generic value (as got from decodeNaked). + + // every interface passed here MUST be settable. + var rvn reflect.Value + if rv.IsNil() || d.h.InterfaceReset { + // check if mapping to a type: if so, initialize it and move on + rvn = d.h.intf2impl(f.ti.rtid) + if rvn.IsValid() { + rv.Set(rvn) + } else { + rvn = d.kInterfaceNaked(f) + if rvn.IsValid() { + rv.Set(rvn) + } else if d.h.InterfaceReset { + // reset to zero value based on current type in there. + rv.Set(reflect.Zero(rv.Elem().Type())) + } + return + } + } else { + // now we have a non-nil interface value, meaning it contains a type + rvn = rv.Elem() + } + if d.d.TryDecodeAsNil() { + rv.Set(reflect.Zero(rvn.Type())) + return + } + + // Note: interface{} is settable, but underlying type may not be. + // Consequently, we MAY have to create a decodable value out of the underlying value, + // decode into it, and reset the interface itself. 
+ // fmt.Printf(">>>> kInterface: rvn type: %v, rv type: %v\n", rvn.Type(), rv.Type()) + + rvn2, canDecode := isDecodeable(rvn) + if canDecode { + d.decodeValue(rvn2, nil, true) + return + } + + rvn2 = reflect.New(rvn.Type()).Elem() + rvn2.Set(rvn) + d.decodeValue(rvn2, nil, true) + rv.Set(rvn2) +} + +func decStructFieldKey(dd decDriver, keyType valueType, b *[decScratchByteArrayLen]byte) (rvkencname []byte) { + // use if-else-if, not switch (which compiles to binary-search) + // since keyType is typically valueTypeString, branch prediction is pretty good. + + if keyType == valueTypeString { + rvkencname = dd.DecodeStringAsBytes() + } else if keyType == valueTypeInt { + rvkencname = strconv.AppendInt(b[:0], dd.DecodeInt64(), 10) + } else if keyType == valueTypeUint { + rvkencname = strconv.AppendUint(b[:0], dd.DecodeUint64(), 10) + } else if keyType == valueTypeFloat { + rvkencname = strconv.AppendFloat(b[:0], dd.DecodeFloat64(), 'f', -1, 64) + } else { + rvkencname = dd.DecodeStringAsBytes() + } + return rvkencname +} + +func (d *Decoder) kStruct(f *codecFnInfo, rv reflect.Value) { + fti := f.ti + dd := d.d + elemsep := d.esep + sfn := structFieldNode{v: rv, update: true} + ctyp := dd.ContainerType() + var mf MissingFielder + if fti.mf { + mf = rv2i(rv).(MissingFielder) + } else if fti.mfp { + mf = rv2i(rv.Addr()).(MissingFielder) + } + if ctyp == valueTypeMap { + containerLen := dd.ReadMapStart() + if containerLen == 0 { + dd.ReadMapEnd() + return + } + d.depthIncr() + tisfi := fti.sfiSort + hasLen := containerLen >= 0 + + var rvkencname []byte + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if elemsep { + dd.ReadMapElemKey() + } + rvkencname = decStructFieldKey(dd, fti.keyType, &d.b) + if elemsep { + dd.ReadMapElemValue() + } + if k := fti.indexForEncName(rvkencname); k > -1 { + si := tisfi[k] + if dd.TryDecodeAsNil() { + si.setToZeroValue(rv) + } else { + d.decodeValue(sfn.field(si), nil, true) + } + } else if mf != nil { + // store rvkencname in new []byte, as it previously shares Decoder.b, which is used in decode + name2 := rvkencname + rvkencname = make([]byte, len(rvkencname)) + copy(rvkencname, name2) + + var f interface{} + // xdebugf("kStruct: mf != nil: before decode: rvkencname: %s", rvkencname) + d.decode(&f) + // xdebugf("kStruct: mf != nil: after decode: rvkencname: %s", rvkencname) + if !mf.CodecMissingField(rvkencname, f) && d.h.ErrorIfNoField { + d.errorf("no matching struct field found when decoding stream map with key: %s ", + stringView(rvkencname)) + } + } else { + d.structFieldNotFound(-1, stringView(rvkencname)) + } + // keepAlive4StringView(rvkencnameB) // not needed, as reference is outside loop + } + dd.ReadMapEnd() + d.depthDecr() + } else if ctyp == valueTypeArray { + containerLen := dd.ReadArrayStart() + if containerLen == 0 { + dd.ReadArrayEnd() + return + } + d.depthIncr() + // Not much gain from doing it two ways for array. + // Arrays are not used as much for structs. 
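+		// decode each stream array element into the corresponding struct field, in field order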
+ hasLen := containerLen >= 0 + var checkbreak bool + for j, si := range fti.sfiSrc { + if hasLen && j == containerLen { + break + } + if !hasLen && dd.CheckBreak() { + checkbreak = true + break + } + if elemsep { + dd.ReadArrayElem() + } + if dd.TryDecodeAsNil() { + si.setToZeroValue(rv) + } else { + d.decodeValue(sfn.field(si), nil, true) + } + } + if (hasLen && containerLen > len(fti.sfiSrc)) || (!hasLen && !checkbreak) { + // read remaining values and throw away + for j := len(fti.sfiSrc); ; j++ { + if (hasLen && j == containerLen) || (!hasLen && dd.CheckBreak()) { + break + } + if elemsep { + dd.ReadArrayElem() + } + d.structFieldNotFound(j, "") + } + } + dd.ReadArrayEnd() + d.depthDecr() + } else { + d.errorstr(errstrOnlyMapOrArrayCanDecodeIntoStruct) + return + } +} + +func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) { + // A slice can be set from a map or array in stream. + // This way, the order can be kept (as order is lost with map). + ti := f.ti + if f.seq == seqTypeChan && ti.chandir&uint8(reflect.SendDir) == 0 { + d.errorf("receive-only channel cannot be decoded") + } + dd := d.d + rtelem0 := ti.elem + ctyp := dd.ContainerType() + if ctyp == valueTypeBytes || ctyp == valueTypeString { + // you can only decode bytes or string in the stream into a slice or array of bytes + if !(ti.rtid == uint8SliceTypId || rtelem0.Kind() == reflect.Uint8) { + d.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt) + } + if f.seq == seqTypeChan { + bs2 := dd.DecodeBytes(nil, true) + irv := rv2i(rv) + ch, ok := irv.(chan<- byte) + if !ok { + ch = irv.(chan byte) + } + for _, b := range bs2 { + ch <- b + } + } else { + rvbs := rv.Bytes() + bs2 := dd.DecodeBytes(rvbs, false) + // if rvbs == nil && bs2 != nil || rvbs != nil && bs2 == nil || len(bs2) != len(rvbs) { + if !(len(bs2) > 0 && len(bs2) == len(rvbs) && &bs2[0] == &rvbs[0]) { + if rv.CanSet() { + rv.SetBytes(bs2) + } else if len(rvbs) > 0 && len(bs2) > 0 { + copy(rvbs, bs2) + } + } + } + return + } + + // array := f.seq == seqTypeChan + + slh, containerLenS := d.decSliceHelperStart() // only expects valueType(Array|Map) + + // an array can never return a nil slice. so no need to check f.array here. + if containerLenS == 0 { + if rv.CanSet() { + if f.seq == seqTypeSlice { + if rv.IsNil() { + rv.Set(reflect.MakeSlice(ti.rt, 0, 0)) + } else { + rv.SetLen(0) + } + } else if f.seq == seqTypeChan { + if rv.IsNil() { + rv.Set(reflect.MakeChan(ti.rt, 0)) + } + } + } + slh.End() + return + } + + d.depthIncr() + + rtelem0Size := int(rtelem0.Size()) + rtElem0Kind := rtelem0.Kind() + rtelem0Mut := !isImmutableKind(rtElem0Kind) + rtelem := rtelem0 + rtelemkind := rtelem.Kind() + for rtelemkind == reflect.Ptr { + rtelem = rtelem.Elem() + rtelemkind = rtelem.Kind() + } + + var fn *codecFn + + var rvCanset = rv.CanSet() + var rvChanged bool + var rv0 = rv + var rv9 reflect.Value + + rvlen := rv.Len() + rvcap := rv.Cap() + hasLen := containerLenS > 0 + if hasLen && f.seq == seqTypeSlice { + if containerLenS > rvcap { + oldRvlenGtZero := rvlen > 0 + rvlen = decInferLen(containerLenS, d.h.MaxInitLen, int(rtelem0.Size())) + if rvlen <= rvcap { + if rvCanset { + rv.SetLen(rvlen) + } + } else if rvCanset { + rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) + rvcap = rvlen + rvChanged = true + } else { + d.errorf("cannot decode into non-settable slice") + } + if rvChanged && oldRvlenGtZero && !isImmutableKind(rtelem0.Kind()) { + reflect.Copy(rv, rv0) // only copy up to length NOT cap i.e. 
rv0.Slice(0, rvcap) + } + } else if containerLenS != rvlen { + rvlen = containerLenS + if rvCanset { + rv.SetLen(rvlen) + } + // else { + // rv = rv.Slice(0, rvlen) + // rvChanged = true + // d.errorf("cannot decode into non-settable slice") + // } + } + } + + // consider creating new element once, and just decoding into it. + var rtelem0Zero reflect.Value + var rtelem0ZeroValid bool + var decodeAsNil bool + var j int + + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && (f.seq == seqTypeSlice || f.seq == seqTypeChan) && rv.IsNil() { + if hasLen { + rvlen = decInferLen(containerLenS, d.h.MaxInitLen, rtelem0Size) + } else if f.seq == seqTypeSlice { + rvlen = decDefSliceCap + } else { + rvlen = decDefChanCap + } + if rvCanset { + if f.seq == seqTypeSlice { + rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) + rvChanged = true + } else { // chan + rv = reflect.MakeChan(ti.rt, rvlen) + rvChanged = true + } + } else { + d.errorf("cannot decode into non-settable slice") + } + } + slh.ElemContainerState(j) + decodeAsNil = dd.TryDecodeAsNil() + if f.seq == seqTypeChan { + if decodeAsNil { + rv.Send(reflect.Zero(rtelem0)) + continue + } + if rtelem0Mut || !rv9.IsValid() { // || (rtElem0Kind == reflect.Ptr && rv9.IsNil()) { + rv9 = reflect.New(rtelem0).Elem() + } + if fn == nil { + fn = d.h.fn(rtelem, true, true) + } + d.decodeValue(rv9, fn, true) + rv.Send(rv9) + } else { + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= rvlen { + if f.seq == seqTypeArray { + d.arrayCannotExpand(rvlen, j+1) + decodeIntoBlank = true + } else { // if f.seq == seqTypeSlice + // rv = reflect.Append(rv, reflect.Zero(rtelem0)) // append logic + varargs + var rvcap2 int + var rvErrmsg2 string + rv9, rvcap2, rvChanged, rvErrmsg2 = + expandSliceRV(rv, ti.rt, rvCanset, rtelem0Size, 1, rvlen, rvcap) + if rvErrmsg2 != "" { + d.errorf(rvErrmsg2) + } + rvlen++ + if rvChanged { + rv = rv9 + rvcap = rvcap2 + } + } + } + if decodeIntoBlank { + if !decodeAsNil { + d.swallow() + } + } else { + rv9 = rv.Index(j) + if d.h.SliceElementReset || decodeAsNil { + if !rtelem0ZeroValid { + rtelem0ZeroValid = true + rtelem0Zero = reflect.Zero(rtelem0) + } + rv9.Set(rtelem0Zero) + if decodeAsNil { + continue + } + } + + if fn == nil { + fn = d.h.fn(rtelem, true, true) + } + d.decodeValue(rv9, fn, true) + } + } + } + if f.seq == seqTypeSlice { + if j < rvlen { + if rv.CanSet() { + rv.SetLen(j) + } else if rvCanset { + rv = rv.Slice(0, j) + rvChanged = true + } // else { d.errorf("kSlice: cannot change non-settable slice") } + rvlen = j + } else if j == 0 && rv.IsNil() { + if rvCanset { + rv = reflect.MakeSlice(ti.rt, 0, 0) + rvChanged = true + } // else { d.errorf("kSlice: cannot change non-settable slice") } + } + } + slh.End() + + if rvChanged { // infers rvCanset=true, so it can be reset + rv0.Set(rv) + } + + d.depthDecr() +} + +// func (d *Decoder) kArray(f *codecFnInfo, rv reflect.Value) { +// // d.decodeValueFn(rv.Slice(0, rv.Len())) +// f.kSlice(rv.Slice(0, rv.Len())) +// } + +func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) { + dd := d.d + containerLen := dd.ReadMapStart() + elemsep := d.esep + ti := f.ti + if rv.IsNil() { + rvlen := decInferLen(containerLen, d.h.MaxInitLen, int(ti.key.Size()+ti.elem.Size())) + rv.Set(makeMapReflect(ti.rt, rvlen)) + } + + if containerLen == 0 { + dd.ReadMapEnd() + return + } + + d.depthIncr() + + ktype, vtype := ti.key, ti.elem + ktypeId := rt2id(ktype) + vtypeKind := vtype.Kind() + + var keyFn, valFn *codecFn + var 
ktypeLo, vtypeLo reflect.Type + + for ktypeLo = ktype; ktypeLo.Kind() == reflect.Ptr; ktypeLo = ktypeLo.Elem() { + } + + for vtypeLo = vtype; vtypeLo.Kind() == reflect.Ptr; vtypeLo = vtypeLo.Elem() { + } + + var mapGet, mapSet bool + rvvImmut := isImmutableKind(vtypeKind) + if !d.h.MapValueReset { + // if pointer, mapGet = true + // if interface, mapGet = true if !DecodeNakedAlways (else false) + // if builtin, mapGet = false + // else mapGet = true + if vtypeKind == reflect.Ptr { + mapGet = true + } else if vtypeKind == reflect.Interface { + if !d.h.InterfaceReset { + mapGet = true + } + } else if !rvvImmut { + mapGet = true + } + } + + var rvk, rvkp, rvv, rvz reflect.Value + rvkMut := !isImmutableKind(ktype.Kind()) // if ktype is immutable, then re-use the same rvk. + ktypeIsString := ktypeId == stringTypId + ktypeIsIntf := ktypeId == intfTypId + hasLen := containerLen > 0 + var kstrbs []byte + + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if rvkMut || !rvkp.IsValid() { + rvkp = reflect.New(ktype) + rvk = rvkp.Elem() + } + if elemsep { + dd.ReadMapElemKey() + } + // if false && dd.TryDecodeAsNil() { // nil cannot be a map key, so disregard this block + // // Previously, if a nil key, we just ignored the mapped value and continued. + // // However, that makes the result of encoding and then decoding map[intf]intf{nil:nil} + // // to be an empty map. + // // Instead, we treat a nil key as the zero value of the type. + // rvk.Set(reflect.Zero(ktype)) + // } else if ktypeIsString { + if ktypeIsString { + kstrbs = dd.DecodeStringAsBytes() + rvk.SetString(stringView(kstrbs)) + // NOTE: if doing an insert, you MUST use a real string (not stringview) + } else { + if keyFn == nil { + keyFn = d.h.fn(ktypeLo, true, true) + } + d.decodeValue(rvk, keyFn, true) + } + // special case if a byte array. + if ktypeIsIntf { + if rvk2 := rvk.Elem(); rvk2.IsValid() { + if rvk2.Type() == uint8SliceTyp { + rvk = reflect.ValueOf(d.string(rvk2.Bytes())) + } else { + rvk = rvk2 + } + } + } + + if elemsep { + dd.ReadMapElemValue() + } + + // Brittle, but OK per TryDecodeAsNil() contract. + // i.e. TryDecodeAsNil never shares slices with other decDriver procedures + if dd.TryDecodeAsNil() { + if ktypeIsString { + rvk.SetString(d.string(kstrbs)) + } + if d.h.DeleteOnNilMapValue { + rv.SetMapIndex(rvk, reflect.Value{}) + } else { + rv.SetMapIndex(rvk, reflect.Zero(vtype)) + } + continue + } + + mapSet = true // set to false if u do a get, and its a non-nil pointer + if mapGet { + // mapGet true only in case where kind=Ptr|Interface or kind is otherwise mutable. + rvv = rv.MapIndex(rvk) + if !rvv.IsValid() { + rvv = reflect.New(vtype).Elem() + } else if vtypeKind == reflect.Ptr { + if rvv.IsNil() { + rvv = reflect.New(vtype).Elem() + } else { + mapSet = false + } + } else if vtypeKind == reflect.Interface { + // not addressable, and thus not settable. + // e MUST create a settable/addressable variant + rvv2 := reflect.New(rvv.Type()).Elem() + if !rvv.IsNil() { + rvv2.Set(rvv) + } + rvv = rvv2 + } + // else it is ~mutable, and we can just decode into it directly + } else if rvvImmut { + if !rvz.IsValid() { + rvz = reflect.New(vtype).Elem() + } + rvv = rvz + } else { + rvv = reflect.New(vtype).Elem() + } + + // We MUST be done with the stringview of the key, before decoding the value + // so that we don't bastardize the reused byte array. 
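+		// Re-setting the key here replaces the transient stringView with an interned real string.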
+ if mapSet && ktypeIsString { + rvk.SetString(d.string(kstrbs)) + } + if valFn == nil { + valFn = d.h.fn(vtypeLo, true, true) + } + d.decodeValue(rvv, valFn, true) + // d.decodeValueFn(rvv, valFn) + if mapSet { + rv.SetMapIndex(rvk, rvv) + } + // if ktypeIsString { + // // keepAlive4StringView(kstrbs) // not needed, as reference is outside loop + // } + } + + dd.ReadMapEnd() + + d.depthDecr() +} + +// decNaked is used to keep track of the primitives decoded. +// Without it, we would have to decode each primitive and wrap it +// in an interface{}, causing an allocation. +// In this model, the primitives are decoded in a "pseudo-atomic" fashion, +// so we can rest assured that no other decoding happens while these +// primitives are being decoded. +// +// maps and arrays are not handled by this mechanism. +// However, RawExt is, and we accommodate for extensions that decode +// RawExt from DecodeNaked, but need to decode the value subsequently. +// kInterfaceNaked and swallow, which call DecodeNaked, handle this caveat. +// +// However, decNaked also keeps some arrays of default maps and slices +// used in DecodeNaked. This way, we can get a pointer to it +// without causing a new heap allocation. +// +// kInterfaceNaked will ensure that there is no allocation for the common +// uses. + +type decNaked struct { + // r RawExt // used for RawExt, uint, []byte. + + // primitives below + u uint64 + i int64 + f float64 + l []byte + s string + + // ---- cpu cache line boundary? + t time.Time + b bool + + // state + v valueType + _ [6]bool // padding + + // ru, ri, rf, rl, rs, rb, rt reflect.Value // mapping to the primitives above + // + // _ [3]uint64 // padding +} + +// func (n *decNaked) init() { +// n.ru = reflect.ValueOf(&n.u).Elem() +// n.ri = reflect.ValueOf(&n.i).Elem() +// n.rf = reflect.ValueOf(&n.f).Elem() +// n.rl = reflect.ValueOf(&n.l).Elem() +// n.rs = reflect.ValueOf(&n.s).Elem() +// n.rt = reflect.ValueOf(&n.t).Elem() +// n.rb = reflect.ValueOf(&n.b).Elem() +// // n.rr[] = reflect.ValueOf(&n.) +// } + +// type decNakedPooler struct { +// n *decNaked +// nsp *sync.Pool +// } + +// // naked must be called before each call to .DecodeNaked, as they will use it. +// func (d *decNakedPooler) naked() *decNaked { +// if d.n == nil { +// // consider one of: +// // - get from sync.Pool (if GC is frequent, there's no value here) +// // - new alloc (safest. only init'ed if it a naked decode will be done) +// // - field in Decoder (makes the Decoder struct very big) +// // To support using a decoder where a DecodeNaked is not needed, +// // we prefer #1 or #2. +// // d.n = new(decNaked) // &d.nv // new(decNaked) // grab from a sync.Pool +// // d.n.init() +// var v interface{} +// d.nsp, v = pool.decNaked() +// d.n = v.(*decNaked) +// } +// return d.n +// } + +// func (d *decNakedPooler) end() { +// if d.n != nil { +// // if n != nil, then nsp != nil (they are always set together) +// d.nsp.Put(d.n) +// d.n, d.nsp = nil, nil +// } +// } + +// type rtid2rv struct { +// rtid uintptr +// rv reflect.Value +// } + +// -------------- + +type decReaderSwitch struct { + rb bytesDecReader + // ---- cpu cache line boundary? + ri *ioDecReader + bi *bufioDecReader + + mtr, str bool // whether maptype or slicetype are known types + + be bool // is binary encoding + js bool // is json handle + jsms bool // is json handle, and MapKeyAsString + esep bool // has elem separators + + // typ entryType + bytes bool // is bytes reader + bufio bool // is this a bufioDecReader? 
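+	// exactly one reader is active per Decoder: rb if bytes, bi if bufio, ri otherwise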
+} + +// numread, track and stopTrack are always inlined, as they just check int fields, etc. + +/* +func (z *decReaderSwitch) numread() int { + switch z.typ { + case entryTypeBytes: + return z.rb.numread() + case entryTypeIo: + return z.ri.numread() + default: + return z.bi.numread() + } +} +func (z *decReaderSwitch) track() { + switch z.typ { + case entryTypeBytes: + z.rb.track() + case entryTypeIo: + z.ri.track() + default: + z.bi.track() + } +} +func (z *decReaderSwitch) stopTrack() []byte { + switch z.typ { + case entryTypeBytes: + return z.rb.stopTrack() + case entryTypeIo: + return z.ri.stopTrack() + default: + return z.bi.stopTrack() + } +} + +func (z *decReaderSwitch) unreadn1() { + switch z.typ { + case entryTypeBytes: + z.rb.unreadn1() + case entryTypeIo: + z.ri.unreadn1() + default: + z.bi.unreadn1() + } +} +func (z *decReaderSwitch) readx(n int) []byte { + switch z.typ { + case entryTypeBytes: + return z.rb.readx(n) + case entryTypeIo: + return z.ri.readx(n) + default: + return z.bi.readx(n) + } +} +func (z *decReaderSwitch) readb(s []byte) { + switch z.typ { + case entryTypeBytes: + z.rb.readb(s) + case entryTypeIo: + z.ri.readb(s) + default: + z.bi.readb(s) + } +} +func (z *decReaderSwitch) readn1() uint8 { + switch z.typ { + case entryTypeBytes: + return z.rb.readn1() + case entryTypeIo: + return z.ri.readn1() + default: + return z.bi.readn1() + } +} +func (z *decReaderSwitch) skip(accept *bitset256) (token byte) { + switch z.typ { + case entryTypeBytes: + return z.rb.skip(accept) + case entryTypeIo: + return z.ri.skip(accept) + default: + return z.bi.skip(accept) + } +} +func (z *decReaderSwitch) readTo(in []byte, accept *bitset256) (out []byte) { + switch z.typ { + case entryTypeBytes: + return z.rb.readTo(in, accept) + case entryTypeIo: + return z.ri.readTo(in, accept) + default: + return z.bi.readTo(in, accept) + } +} +func (z *decReaderSwitch) readUntil(in []byte, stop byte) (out []byte) { + switch z.typ { + case entryTypeBytes: + return z.rb.readUntil(in, stop) + case entryTypeIo: + return z.ri.readUntil(in, stop) + default: + return z.bi.readUntil(in, stop) + } +} + +*/ + +// the if/else-if/else block is expensive to inline. +// Each node of this construct costs a lot and dominates the budget. +// Best to only do an if fast-path else block (so fast-path is inlined). +// This is irrespective of inlineExtraCallCost set in $GOROOT/src/cmd/compile/internal/gc/inl.go +// +// In decReaderSwitch methods below, we delegate all IO functions into their own methods. +// This allows for the inlining of the common path when z.bytes=true. +// Go 1.12+ supports inlining methods with up to 1 inlined function (or 2 if no other constructs). 
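+// For example, in readx below, the common bytes fast-path stays small enough
+// to inline at call sites, while the io-backed paths are delegated to readxIO.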
+ +func (z *decReaderSwitch) numread() uint { + if z.bytes { + return z.rb.numread() + } else if z.bufio { + return z.bi.numread() + } else { + return z.ri.numread() + } +} +func (z *decReaderSwitch) track() { + if z.bytes { + z.rb.track() + } else if z.bufio { + z.bi.track() + } else { + z.ri.track() + } +} +func (z *decReaderSwitch) stopTrack() []byte { + if z.bytes { + return z.rb.stopTrack() + } else if z.bufio { + return z.bi.stopTrack() + } else { + return z.ri.stopTrack() + } +} + +// func (z *decReaderSwitch) unreadn1() { +// if z.bytes { +// z.rb.unreadn1() +// } else { +// z.unreadn1IO() +// } +// } +// func (z *decReaderSwitch) unreadn1IO() { +// if z.bufio { +// z.bi.unreadn1() +// } else { +// z.ri.unreadn1() +// } +// } + +func (z *decReaderSwitch) unreadn1() { + if z.bytes { + z.rb.unreadn1() + } else if z.bufio { + z.bi.unreadn1() + } else { + z.ri.unreadn1() // not inlined + } +} + +func (z *decReaderSwitch) readx(n uint) []byte { + if z.bytes { + return z.rb.readx(n) + } + return z.readxIO(n) +} +func (z *decReaderSwitch) readxIO(n uint) []byte { + if z.bufio { + return z.bi.readx(n) + } + return z.ri.readx(n) +} + +func (z *decReaderSwitch) readb(s []byte) { + if z.bytes { + z.rb.readb(s) + } else { + z.readbIO(s) + } +} + +//go:noinline - fallback for io, ensures z.bytes path is inlined +func (z *decReaderSwitch) readbIO(s []byte) { + if z.bufio { + z.bi.readb(s) + } else { + z.ri.readb(s) + } +} + +func (z *decReaderSwitch) readn1() uint8 { + if z.bytes { + return z.rb.readn1() + } + return z.readn1IO() +} +func (z *decReaderSwitch) readn1IO() uint8 { + if z.bufio { + return z.bi.readn1() + } + return z.ri.readn1() +} + +func (z *decReaderSwitch) skip(accept *bitset256) (token byte) { + if z.bytes { + return z.rb.skip(accept) + } + return z.skipIO(accept) +} +func (z *decReaderSwitch) skipIO(accept *bitset256) (token byte) { + if z.bufio { + return z.bi.skip(accept) + } + return z.ri.skip(accept) +} + +func (z *decReaderSwitch) readTo(in []byte, accept *bitset256) (out []byte) { + if z.bytes { + return z.rb.readToNoInput(accept) // z.rb.readTo(in, accept) + } + return z.readToIO(in, accept) +} + +//go:noinline - fallback for io, ensures z.bytes path is inlined +func (z *decReaderSwitch) readToIO(in []byte, accept *bitset256) (out []byte) { + if z.bufio { + return z.bi.readTo(in, accept) + } + return z.ri.readTo(in, accept) +} +func (z *decReaderSwitch) readUntil(in []byte, stop byte) (out []byte) { + if z.bytes { + return z.rb.readUntilNoInput(stop) + } + return z.readUntilIO(in, stop) +} + +func (z *decReaderSwitch) readUntilIO(in []byte, stop byte) (out []byte) { + if z.bufio { + return z.bi.readUntil(in, stop) + } + return z.ri.readUntil(in, stop) +} + +// Decoder reads and decodes an object from an input stream in a supported format. +// +// Decoder is NOT safe for concurrent use i.e. a Decoder cannot be used +// concurrently in multiple goroutines. +// +// However, as Decoder could be allocation heavy to initialize, a Reset method is provided +// so its state can be reused to decode new input streams repeatedly. +// This is the idiomatic way to use. +type Decoder struct { + panicHdl + // hopefully, reduce derefencing cost by laying the decReader inside the Decoder. + // Try to put things that go together to fit within a cache line (8 words). + + d decDriver + + // NOTE: Decoder shouldn't call it's read methods, + // as the handler MAY need to do some coordination. 
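+	// Instead, reads are routed through the decReaderSwitch (r below / embedded field).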
+ r *decReaderSwitch + + // bi *bufioDecReader + // cache the mapTypeId and sliceTypeId for faster comparisons + mtid uintptr + stid uintptr + + hh Handle + h *BasicHandle + + // ---- cpu cache line boundary? + decReaderSwitch + + // ---- cpu cache line boundary? + n decNaked + + // cr containerStateRecv + err error + + depth int16 + maxdepth int16 + + _ [4]uint8 // padding + + is map[string]string // used for interning strings + + // ---- cpu cache line boundary? + b [decScratchByteArrayLen]byte // scratch buffer, used by Decoder and xxxEncDrivers + + // padding - false sharing help // modify 232 if Decoder struct changes. + // _ [cacheLineSize - 232%cacheLineSize]byte +} + +// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader. +// +// For efficiency, Users are encouraged to configure ReaderBufferSize on the handle +// OR pass in a memory buffered reader (eg bufio.Reader, bytes.Buffer). +func NewDecoder(r io.Reader, h Handle) *Decoder { + d := newDecoder(h) + d.Reset(r) + return d +} + +// NewDecoderBytes returns a Decoder which efficiently decodes directly +// from a byte slice with zero copying. +func NewDecoderBytes(in []byte, h Handle) *Decoder { + d := newDecoder(h) + d.ResetBytes(in) + return d +} + +// var defaultDecNaked decNaked + +func newDecoder(h Handle) *Decoder { + d := &Decoder{h: basicHandle(h), err: errDecoderNotInitialized} + d.bytes = true + if useFinalizers { + runtime.SetFinalizer(d, (*Decoder).finalize) + // xdebugf(">>>> new(Decoder) with finalizer") + } + d.r = &d.decReaderSwitch + d.hh = h + d.be = h.isBinary() + // NOTE: do not initialize d.n here. It is lazily initialized in d.naked() + var jh *JsonHandle + jh, d.js = h.(*JsonHandle) + if d.js { + d.jsms = jh.MapKeyAsString + } + d.esep = d.hh.hasElemSeparators() + if d.h.InternString { + d.is = make(map[string]string, 32) + } + d.d = h.newDecDriver(d) + // d.cr, _ = d.d.(containerStateRecv) + return d +} + +func (d *Decoder) resetCommon() { + // d.r = &d.decReaderSwitch + d.d.reset() + d.err = nil + d.depth = 0 + d.maxdepth = d.h.MaxDepth + if d.maxdepth <= 0 { + d.maxdepth = decDefMaxDepth + } + // reset all things which were cached from the Handle, but could change + d.mtid, d.stid = 0, 0 + d.mtr, d.str = false, false + if d.h.MapType != nil { + d.mtid = rt2id(d.h.MapType) + d.mtr = fastpathAV.index(d.mtid) != -1 + } + if d.h.SliceType != nil { + d.stid = rt2id(d.h.SliceType) + d.str = fastpathAV.index(d.stid) != -1 + } +} + +// Reset the Decoder with a new Reader to decode from, +// clearing all state from last run(s). +func (d *Decoder) Reset(r io.Reader) { + if r == nil { + return + } + d.bytes = false + // d.typ = entryTypeUnset + if d.h.ReaderBufferSize > 0 { + if d.bi == nil { + d.bi = new(bufioDecReader) + } + d.bi.reset(r, d.h.ReaderBufferSize) + // d.r = d.bi + // d.typ = entryTypeBufio + d.bufio = true + } else { + // d.ri.x = &d.b + // d.s = d.sa[:0] + if d.ri == nil { + d.ri = new(ioDecReader) + } + d.ri.reset(r) + // d.r = d.ri + // d.typ = entryTypeIo + d.bufio = false + } + d.resetCommon() +} + +// ResetBytes resets the Decoder with a new []byte to decode from, +// clearing all state from last run(s). +func (d *Decoder) ResetBytes(in []byte) { + if in == nil { + return + } + d.bytes = true + d.bufio = false + // d.typ = entryTypeBytes + d.rb.reset(in) + // d.r = &d.rb + d.resetCommon() +} + +func (d *Decoder) naked() *decNaked { + return &d.n +} + +// Decode decodes the stream from reader and stores the result in the +// value pointed to by v. 
v cannot be a nil pointer. v can also be
+// a reflect.Value of a pointer.
+//
+// Note that a pointer to a nil interface is not a nil pointer.
+// If you do not know what type of stream it is, pass in a pointer to a nil interface.
+// We will decode and store a value in that nil interface.
+//
+// Sample usages:
+//
+//   // Decoding into a non-nil typed value
+//   var f float32
+//   err = codec.NewDecoder(r, handle).Decode(&f)
+//
+//   // Decoding into nil interface
+//   var v interface{}
+//   dec := codec.NewDecoder(r, handle)
+//   err = dec.Decode(&v)
+//
+// When decoding into a nil interface{}, we will decode into an appropriate value based
+// on the contents of the stream:
+//   - Numbers are decoded as float64, int64 or uint64.
+//   - Other values are decoded appropriately depending on the type:
+//     bool, string, []byte, time.Time, etc
+//   - Extensions are decoded as RawExt (if no ext function registered for the tag)
+//
+// Configurations exist on the Handle to override defaults
+// (e.g. for MapType, SliceType and how to decode raw bytes).
+//
+// When decoding into a non-nil interface{} value, the mode of decoding is based on the
+// type of the value. When a value is seen:
+//   - If an extension is registered for it, call that extension function
+//   - If it implements BinaryUnmarshaler, call its UnmarshalBinary(data []byte) error
+//   - Else decode it based on its reflect.Kind
+//
+// There are some special rules when decoding into containers (slice/array/map/struct).
+// Decode will typically use the stream contents to UPDATE the container i.e. the values
+// in these containers will not be zeroed before decoding.
+//   - A map can be decoded from a stream map, by updating matching keys.
+//   - A slice can be decoded from a stream array,
+//     by updating the first n elements, where n is the length of the stream.
+//   - A slice can be decoded from a stream map, by decoding as if
+//     it contains a sequence of key-value pairs.
+//   - A struct can be decoded from a stream map, by updating matching fields.
+//   - A struct can be decoded from a stream array,
+//     by updating fields as they occur in the struct (by index).
+//
+// This in-place update maintains consistency in the decoding philosophy (i.e. we ALWAYS update
+// in place by default). However, the consequence of this is that values in slices or maps
+// which are not zeroed beforehand will retain part of the prior values after decode
+// if the stream doesn't contain an update for those parts.
+//
+// This in-place update can be disabled by configuring the MapValueReset and SliceElementReset
+// decode options available on every handle.
+//
+// Furthermore, when decoding a stream map or array with length of 0 into a nil map or slice,
+// we reset the destination map or slice to a zero-length value.
+//
+// However, when decoding a stream nil, we reset the destination container
+// to its "zero" value (e.g. nil for slice/map, etc).
+//
+// Note: we allow nil values in the stream anywhere except for map keys.
+// A nil value in the encoded stream where a map key is expected is treated as an error.
+func (d *Decoder) Decode(v interface{}) (err error) {
+	// tried to use a closure, as the runtime optimizes defer with no params.
+	// This seemed to be causing weird issues (like circular reference found, unexpected panic, etc).
+ // Also, see https://github.com/golang/go/issues/14939#issuecomment-417836139 + // defer func() { d.deferred(&err) }() + // { x, y := d, &err; defer func() { x.deferred(y) }() } + if d.err != nil { + return d.err + } + if recoverPanicToErr { + defer func() { + if x := recover(); x != nil { + panicValToErr(d, x, &d.err) + err = d.err + } + }() + } + + // defer d.deferred(&err) + d.mustDecode(v) + return +} + +// MustDecode is like Decode, but panics if unable to Decode. +// This provides insight to the code location that triggered the error. +func (d *Decoder) MustDecode(v interface{}) { + if d.err != nil { + panic(d.err) + } + d.mustDecode(v) +} + +// MustDecode is like Decode, but panics if unable to Decode. +// This provides insight to the code location that triggered the error. +func (d *Decoder) mustDecode(v interface{}) { + // TODO: Top-level: ensure that v is a pointer and not nil. + if d.d.TryDecodeAsNil() { + setZero(v) + return + } + if d.bi == nil { + d.decode(v) + return + } + + d.bi.calls++ + d.decode(v) + // xprintf.(">>>>>>>> >>>>>>>> num decFns: %v\n", d.cf.sn) + d.bi.calls-- + if !d.h.ExplicitRelease && d.bi.calls == 0 { + d.bi.release() + } +} + +// func (d *Decoder) deferred(err1 *error) { +// if recoverPanicToErr { +// if x := recover(); x != nil { +// panicValToErr(d, x, err1) +// panicValToErr(d, x, &d.err) +// } +// } +// } + +//go:noinline -- as it is run by finalizer +func (d *Decoder) finalize() { + // xdebugf("finalizing Decoder") + d.Release() +} + +// Release releases shared (pooled) resources. +// +// It is important to call Release() when done with a Decoder, so those resources +// are released instantly for use by subsequently created Decoders. +// +// By default, Release() is automatically called unless the option ExplicitRelease is set. +func (d *Decoder) Release() { + if d.bi != nil { + d.bi.release() + } + // d.decNakedPooler.end() +} + +// // this is not a smart swallow, as it allocates objects and does unnecessary work. +// func (d *Decoder) swallowViaHammer() { +// var blank interface{} +// d.decodeValueNoFn(reflect.ValueOf(&blank).Elem()) +// } + +func (d *Decoder) swallow() { + // smarter decode that just swallows the content + dd := d.d + if dd.TryDecodeAsNil() { + return + } + elemsep := d.esep + switch dd.ContainerType() { + case valueTypeMap: + containerLen := dd.ReadMapStart() + d.depthIncr() + hasLen := containerLen >= 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + // if clenGtEqualZero {if j >= containerLen {break} } else if dd.CheckBreak() {break} + if elemsep { + dd.ReadMapElemKey() + } + d.swallow() + if elemsep { + dd.ReadMapElemValue() + } + d.swallow() + } + dd.ReadMapEnd() + d.depthDecr() + case valueTypeArray: + containerLen := dd.ReadArrayStart() + d.depthIncr() + hasLen := containerLen >= 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if elemsep { + dd.ReadArrayElem() + } + d.swallow() + } + dd.ReadArrayEnd() + d.depthDecr() + case valueTypeBytes: + dd.DecodeBytes(d.b[:], true) + case valueTypeString: + dd.DecodeStringAsBytes() + default: + // these are all primitives, which we can get from decodeNaked + // if RawExt using Value, complete the processing. 
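+		// (an ext whose payload was not captured as bytes still has its value pending in the stream)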
+ n := d.naked() + dd.DecodeNaked() + if n.v == valueTypeExt && n.l == nil { + var v2 interface{} + d.decode(&v2) + } + } +} + +func setZero(iv interface{}) { + if iv == nil || definitelyNil(iv) { + return + } + var canDecode bool + switch v := iv.(type) { + case *string: + *v = "" + case *bool: + *v = false + case *int: + *v = 0 + case *int8: + *v = 0 + case *int16: + *v = 0 + case *int32: + *v = 0 + case *int64: + *v = 0 + case *uint: + *v = 0 + case *uint8: + *v = 0 + case *uint16: + *v = 0 + case *uint32: + *v = 0 + case *uint64: + *v = 0 + case *float32: + *v = 0 + case *float64: + *v = 0 + case *[]uint8: + *v = nil + case *Raw: + *v = nil + case *time.Time: + *v = time.Time{} + case reflect.Value: + if v, canDecode = isDecodeable(v); canDecode && v.CanSet() { + v.Set(reflect.Zero(v.Type())) + } // TODO: else drain if chan, clear if map, set all to nil if slice??? + default: + if !fastpathDecodeSetZeroTypeSwitch(iv) { + v := reflect.ValueOf(iv) + if v, canDecode = isDecodeable(v); canDecode && v.CanSet() { + v.Set(reflect.Zero(v.Type())) + } // TODO: else drain if chan, clear if map, set all to nil if slice??? + } + } +} + +func (d *Decoder) decode(iv interface{}) { + // a switch with only concrete types can be optimized. + // consequently, we deal with nil and interfaces outside the switch. + + if iv == nil { + d.errorstr(errstrCannotDecodeIntoNil) + return + } + + switch v := iv.(type) { + // case nil: + // case Selfer: + case reflect.Value: + v = d.ensureDecodeable(v) + d.decodeValue(v, nil, true) + + case *string: + *v = d.d.DecodeString() + case *bool: + *v = d.d.DecodeBool() + case *int: + *v = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + case *int8: + *v = int8(chkOvf.IntV(d.d.DecodeInt64(), 8)) + case *int16: + *v = int16(chkOvf.IntV(d.d.DecodeInt64(), 16)) + case *int32: + *v = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + case *int64: + *v = d.d.DecodeInt64() + case *uint: + *v = uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) + case *uint8: + *v = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + case *uint16: + *v = uint16(chkOvf.UintV(d.d.DecodeUint64(), 16)) + case *uint32: + *v = uint32(chkOvf.UintV(d.d.DecodeUint64(), 32)) + case *uint64: + *v = d.d.DecodeUint64() + case *float32: + f64 := d.d.DecodeFloat64() + if chkOvf.Float32(f64) { + d.errorf("float32 overflow: %v", f64) + } + *v = float32(f64) + case *float64: + *v = d.d.DecodeFloat64() + case *[]uint8: + *v = d.d.DecodeBytes(*v, false) + case []uint8: + b := d.d.DecodeBytes(v, false) + if !(len(b) > 0 && len(b) == len(v) && &b[0] == &v[0]) { + copy(v, b) + } + case *time.Time: + *v = d.d.DecodeTime() + case *Raw: + *v = d.rawBytes() + + case *interface{}: + d.decodeValue(reflect.ValueOf(iv).Elem(), nil, true) + // d.decodeValueNotNil(reflect.ValueOf(iv).Elem()) + + default: + if v, ok := iv.(Selfer); ok { + v.CodecDecodeSelf(d) + } else if !fastpathDecodeTypeSwitch(iv, d) { + v := reflect.ValueOf(iv) + v = d.ensureDecodeable(v) + d.decodeValue(v, nil, false) + // d.decodeValueFallback(v) + } + } +} + +func (d *Decoder) decodeValue(rv reflect.Value, fn *codecFn, chkAll bool) { + // If stream is not containing a nil value, then we can deref to the base + // non-pointer value, and decode into that. 
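+	// follow the pointer chain (allocating nil pointers along the way) to reach the base value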
+ var rvp reflect.Value + var rvpValid bool + if rv.Kind() == reflect.Ptr { + rvpValid = true + for { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + rvp = rv + rv = rv.Elem() + if rv.Kind() != reflect.Ptr { + break + } + } + } + + if fn == nil { + // always pass checkCodecSelfer=true, in case T or ****T is passed, where *T is a Selfer + fn = d.h.fn(rv.Type(), chkAll, true) // chkAll, chkAll) + } + if fn.i.addrD { + if rvpValid { + fn.fd(d, &fn.i, rvp) + } else if rv.CanAddr() { + fn.fd(d, &fn.i, rv.Addr()) + } else if !fn.i.addrF { + fn.fd(d, &fn.i, rv) + } else { + d.errorf("cannot decode into a non-pointer value") + } + } else { + fn.fd(d, &fn.i, rv) + } + // return rv +} + +func (d *Decoder) structFieldNotFound(index int, rvkencname string) { + // NOTE: rvkencname may be a stringView, so don't pass it to another function. + if d.h.ErrorIfNoField { + if index >= 0 { + d.errorf("no matching struct field found when decoding stream array at index %v", index) + return + } else if rvkencname != "" { + d.errorf("no matching struct field found when decoding stream map with key " + rvkencname) + return + } + } + d.swallow() +} + +func (d *Decoder) arrayCannotExpand(sliceLen, streamLen int) { + if d.h.ErrorIfNoArrayExpand { + d.errorf("cannot expand array len during decode from %v to %v", sliceLen, streamLen) + } +} + +func isDecodeable(rv reflect.Value) (rv2 reflect.Value, canDecode bool) { + switch rv.Kind() { + case reflect.Array: + return rv, rv.CanAddr() + case reflect.Ptr: + if !rv.IsNil() { + return rv.Elem(), true + } + case reflect.Slice, reflect.Chan, reflect.Map: + if !rv.IsNil() { + return rv, true + } + } + return +} + +func (d *Decoder) ensureDecodeable(rv reflect.Value) (rv2 reflect.Value) { + // decode can take any reflect.Value that is a inherently addressable i.e. + // - array + // - non-nil chan (we will SEND to it) + // - non-nil slice (we will set its elements) + // - non-nil map (we will put into it) + // - non-nil pointer (we can "update" it) + rv2, canDecode := isDecodeable(rv) + if canDecode { + return + } + if !rv.IsValid() { + d.errorstr(errstrCannotDecodeIntoNil) + return + } + if !rv.CanInterface() { + d.errorf("cannot decode into a value without an interface: %v", rv) + return + } + rvi := rv2i(rv) + rvk := rv.Kind() + d.errorf("cannot decode into value of kind: %v, type: %T, %v", rvk, rvi, rvi) + return +} + +func (d *Decoder) depthIncr() { + d.depth++ + if d.depth >= d.maxdepth { + panic(errMaxDepthExceeded) + } +} + +func (d *Decoder) depthDecr() { + d.depth-- +} + +// Possibly get an interned version of a string +// +// This should mostly be used for map keys, where the key type is string. +// This is because keys of a map/struct are typically reused across many objects. +func (d *Decoder) string(v []byte) (s string) { + if d.is == nil { + return string(v) // don't return stringView, as we need a real string here. + } + s, ok := d.is[string(v)] // no allocation here, per go implementation + if !ok { + s = string(v) // new allocation here + d.is[s] = s + } + return s +} + +// nextValueBytes returns the next value in the stream as a set of bytes. +func (d *Decoder) nextValueBytes() (bs []byte) { + d.d.uncacheRead() + d.r.track() + d.swallow() + bs = d.r.stopTrack() + return +} + +func (d *Decoder) rawBytes() []byte { + // ensure that this is not a view into the bytes + // i.e. make new copy always. 
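+	// (stopTrack may return a view into the input bytes or the reusable tracking buffer)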
+ bs := d.nextValueBytes() + bs2 := make([]byte, len(bs)) + copy(bs2, bs) + return bs2 +} + +func (d *Decoder) wrapErr(v interface{}, err *error) { + *err = decodeError{codecError: codecError{name: d.hh.Name(), err: v}, pos: int(d.r.numread())} +} + +// NumBytesRead returns the number of bytes read +func (d *Decoder) NumBytesRead() int { + return int(d.r.numread()) +} + +// -------------------------------------------------- + +// decSliceHelper assists when decoding into a slice, from a map or an array in the stream. +// A slice can be set from a map or array in stream. This supports the MapBySlice interface. +type decSliceHelper struct { + d *Decoder + // ct valueType + array bool +} + +func (d *Decoder) decSliceHelperStart() (x decSliceHelper, clen int) { + dd := d.d + ctyp := dd.ContainerType() + switch ctyp { + case valueTypeArray: + x.array = true + clen = dd.ReadArrayStart() + case valueTypeMap: + clen = dd.ReadMapStart() * 2 + default: + d.errorf("only encoded map or array can be decoded into a slice (%d)", ctyp) + } + // x.ct = ctyp + x.d = d + return +} + +func (x decSliceHelper) End() { + if x.array { + x.d.d.ReadArrayEnd() + } else { + x.d.d.ReadMapEnd() + } +} + +func (x decSliceHelper) ElemContainerState(index int) { + if x.array { + x.d.d.ReadArrayElem() + } else if index%2 == 0 { + x.d.d.ReadMapElemKey() + } else { + x.d.d.ReadMapElemValue() + } +} + +func decByteSlice(r *decReaderSwitch, clen, maxInitLen int, bs []byte) (bsOut []byte) { + if clen == 0 { + return zeroByteSlice + } + if len(bs) == clen { + bsOut = bs + r.readb(bsOut) + } else if cap(bs) >= clen { + bsOut = bs[:clen] + r.readb(bsOut) + } else { + // bsOut = make([]byte, clen) + len2 := decInferLen(clen, maxInitLen, 1) + bsOut = make([]byte, len2) + r.readb(bsOut) + for len2 < clen { + len3 := decInferLen(clen-len2, maxInitLen, 1) + bs3 := bsOut + bsOut = make([]byte, len2+len3) + copy(bsOut, bs3) + r.readb(bsOut[len2:]) + len2 += len3 + } + } + return +} + +// func decByteSliceZeroCopy(r decReader, clen, maxInitLen int, bs []byte) (bsOut []byte) { +// if _, ok := r.(*bytesDecReader); ok && clen <= maxInitLen { +// return r.readx(clen) +// } +// return decByteSlice(r, clen, maxInitLen, bs) +// } + +func detachZeroCopyBytes(isBytesReader bool, dest []byte, in []byte) (out []byte) { + if xlen := len(in); xlen > 0 { + if isBytesReader || xlen <= scratchByteArrayLen { + if cap(dest) >= xlen { + out = dest[:xlen] + } else { + out = make([]byte, xlen) + } + copy(out, in) + return + } + } + return in +} + +// decInferLen will infer a sensible length, given the following: +// - clen: length wanted. +// - maxlen: max length to be returned. +// if <= 0, it is unset, and we infer it based on the unit size +// - unit: number of bytes for each element of the collection +func decInferLen(clen, maxlen, unit int) (rvlen int) { + // handle when maxlen is not set i.e. <= 0 + if clen <= 0 { + return + } + if unit == 0 { + return clen + } + if maxlen <= 0 { + // no maxlen defined. Use maximum of 256K memory, with a floor of 4K items. 
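+		// (items of 64 bytes or larger just get the 4K-item floor, per the branch below)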
+ // maxlen = 256 * 1024 / unit + // if maxlen < (4 * 1024) { + // maxlen = 4 * 1024 + // } + if unit < (256 / 4) { + maxlen = 256 * 1024 / unit + } else { + maxlen = 4 * 1024 + } + } + if clen > maxlen { + rvlen = maxlen + } else { + rvlen = clen + } + return +} + +func expandSliceRV(s reflect.Value, st reflect.Type, canChange bool, stElemSize, num, slen, scap int) ( + s2 reflect.Value, scap2 int, changed bool, err string) { + l1 := slen + num // new slice length + if l1 < slen { + err = errmsgExpandSliceOverflow + return + } + if l1 <= scap { + if s.CanSet() { + s.SetLen(l1) + } else if canChange { + s2 = s.Slice(0, l1) + scap2 = scap + changed = true + } else { + err = errmsgExpandSliceCannotChange + return + } + return + } + if !canChange { + err = errmsgExpandSliceCannotChange + return + } + scap2 = growCap(scap, stElemSize, num) + s2 = reflect.MakeSlice(st, l1, scap2) + changed = true + reflect.Copy(s2, s) + return +} + +func decReadFull(r io.Reader, bs []byte) (n uint, err error) { + var nn int + for n < uint(len(bs)) && err == nil { + nn, err = r.Read(bs[n:]) + if nn > 0 { + if err == io.EOF { + // leave EOF for next time + err = nil + } + n += uint(nn) + } + } + // xdebugf("decReadFull: len(bs): %v, n: %v, err: %v", len(bs), n, err) + // do not do this - it serves no purpose + // if n != len(bs) && err == io.EOF { err = io.ErrUnexpectedEOF } + return +} + +func decNakedReadRawBytes(dr decDriver, d *Decoder, n *decNaked, rawToString bool) { + if rawToString { + n.v = valueTypeString + n.s = string(dr.DecodeBytes(d.b[:], true)) + } else { + n.v = valueTypeBytes + n.l = dr.DecodeBytes(nil, false) + } +} diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/doc.go b/vendor/github.com/hashicorp/go-msgpack/v2/codec/doc.go new file mode 100644 index 0000000000000..deac00010c2bf --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/doc.go @@ -0,0 +1,233 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +/* +Package codec provides a High Performance, Feature-Rich Idiomatic +codec/encoding library for msgpack, json. + +Supported Serialization formats are: + + - msgpack: https://github.com/msgpack/msgpack + - json: http://json.org http://tools.ietf.org/html/rfc7159 + +For detailed usage information, read the primer at +http://ugorji.net/blog/go-codec-primer . + +The idiomatic Go support is as seen in other encoding packages in the +standard library (ie json, xml, gob, etc). + +Rich Feature Set includes: + + - Simple but extremely powerful and feature-rich API + - Excellent code coverage ( > 90% ) + - Very High Performance. + Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X. + - Lock-free (sans mutex) concurrency for scaling to 100's of cores + - In-place updates during decode, with option to zero value in maps and slices prior to decode + - Coerce types where appropriate + e.g. 
decode an int in the stream into a float, decode numbers from formatted strings, etc
+  - Corner Cases:
+    Overflows, nil maps/slices, nil values in streams are handled correctly
+  - Standard field renaming via tags
+  - Support for omitting empty fields during an encoding
+  - Encoding from any value and decoding into pointer to any value
+    (struct, slice, map, primitives, pointers, interface{}, etc)
+  - Extensions to support efficient encoding/decoding of any named types
+  - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
+  - Support IsZero() bool to determine if a value is a zero value.
+    Analogous to time.Time.IsZero() bool.
+  - Decoding without a schema (into an interface{}).
+    Includes Options to configure what specific map or slice type to use
+    when decoding an encoded list or map into a nil interface{}
+  - Mapping a non-interface type to an interface, so we can decode appropriately
+    into any interface type with a correctly configured non-interface value.
+  - Encode a struct as an array, and decode a struct from an array in the data stream
+  - Option to encode struct keys as numbers (instead of strings)
+    (to support structured streams with fields encoded as numeric codes)
+  - Comprehensive support for anonymous fields
+  - Fast (no-reflection) encoding/decoding of common maps and slices
+  - Code-generation for faster performance.
+  - Support binary (e.g. messagepack) and text (e.g. json) formats
+  - Support indefinite-length formats to enable true streaming
+    (for formats which support it e.g. json)
+  - Support canonical encoding, where a value is ALWAYS encoded as the same sequence of bytes.
+    This mostly applies to maps, where iteration order is non-deterministic.
+  - NIL in data stream decoded as zero value
+  - Never silently skip data when decoding.
+    User decides whether to return an error or silently skip data when keys or indexes
+    in the data stream do not map to fields in the struct.
+  - Detect and error when encoding a cyclic reference (instead of a stack overflow shutdown)
+  - Encode/Decode from/to chan types (for iterative streaming support)
+  - Drop-in replacement for encoding/json. `json:` key in struct tag supported.
+  - Provides an RPC Server and Client Codec for net/rpc communication protocol.
+  - Handle unique idiosyncrasies of codecs e.g.
+    - For messagepack, configure how ambiguities in handling raw bytes are resolved
+    - For messagepack, provide rpc server/client codec to support
+      msgpack-rpc protocol defined at:
+      https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
+
+
+## Extension Support
+
+Users can register a function to handle the encoding or decoding of their
+custom types.
+
+There are no restrictions on what the custom type can be. Some examples:
+
+```go
+	type BisSet   []int
+	type BitSet64 uint64
+	type UUID     string
+	type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
+	type GifImage struct { ... }
+```
+
+As an illustration, MyStructWithUnexportedFields would normally be encoded
+as an empty map because it has no exported fields, while UUID would be
+encoded as a string. However, with extension support, you can encode any of
+these however you like.
+
+
+## Custom Encoding and Decoding
+
+This package maintains symmetry in the encoding and decoding halves. We
+determine how to encode or decode by walking this decision tree:
+
+  - is type a codec.Selfer?
+  - is there an extension registered for the type?
+  - is format binary, and is type an encoding.BinaryMarshaler and BinaryUnmarshaler?
+ - is format specifically json, and is type a encoding/json.Marshaler and Unmarshaler? + - is format text-based, and type an encoding.TextMarshaler and TextUnmarshaler? + - else we use a pair of functions based on the "kind" of the type e.g. map, slice, int64, etc + +This symmetry is important to reduce chances of issues happening because the +encoding and decoding sides are out of sync e.g. decoded via very specific +encoding.TextUnmarshaler but encoded via kind-specific generalized mode. + +Consequently, if a type only defines one-half of the symmetry (e.g. it +implements UnmarshalJSON() but not MarshalJSON() ), then that type doesn't +satisfy the check and we will continue walking down the decision tree. + + +## RPC + +RPC Client and Server Codecs are implemented, so the codecs can be used with +the standard net/rpc package. + + +## Usage + +The Handle is SAFE for concurrent READ, but NOT SAFE for concurrent +modification. + +The Encoder and Decoder are NOT safe for concurrent use. + +Consequently, the usage model is basically: + + - Create and initialize the Handle before any use. + Once created, DO NOT modify it. + - Multiple Encoders or Decoders can now use the Handle concurrently. + They only read information off the Handle (never write). + - However, each Encoder or Decoder MUST not be used concurrently + - To re-use an Encoder/Decoder, call Reset(...) on it first. + This allows you use state maintained on the Encoder/Decoder. + +Sample usage model: + +```go + // create and configure Handle + var ( + mh codec.MsgpackHandle + ) + + mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) + + // configure extensions + // e.g. for msgpack, define functions and enable Time support for tag 1 + mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt) + + // create and use decoder/encoder + var ( + r io.Reader + w io.Writer + b []byte + h = &mh + ) + + dec = codec.NewDecoder(r, h) + dec = codec.NewDecoderBytes(b, h) + err = dec.Decode(&v) + + enc = codec.NewEncoder(w, h) + enc = codec.NewEncoderBytes(&b, h) + err = enc.Encode(v) + + //RPC Server + go func() { + for { + conn, err := listener.Accept() + rpcCodec := codec.GoRpc.ServerCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) + rpc.ServeCodec(rpcCodec) + } + }() + + //RPC Communication (client side) + conn, err = net.Dial("tcp", "localhost:5555") + rpcCodec := codec.GoRpc.ClientCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) + client := rpc.NewClientWithCodec(rpcCodec) +``` + + +## Running Tests + +To run tests, use the following: + +``` + go test +``` + +To run the full suite of tests, use the following: + +``` + go test -tags alltests -run Suite +``` + +You can run the tag 'safe' to run tests or build in safe mode. e.g. + +``` + go test -tags safe -run Json + go test -tags "alltests safe" -run Suite +``` + +## Running Benchmarks + +``` + cd codec/bench + ./bench.sh -d + ./bench.sh -c + ./bench.sh -s + go test -bench . -benchmem -benchtime 1s +``` + +Please see http://github.com/hashicorp/go-codec-bench . + + +## Caveats + +Struct fields matching the following are ignored during encoding and +decoding + + - struct tag value set to - + - func, complex numbers, unsafe pointers + - unexported and not embedded + - unexported and embedded and not struct kind + - unexported and embedded pointers + +Every other field in a struct will be encoded/decoded. + +Embedded fields are encoded as if they exist in the top-level struct, with +some caveats. See Encode documentation. 
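+
+As a small illustration of these rules (T is a hypothetical type, not part
+of this package):
+
+```go
+	type T struct {
+		A int                // encoded
+		B string `codec:"-"` // ignored: struct tag value set to -
+		F func()             // ignored: func kind
+		c int                // ignored: unexported and not embedded
+	}
+```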
+*/ +package codec diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/encode.go b/vendor/github.com/hashicorp/go-msgpack/v2/codec/encode.go new file mode 100644 index 0000000000000..29c723e13e5f6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/encode.go @@ -0,0 +1,1812 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "encoding" + "errors" + "fmt" + "io" + "reflect" + "runtime" + "sort" + "strconv" + "time" +) + +// defEncByteBufSize is the default size of []byte used +// for bufio buffer or []byte (when nil passed) +const defEncByteBufSize = 1 << 10 // 4:16, 6:64, 8:256, 10:1024 + +var errEncoderNotInitialized = errors.New("Encoder not initialized") + +/* + +// encWriter abstracts writing to a byte array or to an io.Writer. +// +// +// Deprecated: Use encWriterSwitch instead. +type encWriter interface { + writeb([]byte) + writestr(string) + writen1(byte) + writen2(byte, byte) + end() +} + +*/ + +// encDriver abstracts the actual codec (binc vs msgpack, etc) +type encDriver interface { + EncodeNil() + EncodeInt(i int64) + EncodeUint(i uint64) + EncodeBool(b bool) + EncodeFloat32(f float32) + EncodeFloat64(f float64) + // encodeExtPreamble(xtag byte, length int) + EncodeRawExt(re *RawExt, e *Encoder) + EncodeExt(v interface{}, xtag uint64, ext Ext, e *Encoder) + // Deprecated: use EncodeStringEnc instead + EncodeString(c charEncoding, v string) + // Deprecated: use EncodeStringBytesRaw instead + EncodeStringBytes(c charEncoding, v []byte) + EncodeStringEnc(c charEncoding, v string) // c cannot be cRAW + // EncodeSymbol(v string) + EncodeStringBytesRaw(v []byte) + EncodeTime(time.Time) + //encBignum(f *big.Int) + //encStringRunes(c charEncoding, v []rune) + WriteArrayStart(length int) + WriteArrayElem() + WriteArrayEnd() + WriteMapStart(length int) + WriteMapElemKey() + WriteMapElemValue() + WriteMapEnd() + + reset() + atEndOfEncode() +} + +type encDriverAsis interface { + EncodeAsis(v []byte) +} + +type encodeError struct { + codecError +} + +func (e encodeError) Error() string { + return fmt.Sprintf("%s encode error: %v", e.name, e.err) +} + +type encDriverNoopContainerWriter struct{} + +func (encDriverNoopContainerWriter) WriteArrayStart(length int) {} +func (encDriverNoopContainerWriter) WriteArrayElem() {} +func (encDriverNoopContainerWriter) WriteArrayEnd() {} +func (encDriverNoopContainerWriter) WriteMapStart(length int) {} +func (encDriverNoopContainerWriter) WriteMapElemKey() {} +func (encDriverNoopContainerWriter) WriteMapElemValue() {} +func (encDriverNoopContainerWriter) WriteMapEnd() {} +func (encDriverNoopContainerWriter) atEndOfEncode() {} + +type encDriverTrackContainerWriter struct { + c containerState +} + +func (e *encDriverTrackContainerWriter) WriteArrayStart(length int) { e.c = containerArrayStart } +func (e *encDriverTrackContainerWriter) WriteArrayElem() { e.c = containerArrayElem } +func (e *encDriverTrackContainerWriter) WriteArrayEnd() { e.c = containerArrayEnd } +func (e *encDriverTrackContainerWriter) WriteMapStart(length int) { e.c = containerMapStart } +func (e *encDriverTrackContainerWriter) WriteMapElemKey() { e.c = containerMapKey } +func (e *encDriverTrackContainerWriter) WriteMapElemValue() { e.c = containerMapValue } +func (e *encDriverTrackContainerWriter) WriteMapEnd() { e.c = containerMapEnd } +func (e *encDriverTrackContainerWriter) atEndOfEncode() {} + +// type ioEncWriterWriter interface { 
+// 	WriteByte(c byte) error
+// 	WriteString(s string) (n int, err error)
+// 	Write(p []byte) (n int, err error)
+// }
+
+// EncodeOptions captures configuration options during encode.
+type EncodeOptions struct {
+	// WriterBufferSize is the size of the buffer used when writing.
+	//
+	// If > 0, we use a smart buffer internally for performance purposes.
+	WriterBufferSize int
+
+	// ChanRecvTimeout is the timeout used when selecting from a chan.
+	//
+	// Configuring this controls how we receive from a chan during the encoding process.
+	//   - If ==0, we only consume the elements currently available in the chan.
+	//   - If <0, we consume until the chan is closed.
+	//   - If >0, we consume until this timeout.
+	ChanRecvTimeout time.Duration
+
+	// StructToArray specifies to encode a struct as an array, and not as a map
+	StructToArray bool
+
+	// Canonical representation means that encoding a value will always result in the same
+	// sequence of bytes.
+	//
+	// This only affects maps, as the iteration order for maps is random.
+	//
+	// The implementation MAY use the natural sort order for the map keys if possible:
+	//
+	//   - If there is a natural sort order (ie for number, bool, string or []byte keys),
+	//     then the map keys are first sorted in natural order and then written
+	//     with corresponding map values to the stream.
+	//   - If there is no natural sort order, then the map keys will first be
+	//     encoded into []byte, and then sorted,
+	//     before writing the sorted keys and the corresponding map values to the stream.
+	//
+	Canonical bool
+
+	// CheckCircularRef controls whether we check for circular references
+	// and error fast during an encode.
+	//
+	// If enabled, an error is received if a pointer to a struct
+	// references itself either directly or through one of its fields (iteratively).
+	//
+	// This is opt-in, as there may be a performance hit to checking circular references.
+	CheckCircularRef bool
+
+	// RecursiveEmptyCheck controls whether we descend into interfaces, structs and pointers
+	// when checking if a value is empty.
+	//
+	// Note that this may make OmitEmpty more expensive, as it incurs a lot more reflect calls.
+	RecursiveEmptyCheck bool
+
+	// Raw controls whether we encode Raw values.
+	// This is a "dangerous" option and must be explicitly set.
+	// If set, we blindly encode Raw values as-is, without checking
+	// if they are a correct representation of a value in that format.
+	// If unset, we error out.
+	Raw bool
+
+	// StringToRaw controls how strings are encoded.
+	//
+	// As a go string is just an (immutable) sequence of bytes,
+	// it can be encoded either as raw bytes or as a UTF string.
+	//
+	// By default, strings are encoded as UTF-8,
+	// but can be treated as []byte during an encode.
+	//
+	// Note that things which we know (by definition) to be UTF-8
+	// are ALWAYS encoded as UTF-8 strings.
+	// These include encoding.TextMarshaler, time.Format calls, struct field names, etc.
+	StringToRaw bool
+
+	// // AsSymbols defines what should be encoded as symbols.
+	// //
+	// // Encoding as symbols can reduce the encoded size significantly.
+	// //
+	// // However, during decoding, each string to be encoded as a symbol must
+	// // be checked to see if it has been seen before. Consequently, encoding time
+	// // will increase if using symbols, because string comparisons have a clear cost.
+ // // + // // Sample values: + // // AsSymbolNone + // // AsSymbolAll + // // AsSymbolMapStringKeys + // // AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag + // AsSymbols AsSymbolFlag +} + +// --------------------------------------------- + +/* + +type ioEncStringWriter interface { + WriteString(s string) (n int, err error) +} + +// ioEncWriter implements encWriter and can write to an io.Writer implementation +type ioEncWriter struct { + w io.Writer + ww io.Writer + bw io.ByteWriter + sw ioEncStringWriter + fw ioFlusher + b [8]byte +} + +func (z *ioEncWriter) reset(w io.Writer) { + z.w = w + var ok bool + if z.bw, ok = w.(io.ByteWriter); !ok { + z.bw = z + } + if z.sw, ok = w.(ioEncStringWriter); !ok { + z.sw = z + } + z.fw, _ = w.(ioFlusher) + z.ww = w +} + +func (z *ioEncWriter) WriteByte(b byte) (err error) { + z.b[0] = b + _, err = z.w.Write(z.b[:1]) + return +} + +func (z *ioEncWriter) WriteString(s string) (n int, err error) { + return z.w.Write(bytesView(s)) +} + +func (z *ioEncWriter) writeb(bs []byte) { + if _, err := z.ww.Write(bs); err != nil { + panic(err) + } +} + +func (z *ioEncWriter) writestr(s string) { + if _, err := z.sw.WriteString(s); err != nil { + panic(err) + } +} + +func (z *ioEncWriter) writen1(b byte) { + if err := z.bw.WriteByte(b); err != nil { + panic(err) + } +} + +func (z *ioEncWriter) writen2(b1, b2 byte) { + var err error + if err = z.bw.WriteByte(b1); err == nil { + if err = z.bw.WriteByte(b2); err == nil { + return + } + } + panic(err) +} + +// func (z *ioEncWriter) writen5(b1, b2, b3, b4, b5 byte) { +// z.b[0], z.b[1], z.b[2], z.b[3], z.b[4] = b1, b2, b3, b4, b5 +// if _, err := z.ww.Write(z.b[:5]); err != nil { +// panic(err) +// } +// } + +//go:noinline - so *encWriterSwitch.XXX has the bytesEncAppender.XXX inlined +func (z *ioEncWriter) end() { + if z.fw != nil { + if err := z.fw.Flush(); err != nil { + panic(err) + } + } +} + +*/ + +// --------------------------------------------- + +// bufioEncWriter +type bufioEncWriter struct { + buf []byte + w io.Writer + n int + sz int // buf size + + // Extensions can call Encode() within a current Encode() call. + // We need to know when the top level Encode() call returns, + // so we can decide whether to Release() or not. + calls uint16 // what depth in mustDecode are we in now. 
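+	// calls is incremented when mustEncode begins and decremented when it returns;
+	// once it drops back to zero, the top-level Encode is complete and the
+	// buffer may be released (see mustEncode).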
+ + _ [6]uint8 // padding + + bytesBufPooler + + _ [1]uint64 // padding + // a int + // b [4]byte + // err +} + +func (z *bufioEncWriter) reset(w io.Writer, bufsize int) { + z.w = w + z.n = 0 + z.calls = 0 + if bufsize <= 0 { + bufsize = defEncByteBufSize + } + z.sz = bufsize + if cap(z.buf) >= bufsize { + z.buf = z.buf[:cap(z.buf)] + } else { + z.buf = z.bytesBufPooler.get(bufsize) + // z.buf = make([]byte, bufsize) + } +} + +func (z *bufioEncWriter) release() { + z.buf = nil + z.bytesBufPooler.end() +} + +//go:noinline - flush only called intermittently +func (z *bufioEncWriter) flushErr() (err error) { + n, err := z.w.Write(z.buf[:z.n]) + z.n -= n + if z.n > 0 && err == nil { + err = io.ErrShortWrite + } + if n > 0 && z.n > 0 { + copy(z.buf, z.buf[n:z.n+n]) + } + return err +} + +func (z *bufioEncWriter) flush() { + if err := z.flushErr(); err != nil { + panic(err) + } +} + +func (z *bufioEncWriter) writeb(s []byte) { +LOOP: + a := len(z.buf) - z.n + if len(s) > a { + z.n += copy(z.buf[z.n:], s[:a]) + s = s[a:] + z.flush() + goto LOOP + } + z.n += copy(z.buf[z.n:], s) +} + +func (z *bufioEncWriter) writestr(s string) { + // z.writeb(bytesView(s)) // inlined below +LOOP: + a := len(z.buf) - z.n + if len(s) > a { + z.n += copy(z.buf[z.n:], s[:a]) + s = s[a:] + z.flush() + goto LOOP + } + z.n += copy(z.buf[z.n:], s) +} + +func (z *bufioEncWriter) writen1(b1 byte) { + if 1 > len(z.buf)-z.n { + z.flush() + } + z.buf[z.n] = b1 + z.n++ +} + +func (z *bufioEncWriter) writen2(b1, b2 byte) { + if 2 > len(z.buf)-z.n { + z.flush() + } + z.buf[z.n+1] = b2 + z.buf[z.n] = b1 + z.n += 2 +} + +func (z *bufioEncWriter) endErr() (err error) { + if z.n > 0 { + err = z.flushErr() + } + return +} + +// --------------------------------------------- + +// bytesEncAppender implements encWriter and can write to an byte slice. +type bytesEncAppender struct { + b []byte + out *[]byte +} + +func (z *bytesEncAppender) writeb(s []byte) { + z.b = append(z.b, s...) +} +func (z *bytesEncAppender) writestr(s string) { + z.b = append(z.b, s...) 
+} +func (z *bytesEncAppender) writen1(b1 byte) { + z.b = append(z.b, b1) +} +func (z *bytesEncAppender) writen2(b1, b2 byte) { + z.b = append(z.b, b1, b2) +} +func (z *bytesEncAppender) endErr() error { + *(z.out) = z.b + return nil +} +func (z *bytesEncAppender) reset(in []byte, out *[]byte) { + z.b = in[:0] + z.out = out +} + +// --------------------------------------------- + +func (e *Encoder) rawExt(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeRawExt(rv2i(rv).(*RawExt), e) +} + +func (e *Encoder) ext(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeExt(rv2i(rv), f.xfTag, f.xfFn, e) +} + +func (e *Encoder) selferMarshal(f *codecFnInfo, rv reflect.Value) { + rv2i(rv).(Selfer).CodecEncodeSelf(e) +} + +func (e *Encoder) binaryMarshal(f *codecFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(encoding.BinaryMarshaler).MarshalBinary() + e.marshalRaw(bs, fnerr) +} + +func (e *Encoder) textMarshal(f *codecFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(encoding.TextMarshaler).MarshalText() + e.marshalUtf8(bs, fnerr) +} + +func (e *Encoder) jsonMarshal(f *codecFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(jsonMarshaler).MarshalJSON() + e.marshalAsis(bs, fnerr) +} + +func (e *Encoder) raw(f *codecFnInfo, rv reflect.Value) { + e.rawBytes(rv2i(rv).(Raw)) +} + +func (e *Encoder) kInvalid(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeNil() +} + +func (e *Encoder) kErr(f *codecFnInfo, rv reflect.Value) { + e.errorf("unsupported kind %s, for %#v", rv.Kind(), rv) +} + +func (e *Encoder) kSlice(f *codecFnInfo, rv reflect.Value) { + ti := f.ti + ee := e.e + // array may be non-addressable, so we have to manage with care + // (don't call rv.Bytes, rv.Slice, etc). + // E.g. type struct S{B [2]byte}; + // Encode(S{}) will bomb on "panic: slice of unaddressable array". + if f.seq != seqTypeArray { + if rv.IsNil() { + ee.EncodeNil() + return + } + // If in this method, then there was no extension function defined. + // So it's okay to treat as []byte. + if ti.rtid == uint8SliceTypId { + ee.EncodeStringBytesRaw(rv.Bytes()) + return + } + } + if f.seq == seqTypeChan && ti.chandir&uint8(reflect.RecvDir) == 0 { + e.errorf("send-only channel cannot be encoded") + } + elemsep := e.esep + rtelem := ti.elem + rtelemIsByte := uint8TypId == rt2id(rtelem) // NOT rtelem.Kind() == reflect.Uint8 + var l int + // if a slice, array or chan of bytes, treat specially + if rtelemIsByte { + switch f.seq { + case seqTypeSlice: + ee.EncodeStringBytesRaw(rv.Bytes()) + case seqTypeArray: + l = rv.Len() + if rv.CanAddr() { + ee.EncodeStringBytesRaw(rv.Slice(0, l).Bytes()) + } else { + var bs []byte + if l <= cap(e.b) { + bs = e.b[:l] + } else { + bs = make([]byte, l) + } + reflect.Copy(reflect.ValueOf(bs), rv) + ee.EncodeStringBytesRaw(bs) + } + case seqTypeChan: + // do not use range, so that the number of elements encoded + // does not change, and encoding does not hang waiting on someone to close chan. + // for b := range rv2i(rv).(<-chan byte) { bs = append(bs, b) } + // ch := rv2i(rv).(<-chan byte) // fix error - that this is a chan byte, not a <-chan byte. 
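+
+			// Note: an interface value holding a bidirectional chan byte does not
+			// type-assert directly to <-chan byte, hence the two-step assertion below.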
+ + if rv.IsNil() { + ee.EncodeNil() + break + } + bs := e.b[:0] + irv := rv2i(rv) + ch, ok := irv.(<-chan byte) + if !ok { + ch = irv.(chan byte) + } + + L1: + switch timeout := e.h.ChanRecvTimeout; { + case timeout == 0: // only consume available + for { + select { + case b := <-ch: + bs = append(bs, b) + default: + break L1 + } + } + case timeout > 0: // consume until timeout + tt := time.NewTimer(timeout) + for { + select { + case b := <-ch: + bs = append(bs, b) + case <-tt.C: + // close(tt.C) + break L1 + } + } + default: // consume until close + for b := range ch { + bs = append(bs, b) + } + } + + ee.EncodeStringBytesRaw(bs) + } + return + } + + // if chan, consume chan into a slice, and work off that slice. + if f.seq == seqTypeChan { + rvcs := reflect.Zero(reflect.SliceOf(rtelem)) + timeout := e.h.ChanRecvTimeout + if timeout < 0 { // consume until close + for { + recv, recvOk := rv.Recv() + if !recvOk { + break + } + rvcs = reflect.Append(rvcs, recv) + } + } else { + cases := make([]reflect.SelectCase, 2) + cases[0] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: rv} + if timeout == 0 { + cases[1] = reflect.SelectCase{Dir: reflect.SelectDefault} + } else { + tt := time.NewTimer(timeout) + cases[1] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(tt.C)} + } + for { + chosen, recv, recvOk := reflect.Select(cases) + if chosen == 1 || !recvOk { + break + } + rvcs = reflect.Append(rvcs, recv) + } + } + rv = rvcs // TODO: ensure this doesn't mess up anywhere that rv of kind chan is expected + } + + l = rv.Len() + if ti.mbs { + if l%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", l) + return + } + ee.WriteMapStart(l / 2) + } else { + ee.WriteArrayStart(l) + } + + if l > 0 { + var fn *codecFn + for rtelem.Kind() == reflect.Ptr { + rtelem = rtelem.Elem() + } + // if kind is reflect.Interface, do not pre-determine the + // encoding type, because preEncodeValue may break it down to + // a concrete type and kInterface will bomb. 
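+		// Resolving the element's codecFn once, outside the loop, avoids a
+		// per-element function lookup while encoding each value below.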
+ if rtelem.Kind() != reflect.Interface { + fn = e.h.fn(rtelem, true, true) + } + for j := 0; j < l; j++ { + if elemsep { + if ti.mbs { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } else { + ee.WriteArrayElem() + } + } + e.encodeValue(rv.Index(j), fn, true) + } + } + + if ti.mbs { + ee.WriteMapEnd() + } else { + ee.WriteArrayEnd() + } +} + +func (e *Encoder) kStructNoOmitempty(f *codecFnInfo, rv reflect.Value) { + fti := f.ti + tisfi := fti.sfiSrc + toMap := !(fti.toArray || e.h.StructToArray) + if toMap { + tisfi = fti.sfiSort + } + + ee := e.e + + sfn := structFieldNode{v: rv, update: false} + if toMap { + ee.WriteMapStart(len(tisfi)) + if e.esep { + for _, si := range tisfi { + ee.WriteMapElemKey() + e.kStructFieldKey(fti.keyType, si.encNameAsciiAlphaNum, si.encName) + ee.WriteMapElemValue() + e.encodeValue(sfn.field(si), nil, true) + } + } else { + for _, si := range tisfi { + e.kStructFieldKey(fti.keyType, si.encNameAsciiAlphaNum, si.encName) + e.encodeValue(sfn.field(si), nil, true) + } + } + ee.WriteMapEnd() + } else { + ee.WriteArrayStart(len(tisfi)) + if e.esep { + for _, si := range tisfi { + ee.WriteArrayElem() + e.encodeValue(sfn.field(si), nil, true) + } + } else { + for _, si := range tisfi { + e.encodeValue(sfn.field(si), nil, true) + } + } + ee.WriteArrayEnd() + } +} + +func (e *Encoder) kStructFieldKey(keyType valueType, encNameAsciiAlphaNum bool, encName string) { + encStructFieldKey(encName, e.e, e.w, keyType, encNameAsciiAlphaNum, e.js) +} + +func (e *Encoder) kStruct(f *codecFnInfo, rv reflect.Value) { + fti := f.ti + elemsep := e.esep + tisfi := fti.sfiSrc + var newlen int + toMap := !(fti.toArray || e.h.StructToArray) + var mf map[string]interface{} + if f.ti.mf { + mf = rv2i(rv).(MissingFielder).CodecMissingFields() + toMap = true + newlen += len(mf) + } else if f.ti.mfp { + if rv.CanAddr() { + mf = rv2i(rv.Addr()).(MissingFielder).CodecMissingFields() + } else { + // make a new addressable value of same one, and use it + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + mf = rv2i(rv2).(MissingFielder).CodecMissingFields() + } + toMap = true + newlen += len(mf) + } + // if toMap, use the sorted array. If toArray, use unsorted array (to match sequence in struct) + if toMap { + tisfi = fti.sfiSort + } + newlen += len(tisfi) + ee := e.e + + // Use sync.Pool to reduce allocating slices unnecessarily. + // The cost of sync.Pool is less than the cost of new allocation. + // + // Each element of the array pools one of encStructPool(8|16|32|64). + // It allows the re-use of slices up to 64 in length. + // A performance cost of encoding structs was collecting + // which values were empty and should be omitted. + // We needed slices of reflect.Value and string to collect them. + // This shared pool reduces the amount of unnecessary creation we do. + // The cost is that of locking sometimes, but sync.Pool is efficient + // enough to reduce thread contention. + + // fmt.Printf(">>>>>>>>>>>>>> encode.kStruct: newlen: %d\n", newlen) + var spool sfiRvPooler + var fkvs = spool.get(newlen) + + var kv sfiRv + recur := e.h.RecursiveEmptyCheck + sfn := structFieldNode{v: rv, update: false} + newlen = 0 + for _, si := range tisfi { + // kv.r = si.field(rv, false) + kv.r = sfn.field(si) + if toMap { + if si.omitEmpty() && isEmptyValue(kv.r, e.h.TypeInfos, recur, recur) { + continue + } + kv.v = si // si.encName + } else { + // use the zero value. 
+ // if a reference or struct, set to nil (so you do not output too much) + if si.omitEmpty() && isEmptyValue(kv.r, e.h.TypeInfos, recur, recur) { + switch kv.r.Kind() { + case reflect.Struct, reflect.Interface, reflect.Ptr, + reflect.Array, reflect.Map, reflect.Slice: + kv.r = reflect.Value{} //encode as nil + } + } + } + fkvs[newlen] = kv + newlen++ + } + fkvs = fkvs[:newlen] + + var mflen int + for k, v := range mf { + if k == "" { + delete(mf, k) + continue + } + if fti.infoFieldOmitempty && isEmptyValue(reflect.ValueOf(v), e.h.TypeInfos, recur, recur) { + delete(mf, k) + continue + } + mflen++ + } + + var j int + if toMap { + ee.WriteMapStart(newlen + mflen) + if elemsep { + for j = 0; j < len(fkvs); j++ { + kv = fkvs[j] + ee.WriteMapElemKey() + e.kStructFieldKey(fti.keyType, kv.v.encNameAsciiAlphaNum, kv.v.encName) + ee.WriteMapElemValue() + e.encodeValue(kv.r, nil, true) + } + } else { + for j = 0; j < len(fkvs); j++ { + kv = fkvs[j] + e.kStructFieldKey(fti.keyType, kv.v.encNameAsciiAlphaNum, kv.v.encName) + e.encodeValue(kv.r, nil, true) + } + } + // now, add the others + for k, v := range mf { + ee.WriteMapElemKey() + e.kStructFieldKey(fti.keyType, false, k) + ee.WriteMapElemValue() + e.encode(v) + } + ee.WriteMapEnd() + } else { + ee.WriteArrayStart(newlen) + if elemsep { + for j = 0; j < len(fkvs); j++ { + ee.WriteArrayElem() + e.encodeValue(fkvs[j].r, nil, true) + } + } else { + for j = 0; j < len(fkvs); j++ { + e.encodeValue(fkvs[j].r, nil, true) + } + } + ee.WriteArrayEnd() + } + + // do not use defer. Instead, use explicit pool return at end of function. + // defer has a cost we are trying to avoid. + // If there is a panic and these slices are not returned, it is ok. + spool.end() +} + +func (e *Encoder) kMap(f *codecFnInfo, rv reflect.Value) { + ee := e.e + if rv.IsNil() { + ee.EncodeNil() + return + } + + l := rv.Len() + ee.WriteMapStart(l) + if l == 0 { + ee.WriteMapEnd() + return + } + // var asSymbols bool + // determine the underlying key and val encFn's for the map. + // This eliminates some work which is done for each loop iteration i.e. + // rv.Type(), ref.ValueOf(rt).Pointer(), then check map/list for fn. + // + // However, if kind is reflect.Interface, do not pre-determine the + // encoding type, because preEncodeValue may break it down to + // a concrete type and kInterface will bomb. 
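+	// As with slices, resolve the value codecFn once up front where possible;
+	// Canonical output is handled separately by kMapCanonical below.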
+ var keyFn, valFn *codecFn + ti := f.ti + rtkey0 := ti.key + rtkey := rtkey0 + rtval0 := ti.elem + rtval := rtval0 + // rtkeyid := rt2id(rtkey0) + for rtval.Kind() == reflect.Ptr { + rtval = rtval.Elem() + } + if rtval.Kind() != reflect.Interface { + valFn = e.h.fn(rtval, true, true) + } + mks := rv.MapKeys() + + if e.h.Canonical { + e.kMapCanonical(rtkey, rv, mks, valFn) + ee.WriteMapEnd() + return + } + + var keyTypeIsString = stringTypId == rt2id(rtkey0) // rtkeyid + if !keyTypeIsString { + for rtkey.Kind() == reflect.Ptr { + rtkey = rtkey.Elem() + } + if rtkey.Kind() != reflect.Interface { + // rtkeyid = rt2id(rtkey) + keyFn = e.h.fn(rtkey, true, true) + } + } + + // for j, lmks := 0, len(mks); j < lmks; j++ { + for j := range mks { + if e.esep { + ee.WriteMapElemKey() + } + if keyTypeIsString { + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(mks[j].String())) + } else { + ee.EncodeStringEnc(cUTF8, mks[j].String()) + } + } else { + e.encodeValue(mks[j], keyFn, true) + } + if e.esep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mks[j]), valFn, true) + + } + ee.WriteMapEnd() +} + +func (e *Encoder) kMapCanonical(rtkey reflect.Type, rv reflect.Value, mks []reflect.Value, valFn *codecFn) { + ee := e.e + elemsep := e.esep + // we previously did out-of-band if an extension was registered. + // This is not necessary, as the natural kind is sufficient for ordering. + + switch rtkey.Kind() { + case reflect.Bool: + mksv := make([]boolRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Bool() + } + sort.Sort(boolRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeBool(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.String: + mksv := make([]stringRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.String() + } + sort.Sort(stringRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + if e.h.StringToRaw { + ee.EncodeStringBytesRaw(bytesView(mksv[i].v)) + } else { + ee.EncodeStringEnc(cUTF8, mksv[i].v) + } + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr: + mksv := make([]uintRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Uint() + } + sort.Sort(uintRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeUint(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + mksv := make([]intRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Int() + } + sort.Sort(intRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeInt(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Float32: + mksv := make([]floatRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + sort.Sort(floatRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(mksv[i].v)) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Float64: + mksv := make([]floatRv, len(mks)) + for i, k := range mks { + v 
:= &mksv[i] + v.r = k + v.v = k.Float() + } + sort.Sort(floatRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Struct: + if rv.Type() == timeTyp { + mksv := make([]timeRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = rv2i(k).(time.Time) + } + sort.Sort(timeRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeTime(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + break + } + fallthrough + default: + // out-of-band + // first encode each key to a []byte first, then sort them, then record + var mksv []byte = make([]byte, 0, len(mks)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + mksbv := make([]bytesRv, len(mks)) + for i, k := range mks { + v := &mksbv[i] + l := len(mksv) + e2.MustEncode(k) + v.r = k + v.v = mksv[l:] + } + sort.Sort(bytesRvSlice(mksbv)) + for j := range mksbv { + if elemsep { + ee.WriteMapElemKey() + } + e.asis(mksbv[j].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksbv[j].r), valFn, true) + } + } +} + +// // -------------------------------------------------- + +type encWriterSwitch struct { + // wi *ioEncWriter + wb bytesEncAppender + wf *bufioEncWriter + // typ entryType + bytes bool // encoding to []byte + esep bool // whether it has elem separators + isas bool // whether e.as != nil + js bool // is json encoder? + be bool // is binary encoder? + _ [2]byte // padding + // _ [2]uint64 // padding + // _ uint64 // padding +} + +func (z *encWriterSwitch) writeb(s []byte) { + if z.bytes { + z.wb.writeb(s) + } else { + z.wf.writeb(s) + } +} +func (z *encWriterSwitch) writestr(s string) { + if z.bytes { + z.wb.writestr(s) + } else { + z.wf.writestr(s) + } +} +func (z *encWriterSwitch) writen1(b1 byte) { + if z.bytes { + z.wb.writen1(b1) + } else { + z.wf.writen1(b1) + } +} +func (z *encWriterSwitch) writen2(b1, b2 byte) { + if z.bytes { + z.wb.writen2(b1, b2) + } else { + z.wf.writen2(b1, b2) + } +} +func (z *encWriterSwitch) endErr() error { + if z.bytes { + return z.wb.endErr() + } + return z.wf.endErr() +} + +func (z *encWriterSwitch) end() { + if err := z.endErr(); err != nil { + panic(err) + } +} + +/* + +// ------------------------------------------ +func (z *encWriterSwitch) writeb(s []byte) { + switch z.typ { + case entryTypeBytes: + z.wb.writeb(s) + case entryTypeIo: + z.wi.writeb(s) + default: + z.wf.writeb(s) + } +} +func (z *encWriterSwitch) writestr(s string) { + switch z.typ { + case entryTypeBytes: + z.wb.writestr(s) + case entryTypeIo: + z.wi.writestr(s) + default: + z.wf.writestr(s) + } +} +func (z *encWriterSwitch) writen1(b1 byte) { + switch z.typ { + case entryTypeBytes: + z.wb.writen1(b1) + case entryTypeIo: + z.wi.writen1(b1) + default: + z.wf.writen1(b1) + } +} +func (z *encWriterSwitch) writen2(b1, b2 byte) { + switch z.typ { + case entryTypeBytes: + z.wb.writen2(b1, b2) + case entryTypeIo: + z.wi.writen2(b1, b2) + default: + z.wf.writen2(b1, b2) + } +} +func (z *encWriterSwitch) end() { + switch z.typ { + case entryTypeBytes: + z.wb.end() + case entryTypeIo: + z.wi.end() + default: + z.wf.end() + } +} + +// ------------------------------------------ +func (z *encWriterSwitch) writeb(s []byte) { + if z.bytes { + z.wb.writeb(s) + } else { + z.wi.writeb(s) + } +} +func (z *encWriterSwitch) 
writestr(s string) {
+	if z.bytes {
+		z.wb.writestr(s)
+	} else {
+		z.wi.writestr(s)
+	}
+}
+func (z *encWriterSwitch) writen1(b1 byte) {
+	if z.bytes {
+		z.wb.writen1(b1)
+	} else {
+		z.wi.writen1(b1)
+	}
+}
+func (z *encWriterSwitch) writen2(b1, b2 byte) {
+	if z.bytes {
+		z.wb.writen2(b1, b2)
+	} else {
+		z.wi.writen2(b1, b2)
+	}
+}
+func (z *encWriterSwitch) end() {
+	if z.bytes {
+		z.wb.end()
+	} else {
+		z.wi.end()
+	}
+}
+
+*/
+
+// Encoder writes an object to an output stream in a supported format.
+//
+// Encoder is NOT safe for concurrent use, i.e. an Encoder cannot be used
+// concurrently in multiple goroutines.
+//
+// However, as Encoder could be allocation heavy to initialize, a Reset method is provided
+// so its state can be reused to encode new output streams repeatedly.
+// This is the idiomatic way to use it.
+type Encoder struct {
+	panicHdl
+	// hopefully, reduce dereferencing cost by laying the encWriter inside the Encoder
+	e encDriver
+
+	// NOTE: Encoder shouldn't call its write methods,
+	// as the handler MAY need to do some coordination.
+	w *encWriterSwitch
+
+	// bw *bufio.Writer
+	as encDriverAsis
+
+	err error
+
+	h  *BasicHandle
+	hh Handle
+	// ---- cpu cache line boundary? + 3
+	encWriterSwitch
+
+	ci set
+
+	b [(5 * 8)]byte // for encoding chan or (non-addressable) [N]byte
+
+	// ---- writable fields during execution --- *try* to keep in sep cache line
+
+	// ---- cpu cache line boundary?
+	// b [scratchByteArrayLen]byte
+	// _ [cacheLineSize - scratchByteArrayLen]byte // padding
+	// b [cacheLineSize - (8 * 0)]byte // used for encoding a chan or (non-addressable) array of bytes
+}
+
+// NewEncoder returns an Encoder for encoding into an io.Writer.
+//
+// For efficiency, users are encouraged to configure WriterBufferSize on the handle
+// OR pass in a memory buffered writer (e.g. bufio.Writer, bytes.Buffer).
+func NewEncoder(w io.Writer, h Handle) *Encoder {
+	e := newEncoder(h)
+	e.Reset(w)
+	return e
+}
+
+// NewEncoderBytes returns an encoder for encoding directly and efficiently
+// into a byte slice, using zero-copying to temporary slices.
+//
+// It will potentially replace the output byte slice pointed to.
+// After encoding, the out parameter contains the encoded contents.
+func NewEncoderBytes(out *[]byte, h Handle) *Encoder {
+	e := newEncoder(h)
+	e.ResetBytes(out)
+	return e
+}
+
+func newEncoder(h Handle) *Encoder {
+	e := &Encoder{h: basicHandle(h), err: errEncoderNotInitialized}
+	e.bytes = true
+	if useFinalizers {
+		runtime.SetFinalizer(e, (*Encoder).finalize)
+		// xdebugf(">>>> new(Encoder) with finalizer")
+	}
+	e.w = &e.encWriterSwitch
+	e.hh = h
+	e.esep = h.hasElemSeparators()
+
+	return e
+}
+
+func (e *Encoder) resetCommon() {
+	// e.w = &e.encWriterSwitch
+	if e.e == nil || e.hh.recreateEncDriver(e.e) {
+		e.e = e.hh.newEncDriver(e)
+		e.as, e.isas = e.e.(encDriverAsis)
+		// e.cr, _ = e.e.(containerStateRecv)
+	}
+	e.be = e.hh.isBinary()
+	_, e.js = e.hh.(*JsonHandle)
+	e.e.reset()
+	e.err = nil
+}
+
+// Reset resets the Encoder with a new output stream.
+//
+// This accommodates using the state of the Encoder,
+// where it has "cached" information about sub-engines.
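+//
+// A minimal reuse sketch (w1, w2, v1 and v2 are hypothetical):
+//
+//	enc := NewEncoder(w1, h)
+//	err := enc.Encode(v1)
+//	enc.Reset(w2) // reuse the Encoder for a new output stream
+//	err = enc.Encode(v2)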
+func (e *Encoder) Reset(w io.Writer) {
+	if w == nil {
+		return
+	}
+	// var ok bool
+	e.bytes = false
+	if e.wf == nil {
+		e.wf = new(bufioEncWriter)
+	}
+	// e.typ = entryTypeUnset
+	// if e.h.WriterBufferSize > 0 {
+	// 	// bw := bufio.NewWriterSize(w, e.h.WriterBufferSize)
+	// 	// e.wi.bw = bw
+	// 	// e.wi.sw = bw
+	// 	// e.wi.fw = bw
+	// 	// e.wi.ww = bw
+	// 	if e.wf == nil {
+	// 		e.wf = new(bufioEncWriter)
+	// 	}
+	// 	e.wf.reset(w, e.h.WriterBufferSize)
+	// 	e.typ = entryTypeBufio
+	// } else {
+	// 	if e.wi == nil {
+	// 		e.wi = new(ioEncWriter)
+	// 	}
+	// 	e.wi.reset(w)
+	// 	e.typ = entryTypeIo
+	// }
+	e.wf.reset(w, e.h.WriterBufferSize)
+	// e.typ = entryTypeBufio
+
+	// e.w = e.wi
+	e.resetCommon()
+}
+
+// ResetBytes resets the Encoder with a new destination output []byte.
+func (e *Encoder) ResetBytes(out *[]byte) {
+	if out == nil {
+		return
+	}
+	var in []byte = *out
+	if in == nil {
+		in = make([]byte, defEncByteBufSize)
+	}
+	e.bytes = true
+	// e.typ = entryTypeBytes
+	e.wb.reset(in, out)
+	// e.w = &e.wb
+	e.resetCommon()
+}
+
+// Encode writes an object into a stream.
+//
+// Encoding can be configured via the struct tag for the fields.
+// The key (in the struct tags) that we look at is configurable.
+//
+// By default, we look up the "codec" key in the struct field's tags,
+// and fall back to the "json" key if "codec" is absent.
+// That key in struct field's tag value is the key name,
+// followed by an optional comma and options.
+//
+// To set an option on all fields (e.g. omitempty on all fields), you
+// can create a field called _struct, and set flags on it. The options
+// which can be set on _struct are:
+//   - omitempty: so all fields are omitted if empty
+//   - toarray: so struct is encoded as an array
+//   - int: so struct key names are encoded as signed integers (instead of strings)
+//   - uint: so struct key names are encoded as unsigned integers (instead of strings)
+//   - float: so struct key names are encoded as floats (instead of strings)
+//
+// More details on these below.
+//
+// Struct values "usually" encode as maps. Each exported struct field is encoded unless:
+//   - the field's tag is "-", OR
+//   - the field is empty (empty or the zero value) and its tag specifies the "omitempty" option.
+//
+// When encoding as a map, the first string in the tag (before the comma)
+// is the map key string to use when encoding.
+// ...
+// This key is typically encoded as a string.
+// However, there are instances where the encoded stream has mapping keys encoded as numbers.
+// For example, some cbor streams have keys as integer codes in the stream, but they should map
+// to fields in a structured object. Consequently, a struct is the natural representation in code.
+// For these, configure the struct to encode/decode the keys as numbers (instead of string).
+// This is done with the int, uint or float option on the _struct field (see above).
+//
+// However, struct values may encode as arrays. This happens when:
+//   - StructToArray Encode option is set, OR
+//   - the tag on the _struct field sets the "toarray" option
+//
+// Note that omitempty is ignored when encoding struct values as arrays,
+// as an entry must be encoded for each field, to maintain its position.
+//
+// Values with types that implement MapBySlice are encoded as stream maps.
+//
+// The empty values (for omitempty option) are false, 0, any nil pointer
+// or interface value, and any array, slice, map, or string of length zero.
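+//
+// As a sketch of MapBySlice (MyMap is hypothetical), a slice type can opt in
+// to being encoded as a map, with alternating elements taken as key/value pairs:
+//
+//	type MyMap []interface{}
+//	func (MyMap) MapBySlice() {}
+//	// MyMap{"k1", 1, "k2", 2} encodes as the map {"k1": 1, "k2": 2}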
+// +// Anonymous fields are encoded inline except: +// - the struct tag specifies a replacement name (first value) +// - the field is of an interface type +// +// Examples: +// +// // NOTE: 'json:' can be used as struct tag key, in place 'codec:' below. +// type MyStruct struct { +// _struct bool `codec:",omitempty"` //set omitempty for every field +// Field1 string `codec:"-"` //skip this field +// Field2 int `codec:"myName"` //Use key "myName" in encode stream +// Field3 int32 `codec:",omitempty"` //use key "Field3". Omit if empty. +// Field4 bool `codec:"f4,omitempty"` //use key "f4". Omit if empty. +// io.Reader //use key "Reader". +// MyStruct `codec:"my1" //use key "my1". +// MyStruct //inline it +// ... +// } +// +// type MyStruct struct { +// _struct bool `codec:",toarray"` //encode struct as an array +// } +// +// type MyStruct struct { +// _struct bool `codec:",uint"` //encode struct with "unsigned integer" keys +// Field1 string `codec:"1"` //encode Field1 key using: EncodeInt(1) +// Field2 string `codec:"2"` //encode Field2 key using: EncodeInt(2) +// } +// +// The mode of encoding is based on the type of the value. When a value is seen: +// - If a Selfer, call its CodecEncodeSelf method +// - If an extension is registered for it, call that extension function +// - If implements encoding.(Binary|Text|JSON)Marshaler, call Marshal(Binary|Text|JSON) method +// - Else encode it based on its reflect.Kind +// +// Note that struct field names and keys in map[string]XXX will be treated as symbols. +// Some formats support symbols (e.g. binc) and will properly encode the string +// only once in the stream, and use a tag to refer to it thereafter. +func (e *Encoder) Encode(v interface{}) (err error) { + // tried to use closure, as runtime optimizes defer with no params. + // This seemed to be causing weird issues (like circular reference found, unexpected panic, etc). + // Also, see https://github.com/golang/go/issues/14939#issuecomment-417836139 + // defer func() { e.deferred(&err) }() } + // { x, y := e, &err; defer func() { x.deferred(y) }() } + if e.err != nil { + return e.err + } + if recoverPanicToErr { + defer func() { + // if error occurred during encoding, return that error; + // else if error occurred on end'ing (i.e. during flush), return that error. + err = e.w.endErr() + x := recover() + if x == nil { + e.err = err + } else { + panicValToErr(e, x, &e.err) + err = e.err + } + }() + } + + // defer e.deferred(&err) + e.mustEncode(v) + return +} + +// MustEncode is like Encode, but panics if unable to Encode. +// This provides insight to the code location that triggered the error. +func (e *Encoder) MustEncode(v interface{}) { + if e.err != nil { + panic(e.err) + } + e.mustEncode(v) +} + +func (e *Encoder) mustEncode(v interface{}) { + if e.wf == nil { + e.encode(v) + e.e.atEndOfEncode() + e.w.end() + return + } + + if e.wf.buf == nil { + e.wf.buf = e.wf.bytesBufPooler.get(e.wf.sz) + } + e.wf.calls++ + + e.encode(v) + + e.wf.calls-- + + if e.wf.calls == 0 { + e.e.atEndOfEncode() + e.w.end() + if !e.h.ExplicitRelease { + e.wf.release() + } + } +} + +// func (e *Encoder) deferred(err1 *error) { +// e.w.end() +// if recoverPanicToErr { +// if x := recover(); x != nil { +// panicValToErr(e, x, err1) +// panicValToErr(e, x, &e.err) +// } +// } +// } + +//go:noinline -- as it is run by finalizer +func (e *Encoder) finalize() { + // xdebugf("finalizing Encoder") + e.Release() +} + +// Release releases shared (pooled) resources. 
+// +// It is important to call Release() when done with an Encoder, so those resources +// are released instantly for use by subsequently created Encoders. +func (e *Encoder) Release() { + if e.wf != nil { + e.wf.release() + } +} + +func (e *Encoder) encode(iv interface{}) { + // a switch with only concrete types can be optimized. + // consequently, we deal with nil and interfaces outside the switch. + + if iv == nil || definitelyNil(iv) { + e.e.EncodeNil() + return + } + + switch v := iv.(type) { + // case nil: + // case Selfer: + case Raw: + e.rawBytes(v) + case reflect.Value: + e.encodeValue(v, nil, true) + + case string: + if e.h.StringToRaw { + e.e.EncodeStringBytesRaw(bytesView(v)) + } else { + e.e.EncodeStringEnc(cUTF8, v) + } + case bool: + e.e.EncodeBool(v) + case int: + e.e.EncodeInt(int64(v)) + case int8: + e.e.EncodeInt(int64(v)) + case int16: + e.e.EncodeInt(int64(v)) + case int32: + e.e.EncodeInt(int64(v)) + case int64: + e.e.EncodeInt(v) + case uint: + e.e.EncodeUint(uint64(v)) + case uint8: + e.e.EncodeUint(uint64(v)) + case uint16: + e.e.EncodeUint(uint64(v)) + case uint32: + e.e.EncodeUint(uint64(v)) + case uint64: + e.e.EncodeUint(v) + case uintptr: + e.e.EncodeUint(uint64(v)) + case float32: + e.e.EncodeFloat32(v) + case float64: + e.e.EncodeFloat64(v) + case time.Time: + e.e.EncodeTime(v) + case []uint8: + e.e.EncodeStringBytesRaw(v) + + case *Raw: + e.rawBytes(*v) + + case *string: + if e.h.StringToRaw { + e.e.EncodeStringBytesRaw(bytesView(*v)) + } else { + e.e.EncodeStringEnc(cUTF8, *v) + } + case *bool: + e.e.EncodeBool(*v) + case *int: + e.e.EncodeInt(int64(*v)) + case *int8: + e.e.EncodeInt(int64(*v)) + case *int16: + e.e.EncodeInt(int64(*v)) + case *int32: + e.e.EncodeInt(int64(*v)) + case *int64: + e.e.EncodeInt(*v) + case *uint: + e.e.EncodeUint(uint64(*v)) + case *uint8: + e.e.EncodeUint(uint64(*v)) + case *uint16: + e.e.EncodeUint(uint64(*v)) + case *uint32: + e.e.EncodeUint(uint64(*v)) + case *uint64: + e.e.EncodeUint(*v) + case *uintptr: + e.e.EncodeUint(uint64(*v)) + case *float32: + e.e.EncodeFloat32(*v) + case *float64: + e.e.EncodeFloat64(*v) + case *time.Time: + e.e.EncodeTime(*v) + + case *[]uint8: + e.e.EncodeStringBytesRaw(*v) + + default: + if v, ok := iv.(Selfer); ok { + v.CodecEncodeSelf(e) + } else if !fastpathEncodeTypeSwitch(iv, e) { + // checkfastpath=true (not false), as underlying slice/map type may be fast-path + e.encodeValue(reflect.ValueOf(iv), nil, true) + } + } +} + +func (e *Encoder) encodeValue(rv reflect.Value, fn *codecFn, checkFastpath bool) { + // if a valid fn is passed, it MUST BE for the dereferenced type of rv + var sptr uintptr + var rvp reflect.Value + var rvpValid bool +TOP: + switch rv.Kind() { + case reflect.Ptr: + if rv.IsNil() { + e.e.EncodeNil() + return + } + rvpValid = true + rvp = rv + rv = rv.Elem() + if e.h.CheckCircularRef && rv.Kind() == reflect.Struct { + // TODO: Movable pointers will be an issue here. Future problem. 
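+			// Remember the struct's address: if the same address is seen again
+			// further down this path, encodeValue reports a circular reference
+			// (see the ci.add check below).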
+ sptr = rv.UnsafeAddr() + break TOP + } + goto TOP + case reflect.Interface: + if rv.IsNil() { + e.e.EncodeNil() + return + } + rv = rv.Elem() + goto TOP + case reflect.Slice, reflect.Map: + if rv.IsNil() { + e.e.EncodeNil() + return + } + case reflect.Invalid, reflect.Func: + e.e.EncodeNil() + return + } + + if sptr != 0 && (&e.ci).add(sptr) { + e.errorf("circular reference found: # %d", sptr) + } + + if fn == nil { + rt := rv.Type() + // always pass checkCodecSelfer=true, in case T or ****T is passed, where *T is a Selfer + fn = e.h.fn(rt, checkFastpath, true) + } + if fn.i.addrE { + if rvpValid { + fn.fe(e, &fn.i, rvp) + } else if rv.CanAddr() { + fn.fe(e, &fn.i, rv.Addr()) + } else { + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + fn.fe(e, &fn.i, rv2) + } + } else { + fn.fe(e, &fn.i, rv) + } + if sptr != 0 { + (&e.ci).remove(sptr) + } +} + +// func (e *Encoder) marshal(bs []byte, fnerr error, asis bool, c charEncoding) { +// if fnerr != nil { +// panic(fnerr) +// } +// if bs == nil { +// e.e.EncodeNil() +// } else if asis { +// e.asis(bs) +// } else { +// e.e.EncodeStringBytesRaw(bs) +// } +// } + +func (e *Encoder) marshalUtf8(bs []byte, fnerr error) { + if fnerr != nil { + panic(fnerr) + } + if bs == nil { + e.e.EncodeNil() + } else { + e.e.EncodeStringEnc(cUTF8, stringView(bs)) + } +} + +func (e *Encoder) marshalAsis(bs []byte, fnerr error) { + if fnerr != nil { + panic(fnerr) + } + if bs == nil { + e.e.EncodeNil() + } else { + e.asis(bs) + } +} + +func (e *Encoder) marshalRaw(bs []byte, fnerr error) { + if fnerr != nil { + panic(fnerr) + } + if bs == nil { + e.e.EncodeNil() + } else { + e.e.EncodeStringBytesRaw(bs) + } +} + +func (e *Encoder) asis(v []byte) { + if e.isas { + e.as.EncodeAsis(v) + } else { + e.w.writeb(v) + } +} + +func (e *Encoder) rawBytes(vv Raw) { + v := []byte(vv) + if !e.h.Raw { + e.errorf("Raw values cannot be encoded: %v", v) + } + e.asis(v) +} + +func (e *Encoder) wrapErr(v interface{}, err *error) { + *err = encodeError{codecError{name: e.hh.Name(), err: v}} +} + +func encStructFieldKey(encName string, ee encDriver, w *encWriterSwitch, + keyType valueType, encNameAsciiAlphaNum bool, js bool) { + var m must + // use if-else-if, not switch (which compiles to binary-search) + // since keyType is typically valueTypeString, branch prediction is pretty good. 
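+	// For json keys that are plain ASCII alphanumerics, the quoted key is
+	// assembled by hand below, avoiding the intermediate allocation that a
+	// generic `"` + encName + `"` string concat would incur.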
+ if keyType == valueTypeString { + if js && encNameAsciiAlphaNum { // keyType == valueTypeString + // w.writen1('"') + // w.writestr(encName) + // w.writen1('"') + // ---- + // w.writestr(`"` + encName + `"`) + // ---- + // do concat myself, so it is faster than the generic string concat + b := make([]byte, len(encName)+2) + copy(b[1:], encName) + b[0] = '"' + b[len(b)-1] = '"' + w.writeb(b) + } else { // keyType == valueTypeString + ee.EncodeStringEnc(cUTF8, encName) + } + } else if keyType == valueTypeInt { + ee.EncodeInt(m.Int(strconv.ParseInt(encName, 10, 64))) + } else if keyType == valueTypeUint { + ee.EncodeUint(m.Uint(strconv.ParseUint(encName, 10, 64))) + } else if keyType == valueTypeFloat { + ee.EncodeFloat64(m.Float(strconv.ParseFloat(encName, 64))) + } +} + +// func encStringAsRawBytesMaybe(ee encDriver, s string, stringToRaw bool) { +// if stringToRaw { +// ee.EncodeStringBytesRaw(bytesView(s)) +// } else { +// ee.EncodeStringEnc(cUTF8, s) +// } +// } diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/fast-path.not.go b/vendor/github.com/hashicorp/go-msgpack/v2/codec/fast-path.not.go new file mode 100644 index 0000000000000..93cb754a0377c --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/fast-path.not.go @@ -0,0 +1,39 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import "reflect" + +// fastpath was removed for safety reasons + +const fastpathEnabled = false + +func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false } +func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false } +func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false } +func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false } +func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool { return false } + +type fastpathT struct{} +type fastpathE struct { + rtid uintptr + rt reflect.Type + encfn func(*Encoder, *codecFnInfo, reflect.Value) + decfn func(*Decoder, *codecFnInfo, reflect.Value) +} +type fastpathA [0]fastpathE + +func (x fastpathA) index(rtid uintptr) int { return -1 } + +func (_ fastpathT) DecSliceUint8V(v []uint8, canChange bool, d *Decoder) (_ []uint8, changed bool) { + fn := d.h.fn(uint8SliceTyp, true, true) + d.kSlice(&fn.i, reflect.ValueOf(&v).Elem()) + return v, true +} + +var fastpathAV fastpathA +var fastpathTV fastpathT + +// ---- +// type TestMammoth2Wrapper struct{} diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-dec-array.go.tmpl b/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-dec-array.go.tmpl new file mode 100644 index 0000000000000..790e914e13cb3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-dec-array.go.tmpl @@ -0,0 +1,78 @@ +{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} +{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}} +var {{var "c"}} bool {{/* // changed */}} +_ = {{var "c"}}{{end}} +if {{var "l"}} == 0 { + {{if isSlice }}if {{var "v"}} == nil { + {{var "v"}} = []{{ .Typ }}{} + {{var "c"}} = true + } else if len({{var "v"}}) != 0 { + {{var "v"}} = {{var "v"}}[:0] + {{var "c"}} = true + } {{else if isChan }}if {{var "v"}} == nil { + {{var "v"}} = make({{ .CTyp }}, 0) + {{var "c"}} = true + } {{end}} +} else { + {{var "hl"}} := {{var "l"}} > 0 + var {{var "rl"}} int + _ = {{var "rl"}} + {{if isSlice }} if {{var "hl"}} { + if 
{{var "l"}} > cap({{var "v"}}) { + {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + if {{var "rl"}} <= cap({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "rl"}}] + } else { + {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) + } + {{var "c"}} = true + } else if {{var "l"}} != len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "l"}}] + {{var "c"}} = true + } + } {{end}} + var {{var "j"}} int + // var {{var "dn"}} bool + for {{var "j"}} = 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { // bounds-check-elimination + {{if not isArray}} if {{var "j"}} == 0 && {{var "v"}} == nil { + if {{var "hl"}} { + {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + } else { + {{var "rl"}} = {{if isSlice}}8{{else if isChan}}64{{end}} + } + {{var "v"}} = make({{if isSlice}}[]{{ .Typ }}{{else if isChan}}{{.CTyp}}{{end}}, {{var "rl"}}) + {{var "c"}} = true + }{{end}} + {{var "h"}}.ElemContainerState({{var "j"}}) + {{/* {{var "dn"}} = r.TryDecodeAsNil() */}}{{/* commented out, as decLineVar handles this already each time */}} + {{if isChan}}{{ $x := printf "%[1]vvcx%[2]v" .TempVar .Rand }}var {{$x}} {{ .Typ }} + {{ decLineVar $x }} + {{var "v"}} <- {{ $x }} + // println(">>>> sending ", {{ $x }}, " into ", {{var "v"}}) // TODO: remove this + {{else}}{{/* // if indefinite, etc, then expand the slice if necessary */}} + var {{var "db"}} bool + if {{var "j"}} >= len({{var "v"}}) { + {{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }}) + {{var "c"}} = true + {{else}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true + {{end}} + } + if {{var "db"}} { + z.DecSwallow() + } else { + {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} + } + {{end}} + } + {{if isSlice}} if {{var "j"}} < len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "j"}}] + {{var "c"}} = true + } else if {{var "j"}} == 0 && {{var "v"}} == nil { + {{var "v"}} = make([]{{ .Typ }}, 0) + {{var "c"}} = true + } {{end}} +} +{{var "h"}}.End() +{{if not isArray }}if {{var "c"}} { + *{{ .Varname }} = {{var "v"}} +}{{end}} diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-dec-map.go.tmpl b/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-dec-map.go.tmpl new file mode 100644 index 0000000000000..8323b54940d04 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-dec-map.go.tmpl @@ -0,0 +1,42 @@ +{{var "v"}} := *{{ .Varname }} +{{var "l"}} := r.ReadMapStart() +{{var "bh"}} := z.DecBasicHandle() +if {{var "v"}} == nil { + {{var "rl"}} := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) + {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) + *{{ .Varname }} = {{var "v"}} +} +var {{var "mk"}} {{ .KTyp }} +var {{var "mv"}} {{ .Typ }} +var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool +if {{var "bh"}}.MapValueReset { + {{if decElemKindPtr}}{{var "mg"}} = true + {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true } + {{else if not decElemKindImmutable}}{{var "mg"}} = true + {{end}} } +if {{var "l"}} != 0 { +{{var "hl"}} := {{var "l"}} > 0 + for {{var "j"}} := 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { + r.ReadMapElemKey() {{/* z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) */}} + {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} +{{ if eq .KTyp "interface{}" }}{{/* 
// special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { + {{var "mk"}} = string({{var "bv"}}) + }{{ end }}{{if decElemKindPtr}} + {{var "ms"}} = true{{end}} + if {{var "mg"}} { + {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] + if {{var "mok"}} { + {{var "ms"}} = false + } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} + } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} + r.ReadMapElemValue() {{/* z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) */}} + {{var "mdn"}} = false + {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y }} + if {{var "mdn"}} { + if {{ var "bh" }}.DeleteOnNilMapValue { delete({{var "v"}}, {{var "mk"}}) } else { {{var "v"}}[{{var "mk"}}] = {{decElemZero}} } + } else if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { + {{var "v"}}[{{var "mk"}}] = {{var "mv"}} + } +} +} // else len==0: TODO: Should we clear map entries? +r.ReadMapEnd() {{/* z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) */}} diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-enc-chan.go.tmpl b/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-enc-chan.go.tmpl new file mode 100644 index 0000000000000..4249588a3cf8d --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-enc-chan.go.tmpl @@ -0,0 +1,27 @@ +{{.Label}}: +switch timeout{{.Sfx}} := z.EncBasicHandle().ChanRecvTimeout; { +case timeout{{.Sfx}} == 0: // only consume available + for { + select { + case b{{.Sfx}} := <-{{.Chan}}: + {{ .Slice }} = append({{.Slice}}, b{{.Sfx}}) + default: + break {{.Label}} + } + } +case timeout{{.Sfx}} > 0: // consume until timeout + tt{{.Sfx}} := time.NewTimer(timeout{{.Sfx}}) + for { + select { + case b{{.Sfx}} := <-{{.Chan}}: + {{.Slice}} = append({{.Slice}}, b{{.Sfx}}) + case <-tt{{.Sfx}}.C: + // close(tt.C) + break {{.Label}} + } + } +default: // consume until close + for b{{.Sfx}} := range {{.Chan}} { + {{.Slice}} = append({{.Slice}}, b{{.Sfx}}) + } +} diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-helper.generated.go b/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-helper.generated.go new file mode 100644 index 0000000000000..2a7d1aab70b3a --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-helper.generated.go @@ -0,0 +1,343 @@ +// comment this out // + build ignore + +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// Code generated from gen-helper.go.tmpl - DO NOT EDIT. + +package codec + +import ( + "encoding" + "reflect" +) + +// GenVersion is the current version of codecgen. +const GenVersion = 10 + +// This file is used to generate helper code for codecgen. +// The values here i.e. genHelper(En|De)coder are not to be used directly by +// library users. They WILL change continuously and without notice. +// +// To help enforce this, we create an unexported type with exported members. +// The only way to get the type is via the one exported type that we control (somewhat). +// +// When static codecs are created for types, they will use this value +// to perform encoding or decoding of primitives or known slice or map types. + +// GenHelperEncoder is exported so that it can be used externally by codecgen. +// +// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. 
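+//
+// (These helpers are referenced only by the code that the codecgen tool emits.)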
+func GenHelperEncoder(e *Encoder) (ge genHelperEncoder, ee genHelperEncDriver) { + ge = genHelperEncoder{e: e} + ee = genHelperEncDriver{encDriver: e.e} + return +} + +// GenHelperDecoder is exported so that it can be used externally by codecgen. +// +// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. +func GenHelperDecoder(d *Decoder) (gd genHelperDecoder, dd genHelperDecDriver) { + gd = genHelperDecoder{d: d} + dd = genHelperDecDriver{decDriver: d.d} + return +} + +type genHelperEncDriver struct { + encDriver +} + +func (x genHelperEncDriver) EncodeBuiltin(rt uintptr, v interface{}) {} +func (x genHelperEncDriver) EncStructFieldKey(keyType valueType, s string) { + encStructFieldKey(s, x.encDriver, nil, keyType, false, false) +} +func (x genHelperEncDriver) EncodeSymbol(s string) { + x.encDriver.EncodeStringEnc(cUTF8, s) +} + +type genHelperDecDriver struct { + decDriver + C checkOverflow +} + +func (x genHelperDecDriver) DecodeBuiltin(rt uintptr, v interface{}) {} +func (x genHelperDecDriver) DecStructFieldKey(keyType valueType, buf *[decScratchByteArrayLen]byte) []byte { + return decStructFieldKey(x.decDriver, keyType, buf) +} +func (x genHelperDecDriver) DecodeInt(bitsize uint8) (i int64) { + return x.C.IntV(x.decDriver.DecodeInt64(), bitsize) +} +func (x genHelperDecDriver) DecodeUint(bitsize uint8) (ui uint64) { + return x.C.UintV(x.decDriver.DecodeUint64(), bitsize) +} +func (x genHelperDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { + f = x.DecodeFloat64() + if chkOverflow32 && chkOvf.Float32(f) { + panicv.errorf("float32 overflow: %v", f) + } + return +} +func (x genHelperDecDriver) DecodeFloat32As64() (f float64) { + f = x.DecodeFloat64() + if chkOvf.Float32(f) { + panicv.errorf("float32 overflow: %v", f) + } + return +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +type genHelperEncoder struct { + M must + e *Encoder + F fastpathT +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +type genHelperDecoder struct { + C checkOverflow + d *Decoder + F fastpathT +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBasicHandle() *BasicHandle { + return f.e.h +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBinary() bool { + return f.e.be // f.e.hh.isBinaryEncoding() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) IsJSONHandle() bool { + return f.e.js +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncFallback(iv interface{}) { + // println(">>>>>>>>> EncFallback") + // f.e.encodeI(iv, false, false) + f.e.encodeValue(reflect.ValueOf(iv), nil, false) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) { + bs, fnerr := iv.MarshalText() + f.e.marshalUtf8(bs, fnerr) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) { + bs, fnerr := iv.MarshalJSON() + f.e.marshalAsis(bs, fnerr) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { + bs, fnerr := iv.MarshalBinary() + f.e.marshalRaw(bs, fnerr) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* +func (f genHelperEncoder) EncRaw(iv Raw) { f.e.rawBytes(iv) } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: builtin no longer supported - so we make this method a no-op, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperEncoder) TimeRtidIfBinc() (v uintptr) { return } + +// func (f genHelperEncoder) TimeRtidIfBinc() uintptr { +// if _, ok := f.e.hh.(*BincHandle); ok { +// return timeTypId +// } +// } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) I2Rtid(v interface{}) uintptr { + return i2rtid(v) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) Extension(rtid uintptr) (xfn *extTypeTagFn) { + return f.e.h.getExt(rtid) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncExtension(v interface{}, xfFn *extTypeTagFn) { + f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) WriteStr(s string) { + f.e.w.writestr(s) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) BytesView(v string) []byte { return bytesView(v) } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperEncoder) HasExtensions() bool { + return len(f.e.h.extHandle) != 0 +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperEncoder) EncExt(v interface{}) (r bool) { + if xfFn := f.e.h.getExt(i2rtid(v)); xfFn != nil { + f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) + return true + } + return false +} + +// ---------------- DECODER FOLLOWS ----------------- + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBasicHandle() *BasicHandle { + return f.d.h +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBinary() bool { + return f.d.be // f.d.hh.isBinaryEncoding() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSwallow() { f.d.swallow() } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecScratchBuffer() []byte { + return f.d.b[:] +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecScratchArrayBuffer() *[decScratchByteArrayLen]byte { + return &f.d.b +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { + // println(">>>>>>>>> DecFallback") + rv := reflect.ValueOf(iv) + if chkPtr { + rv = f.d.ensureDecodeable(rv) + } + f.d.decodeValue(rv, nil, false) + // f.d.decodeValueFallback(rv) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) { + return f.d.decSliceHelperStart() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* +func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) { + f.d.structFieldNotFound(index, name) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) { + f.d.arrayCannotExpand(sliceLen, streamLen) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { + fnerr := tm.UnmarshalText(f.d.d.DecodeStringAsBytes()) + if fnerr != nil { + panic(fnerr) + } +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { + // bs := f.dd.DecodeStringAsBytes() + // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. + fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) + if fnerr != nil { + panic(fnerr) + } +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { + fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, true)) + if fnerr != nil { + panic(fnerr) + } +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecRaw() []byte { return f.d.rawBytes() } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: builtin no longer supported - so we make this method a no-op, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) TimeRtidIfBinc() (v uintptr) { return } + +// func (f genHelperDecoder) TimeRtidIfBinc() uintptr { +// // Note: builtin is no longer supported - so make this a no-op +// if _, ok := f.d.hh.(*BincHandle); ok { +// return timeTypId +// } +// return 0 +// } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) IsJSONHandle() bool { + return f.d.js +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) I2Rtid(v interface{}) uintptr { + return i2rtid(v) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) Extension(rtid uintptr) (xfn *extTypeTagFn) { + return f.d.h.getExt(rtid) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecExtension(v interface{}, xfFn *extTypeTagFn) { + f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) HasExtensions() bool { + return len(f.d.h.extHandle) != 0 +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) DecExt(v interface{}) (r bool) { + if xfFn := f.d.h.getExt(i2rtid(v)); xfFn != nil { + f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) + return true + } + return false +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) { + return decInferLen(clen, maxlen, unit) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* +// +// Deprecated: no longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) StringView(v []byte) string { return stringView(v) } diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-helper.go.tmpl b/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-helper.go.tmpl new file mode 100644 index 0000000000000..f5d0634e6a1c4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-helper.go.tmpl @@ -0,0 +1,308 @@ +// comment this out // + build ignore + +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// Code generated from gen-helper.go.tmpl - DO NOT EDIT. + +package codec + +import ( + "encoding" + "reflect" +) + +// GenVersion is the current version of codecgen. +const GenVersion = {{ .Version }} + +// This file is used to generate helper code for codecgen. +// The values here i.e. genHelper(En|De)coder are not to be used directly by +// library users. They WILL change continuously and without notice. +// +// To help enforce this, we create an unexported type with exported members. +// The only way to get the type is via the one exported type that we control (somewhat). +// +// When static codecs are created for types, they will use this value +// to perform encoding or decoding of primitives or known slice or map types. + +// GenHelperEncoder is exported so that it can be used externally by codecgen. +// +// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. +func GenHelperEncoder(e *Encoder) (ge genHelperEncoder, ee genHelperEncDriver) { + ge = genHelperEncoder{e: e} + ee = genHelperEncDriver{encDriver: e.e} + return +} + +// GenHelperDecoder is exported so that it can be used externally by codecgen. +// +// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. +func GenHelperDecoder(d *Decoder) (gd genHelperDecoder, dd genHelperDecDriver) { + gd = genHelperDecoder{d: d} + dd = genHelperDecDriver{decDriver: d.d} + return +} + +type genHelperEncDriver struct { + encDriver +} + +func (x genHelperEncDriver) EncodeBuiltin(rt uintptr, v interface{}) {} +func (x genHelperEncDriver) EncStructFieldKey(keyType valueType, s string) { + encStructFieldKey(s, x.encDriver, nil, keyType, false, false) +} +func (x genHelperEncDriver) EncodeSymbol(s string) { + x.encDriver.EncodeStringEnc(cUTF8, s) +} + +type genHelperDecDriver struct { + decDriver + C checkOverflow +} + +func (x genHelperDecDriver) DecodeBuiltin(rt uintptr, v interface{}) {} +func (x genHelperDecDriver) DecStructFieldKey(keyType valueType, buf *[decScratchByteArrayLen]byte) []byte { + return decStructFieldKey(x.decDriver, keyType, buf) +} +func (x genHelperDecDriver) DecodeInt(bitsize uint8) (i int64) { + return x.C.IntV(x.decDriver.DecodeInt64(), bitsize) +} +func (x genHelperDecDriver) DecodeUint(bitsize uint8) (ui uint64) { + return x.C.UintV(x.decDriver.DecodeUint64(), bitsize) +} +func (x genHelperDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { + f = x.DecodeFloat64() + if chkOverflow32 && chkOvf.Float32(f) { + panicv.errorf("float32 overflow: %v", f) + } + return +} +func (x genHelperDecDriver) DecodeFloat32As64() (f float64) { + f = x.DecodeFloat64() + if chkOvf.Float32(f) { + panicv.errorf("float32 overflow: %v", f) + } + return +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* +type genHelperEncoder struct { + M must + e *Encoder + F fastpathT +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +type genHelperDecoder struct { + C checkOverflow + d *Decoder + F fastpathT +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBasicHandle() *BasicHandle { + return f.e.h +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBinary() bool { + return f.e.be // f.e.hh.isBinaryEncoding() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) IsJSONHandle() bool { + return f.e.js +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncFallback(iv interface{}) { + // println(">>>>>>>>> EncFallback") + // f.e.encodeI(iv, false, false) + f.e.encodeValue(reflect.ValueOf(iv), nil, false) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) { + bs, fnerr := iv.MarshalText() + f.e.marshalUtf8(bs, fnerr) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) { + bs, fnerr := iv.MarshalJSON() + f.e.marshalAsis(bs, fnerr) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { + bs, fnerr := iv.MarshalBinary() + f.e.marshalRaw(bs, fnerr) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncRaw(iv Raw) { f.e.rawBytes(iv) } +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: builtin no longer supported - so we make this method a no-op, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperEncoder) TimeRtidIfBinc() (v uintptr) { return } +// func (f genHelperEncoder) TimeRtidIfBinc() uintptr { +// if _, ok := f.e.hh.(*BincHandle); ok { +// return timeTypId +// } +// } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) I2Rtid(v interface{}) uintptr { + return i2rtid(v) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) Extension(rtid uintptr) (xfn *extTypeTagFn) { + return f.e.h.getExt(rtid) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncExtension(v interface{}, xfFn *extTypeTagFn) { + f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) WriteStr(s string) { + f.e.w.writestr(s) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) BytesView(v string) []byte { return bytesView(v) } +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperEncoder) HasExtensions() bool { + return len(f.e.h.extHandle) != 0 +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. 
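+// (Old generated files called z.EncExt/z.DecExt directly as a fallback;
+// current output from gen.go uses the Extension lookup together with
+// EncExtension/DecExtension instead.)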
+func (f genHelperEncoder) EncExt(v interface{}) (r bool) { + if xfFn := f.e.h.getExt(i2rtid(v)); xfFn != nil { + f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) + return true + } + return false +} + +// ---------------- DECODER FOLLOWS ----------------- + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBasicHandle() *BasicHandle { + return f.d.h +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBinary() bool { + return f.d.be // f.d.hh.isBinaryEncoding() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSwallow() { f.d.swallow() } +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecScratchBuffer() []byte { + return f.d.b[:] +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecScratchArrayBuffer() *[decScratchByteArrayLen]byte { + return &f.d.b +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { + // println(">>>>>>>>> DecFallback") + rv := reflect.ValueOf(iv) + if chkPtr { + rv = f.d.ensureDecodeable(rv) + } + f.d.decodeValue(rv, nil, false) + // f.d.decodeValueFallback(rv) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) { + return f.d.decSliceHelperStart() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) { + f.d.structFieldNotFound(index, name) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) { + f.d.arrayCannotExpand(sliceLen, streamLen) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { + fnerr := tm.UnmarshalText(f.d.d.DecodeStringAsBytes()) + if fnerr != nil { + panic(fnerr) + } +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { + // bs := f.dd.DecodeStringAsBytes() + // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. + fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) + if fnerr != nil { + panic(fnerr) + } +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { + fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, true)) + if fnerr != nil { + panic(fnerr) + } +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecRaw() []byte { return f.d.rawBytes() } +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: builtin no longer supported - so we make this method a no-op, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) TimeRtidIfBinc() (v uintptr) { return } +// func (f genHelperDecoder) TimeRtidIfBinc() uintptr { +// // Note: builtin is no longer supported - so make this a no-op +// if _, ok := f.d.hh.(*BincHandle); ok { +// return timeTypId +// } +// return 0 +// } + +// FOR USE BY CODECGEN ONLY. 
IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) IsJSONHandle() bool { + return f.d.js +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) I2Rtid(v interface{}) uintptr { + return i2rtid(v) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) Extension(rtid uintptr) (xfn *extTypeTagFn) { + return f.d.h.getExt(rtid) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecExtension(v interface{}, xfFn *extTypeTagFn) { + f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) HasExtensions() bool { + return len(f.d.h.extHandle) != 0 +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) DecExt(v interface{}) (r bool) { + if xfFn := f.d.h.getExt(i2rtid(v)); xfFn != nil { + f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) + return true + } + return false +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) { + return decInferLen(clen, maxlen, unit) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: no longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) StringView(v []byte) string { return stringView(v) } + diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-internal.go b/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-internal.go new file mode 100644 index 0000000000000..d3c51a537b408 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen-internal.go @@ -0,0 +1,284 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
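+//
+// This file drives the text/template machinery used to produce the fast-path
+// and helper sources in this package: the genInternal* helpers below map
+// primitive type names to the encode/decode call strings that the templates
+// splice into generated code, and genInternalGoFile renders a template and
+// gofmts the result into a Go source file.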
+ +package codec + +import ( + "bytes" + "errors" + "go/format" + "io" + "io/ioutil" + "strings" + "sync" + "text/template" +) + +const genVersion = 10 + +func genInternalEncCommandAsString(s string, vname string) string { + switch s { + case "uint", "uint8", "uint16", "uint32", "uint64": + return "ee.EncodeUint(uint64(" + vname + "))" + case "int", "int8", "int16", "int32", "int64": + return "ee.EncodeInt(int64(" + vname + "))" + case "string": + return "if e.h.StringToRaw { ee.EncodeStringBytesRaw(bytesView(" + vname + ")) " + + "} else { ee.EncodeStringEnc(cUTF8, " + vname + ") }" + case "float32": + return "ee.EncodeFloat32(" + vname + ")" + case "float64": + return "ee.EncodeFloat64(" + vname + ")" + case "bool": + return "ee.EncodeBool(" + vname + ")" + // case "symbol": + // return "ee.EncodeSymbol(" + vname + ")" + default: + return "e.encode(" + vname + ")" + } +} + +func genInternalDecCommandAsString(s string) string { + switch s { + case "uint": + return "uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))" + case "uint8": + return "uint8(chkOvf.UintV(dd.DecodeUint64(), 8))" + case "uint16": + return "uint16(chkOvf.UintV(dd.DecodeUint64(), 16))" + case "uint32": + return "uint32(chkOvf.UintV(dd.DecodeUint64(), 32))" + case "uint64": + return "dd.DecodeUint64()" + case "uintptr": + return "uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))" + case "int": + return "int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))" + case "int8": + return "int8(chkOvf.IntV(dd.DecodeInt64(), 8))" + case "int16": + return "int16(chkOvf.IntV(dd.DecodeInt64(), 16))" + case "int32": + return "int32(chkOvf.IntV(dd.DecodeInt64(), 32))" + case "int64": + return "dd.DecodeInt64()" + + case "string": + return "dd.DecodeString()" + case "float32": + return "float32(chkOvf.Float32V(dd.DecodeFloat64()))" + case "float64": + return "dd.DecodeFloat64()" + case "bool": + return "dd.DecodeBool()" + default: + panic(errors.New("gen internal: unknown type for decode: " + s)) + } +} + +func genInternalZeroValue(s string) string { + switch s { + case "interface{}", "interface {}": + return "nil" + case "bool": + return "false" + case "string": + return `""` + default: + return "0" + } +} + +var genInternalNonZeroValueIdx [5]uint64 +var genInternalNonZeroValueStrs = [2][5]string{ + {`"string-is-an-interface"`, "true", `"some-string"`, "11.1", "33"}, + {`"string-is-an-interface-2"`, "true", `"some-string-2"`, "22.2", "44"}, +} + +func genInternalNonZeroValue(s string) string { + switch s { + case "interface{}", "interface {}": + genInternalNonZeroValueIdx[0]++ + return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[0]%2][0] // return string, to remove ambiguity + case "bool": + genInternalNonZeroValueIdx[1]++ + return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[1]%2][1] + case "string": + genInternalNonZeroValueIdx[2]++ + return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[2]%2][2] + case "float32", "float64", "float", "double": + genInternalNonZeroValueIdx[3]++ + return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[3]%2][3] + default: + genInternalNonZeroValueIdx[4]++ + return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[4]%2][4] + } +} + +func genInternalSortType(s string, elem bool) string { + for _, v := range [...]string{"int", "uint", "float", "bool", "string"} { + if strings.HasPrefix(s, v) { + if elem { + if v == "int" || v == "uint" || v == "float" { + return v + "64" + } else { + return v + } + } + return v + "Slice" + } + } + panic("sorttype: unexpected type: " + 
s) +} + +type genV struct { + // genV is either a primitive (Primitive != "") or a map (MapKey != "") or a slice + MapKey string + Elem string + Primitive string + Size int +} + +type genInternal struct { + Version int + Values []genV +} + +func (x genInternal) FastpathLen() (l int) { + for _, v := range x.Values { + if v.Primitive == "" && !(v.MapKey == "" && v.Elem == "uint8") { + l++ + } + } + return +} + +// var genInternalMu sync.Mutex +var genInternalV = genInternal{Version: genVersion} +var genInternalTmplFuncs template.FuncMap +var genInternalOnce sync.Once + +func genInternalInit() { + types := [...]string{ + "interface{}", + "string", + "float32", + "float64", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintptr", + "int", + "int8", + "int16", + "int32", + "int64", + "bool", + } + // keep as slice, so it is in specific iteration order. + // Initial order was uint64, string, interface{}, int, int64 + mapvaltypes := [...]string{ + "interface{}", + "string", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintptr", + "int", + "int8", + "int16", + "int32", + "int64", + "float32", + "float64", + "bool", + } + wordSizeBytes := int(intBitsize) / 8 + + mapvaltypes2 := map[string]int{ + "interface{}": 2 * wordSizeBytes, + "string": 2 * wordSizeBytes, + "uint": 1 * wordSizeBytes, + "uint8": 1, + "uint16": 2, + "uint32": 4, + "uint64": 8, + "uintptr": 1 * wordSizeBytes, + "int": 1 * wordSizeBytes, + "int8": 1, + "int16": 2, + "int32": 4, + "int64": 8, + "float32": 4, + "float64": 8, + "bool": 1, + } + var gt = genInternal{Version: genVersion} + + // For each slice or map type, there must be a (symmetrical) Encode and Decode fast-path function + for _, s := range types { + gt.Values = append(gt.Values, genV{Primitive: s, Size: mapvaltypes2[s]}) + // if s != "uint8" { // do not generate fast path for slice of bytes. Treat specially already. + // gt.Values = append(gt.Values, genV{Elem: s, Size: mapvaltypes2[s]}) + // } + gt.Values = append(gt.Values, genV{Elem: s, Size: mapvaltypes2[s]}) + if _, ok := mapvaltypes2[s]; !ok { + gt.Values = append(gt.Values, genV{MapKey: s, Elem: s, Size: 2 * mapvaltypes2[s]}) + } + for _, ms := range mapvaltypes { + gt.Values = append(gt.Values, genV{MapKey: s, Elem: ms, Size: mapvaltypes2[s] + mapvaltypes2[ms]}) + } + } + + funcs := make(template.FuncMap) + // funcs["haspfx"] = strings.HasPrefix + funcs["encmd"] = genInternalEncCommandAsString + funcs["decmd"] = genInternalDecCommandAsString + funcs["zerocmd"] = genInternalZeroValue + funcs["nonzerocmd"] = genInternalNonZeroValue + funcs["hasprefix"] = strings.HasPrefix + funcs["sorttype"] = genInternalSortType + + genInternalV = gt + genInternalTmplFuncs = funcs +} + +// genInternalGoFile is used to generate source files from templates. +// It is run by the program author alone. +// Unfortunately, it has to be exported so that it can be called from a command line tool. +// *** DO NOT USE *** +func genInternalGoFile(r io.Reader, w io.Writer) (err error) { + genInternalOnce.Do(genInternalInit) + + gt := genInternalV + + t := template.New("").Funcs(genInternalTmplFuncs) + + tmplstr, err := ioutil.ReadAll(r) + if err != nil { + return + } + + if t, err = t.Parse(string(tmplstr)); err != nil { + return + } + + var out bytes.Buffer + err = t.Execute(&out, gt) + if err != nil { + return + } + + bout, err := format.Source(out.Bytes()) + if err != nil { + w.Write(out.Bytes()) // write out if error, so we can still see. 
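+		// Writing the unformatted bytes above means a format.Source failure
+		// still leaves inspectable output; the error itself is returned below.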
+ // w.Write(bout) // write out if error, as much as possible, so we can still see. + return + } + w.Write(bout) + return +} diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen.generated.go b/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen.generated.go new file mode 100644 index 0000000000000..2178efd5b7189 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen.generated.go @@ -0,0 +1,165 @@ +//go:build codecgen.exec +// +build codecgen.exec + +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl + +const genDecMapTmpl = ` +{{var "v"}} := *{{ .Varname }} +{{var "l"}} := r.ReadMapStart() +{{var "bh"}} := z.DecBasicHandle() +if {{var "v"}} == nil { + {{var "rl"}} := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) + {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) + *{{ .Varname }} = {{var "v"}} +} +var {{var "mk"}} {{ .KTyp }} +var {{var "mv"}} {{ .Typ }} +var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool +if {{var "bh"}}.MapValueReset { + {{if decElemKindPtr}}{{var "mg"}} = true + {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true } + {{else if not decElemKindImmutable}}{{var "mg"}} = true + {{end}} } +if {{var "l"}} != 0 { +{{var "hl"}} := {{var "l"}} > 0 + for {{var "j"}} := 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { + r.ReadMapElemKey() {{/* z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) */}} + {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} +{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { + {{var "mk"}} = string({{var "bv"}}) + }{{ end }}{{if decElemKindPtr}} + {{var "ms"}} = true{{end}} + if {{var "mg"}} { + {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] + if {{var "mok"}} { + {{var "ms"}} = false + } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} + } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} + r.ReadMapElemValue() {{/* z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) */}} + {{var "mdn"}} = false + {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y }} + if {{var "mdn"}} { + if {{ var "bh" }}.DeleteOnNilMapValue { delete({{var "v"}}, {{var "mk"}}) } else { {{var "v"}}[{{var "mk"}}] = {{decElemZero}} } + } else if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { + {{var "v"}}[{{var "mk"}}] = {{var "mv"}} + } +} +} // else len==0: TODO: Should we clear map entries? 
+r.ReadMapEnd() {{/* z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) */}} +` + +const genDecListTmpl = ` +{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} +{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}} +var {{var "c"}} bool {{/* // changed */}} +_ = {{var "c"}}{{end}} +if {{var "l"}} == 0 { + {{if isSlice }}if {{var "v"}} == nil { + {{var "v"}} = []{{ .Typ }}{} + {{var "c"}} = true + } else if len({{var "v"}}) != 0 { + {{var "v"}} = {{var "v"}}[:0] + {{var "c"}} = true + } {{else if isChan }}if {{var "v"}} == nil { + {{var "v"}} = make({{ .CTyp }}, 0) + {{var "c"}} = true + } {{end}} +} else { + {{var "hl"}} := {{var "l"}} > 0 + var {{var "rl"}} int + _ = {{var "rl"}} + {{if isSlice }} if {{var "hl"}} { + if {{var "l"}} > cap({{var "v"}}) { + {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + if {{var "rl"}} <= cap({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "rl"}}] + } else { + {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) + } + {{var "c"}} = true + } else if {{var "l"}} != len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "l"}}] + {{var "c"}} = true + } + } {{end}} + var {{var "j"}} int + // var {{var "dn"}} bool + for {{var "j"}} = 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { // bounds-check-elimination + {{if not isArray}} if {{var "j"}} == 0 && {{var "v"}} == nil { + if {{var "hl"}} { + {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + } else { + {{var "rl"}} = {{if isSlice}}8{{else if isChan}}64{{end}} + } + {{var "v"}} = make({{if isSlice}}[]{{ .Typ }}{{else if isChan}}{{.CTyp}}{{end}}, {{var "rl"}}) + {{var "c"}} = true + }{{end}} + {{var "h"}}.ElemContainerState({{var "j"}}) + {{/* {{var "dn"}} = r.TryDecodeAsNil() */}}{{/* commented out, as decLineVar handles this already each time */}} + {{if isChan}}{{ $x := printf "%[1]vvcx%[2]v" .TempVar .Rand }}var {{$x}} {{ .Typ }} + {{ decLineVar $x }} + {{var "v"}} <- {{ $x }} + // println(">>>> sending ", {{ $x }}, " into ", {{var "v"}}) // TODO: remove this + {{else}}{{/* // if indefinite, etc, then expand the slice if necessary */}} + var {{var "db"}} bool + if {{var "j"}} >= len({{var "v"}}) { + {{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }}) + {{var "c"}} = true + {{else}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true + {{end}} + } + if {{var "db"}} { + z.DecSwallow() + } else { + {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} + } + {{end}} + } + {{if isSlice}} if {{var "j"}} < len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "j"}}] + {{var "c"}} = true + } else if {{var "j"}} == 0 && {{var "v"}} == nil { + {{var "v"}} = make([]{{ .Typ }}, 0) + {{var "c"}} = true + } {{end}} +} +{{var "h"}}.End() +{{if not isArray }}if {{var "c"}} { + *{{ .Varname }} = {{var "v"}} +}{{end}} +` + +const genEncChanTmpl = ` +{{.Label}}: +switch timeout{{.Sfx}} := z.EncBasicHandle().ChanRecvTimeout; { +case timeout{{.Sfx}} == 0: // only consume available + for { + select { + case b{{.Sfx}} := <-{{.Chan}}: + {{ .Slice }} = append({{.Slice}}, b{{.Sfx}}) + default: + break {{.Label}} + } + } +case timeout{{.Sfx}} > 0: // consume until timeout + tt{{.Sfx}} := time.NewTimer(timeout{{.Sfx}}) + for { + select { + case b{{.Sfx}} := <-{{.Chan}}: + {{.Slice}} = append({{.Slice}}, b{{.Sfx}}) + case <-tt{{.Sfx}}.C: + // close(tt.C) + break {{.Label}} + } + } +default: // 
consume until close
+	for b{{.Sfx}} := range {{.Chan}} {
+		{{.Slice}} = append({{.Slice}}, b{{.Sfx}})
+	}
+}
+`
diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen.go b/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen.go
new file mode 100644
index 0000000000000..efaff02e3ab24
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/gen.go
@@ -0,0 +1,1874 @@
+//go:build codecgen.exec
+// +build codecgen.exec
+
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"io"
+	"math/rand"
+	"reflect"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"text/template"
+	"time"
+	"unicode"
+	"unicode/utf8"
+)
+
+// ---------------------------------------------------
+// codecgen supports the full cycle of reflection-based codec:
+// - RawExt
+// - Raw
+// - Extensions
+// - (Binary|Text|JSON)(Unm|M)arshal
+// - generic by-kind
+//
+// This means that, for dynamic things, we MUST use reflection to at least get the reflect.Type.
+// In those areas, we try to only do reflection or interface-conversion when NECESSARY:
+// - Extensions, only if Extensions are configured.
+//
+// However, codecgen doesn't support the following:
+// - Canonical option. (codecgen IGNORES it currently)
+//   This is just because it has not been implemented.
+// - MissingFielder implementation.
+//   If a type implements MissingFielder, it is completely ignored by codecgen.
+//
+// During encode/decode, Selfer takes precedence.
+// A type implementing Selfer will know how to encode/decode itself statically.
+//
+// The following field types are supported:
+//
+// array: [n]T
+// slice: []T
+// map: map[K]V
+// primitive: [u]int[n], float(32|64), bool, string
+// struct
+//
+// ---------------------------------------------------
+// Note that a Selfer cannot call (e|d).(En|De)code on itself,
+// as this will cause a circular reference, as (En|De)code will call Selfer methods.
+// Any type that implements Selfer must implement it completely and not fall back to (En|De)code.
+//
+// In addition, code in this file manages the generation of fast-path implementations of
+// encode/decode of slices/maps of primitive keys/values.
+//
+// Users MUST re-generate their implementations whenever the code shape changes.
+// The generated code will panic if it was generated with a version older than the supporting library.
+// ---------------------------------------------------
+//
+// The codec framework is very feature-rich.
+// When encoding or decoding into an interface, it depends on the runtime type of the interface.
+// The type of the interface may be a named type, an extension, etc.
+// Consequently, we fall back to runtime codec for encoding/decoding interfaces.
+// In addition, we fall back for any value which cannot be guaranteed at runtime.
+// This allows us to support ANY value, including any named types, specifically those which
+// do not implement our interfaces (e.g. Selfer).
+//
+// This explains some slowness compared to other code generation codecs (e.g. msgp).
+// This reduction in speed is only seen when your code refers to interfaces,
+// e.g. type T struct { A interface{}; B []interface{}; C map[string]interface{} }
+//
+// codecgen will panic if the file was generated with an old version of the library in use.
+//
+// Note:
+//
+// It was a conscious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil.
+// This way, there isn't a function call overhead just to see that we should not enter a block of code. +// +// GenVersion is the current version of codecgen. +// +// NOTE: Increment this value each time codecgen changes fundamentally. +// Fundamental changes are: +// - helper methods change (signature change, new ones added, some removed, etc) +// - codecgen command line changes +// +// v1: Initial Version +// v2: +// v3: Changes for Kubernetes: +// +// changes in signature of some unpublished helper methods and codecgen cmdline arguments. +// +// v4: Removed separator support from (en|de)cDriver, and refactored codec(gen) +// v5: changes to support faster json decoding. Let encoder/decoder maintain state of collections. +// v6: removed unsafe from gen, and now uses codecgen.exec tag +// v7: +// v8: current - we now maintain compatibility with old generated code. +// v9: skipped +// v10: modified encDriver and decDriver interfaces. Remove deprecated methods after Jan 1, 2019 +const ( + genCodecPkg = "codec1978" + genTempVarPfx = "yy" + genTopLevelVarName = "x" + + // ignore canBeNil parameter, and always set to true. + // This is because nil can appear anywhere, so we should always check. + genAnythingCanBeNil = true + + // if genUseOneFunctionForDecStructMap, make a single codecDecodeSelferFromMap function; + // else make codecDecodeSelferFromMap{LenPrefix,CheckBreak} so that conditionals + // are not executed a lot. + // + // From testing, it didn't make much difference in runtime, so keep as true (one function only) + genUseOneFunctionForDecStructMap = true +) + +type genStructMapStyle uint8 + +const ( + genStructMapStyleConsolidated genStructMapStyle = iota + genStructMapStyleLenPrefix + genStructMapStyleCheckBreak +) + +var ( + errGenAllTypesSamePkg = errors.New("All types must be in the same package") + errGenExpectArrayOrMap = errors.New("unexpected type. Expecting array/map/slice") + + genBase64enc = base64.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789__") + genQNameRegex = regexp.MustCompile(`[A-Za-z_.]+`) +) + +type genBuf struct { + buf []byte +} + +func (x *genBuf) s(s string) *genBuf { x.buf = append(x.buf, s...); return x } +func (x *genBuf) b(s []byte) *genBuf { x.buf = append(x.buf, s...); return x } +func (x *genBuf) v() string { return string(x.buf) } +func (x *genBuf) f(s string, args ...interface{}) { x.s(fmt.Sprintf(s, args...)) } +func (x *genBuf) reset() { + if x.buf != nil { + x.buf = x.buf[:0] + } +} + +// genRunner holds some state used during a Gen run. 
+type genRunner struct { + w io.Writer // output + c uint64 // counter used for generating varsfx + t []reflect.Type // list of types to run selfer on + + tc reflect.Type // currently running selfer on this type + te map[uintptr]bool // types for which the encoder has been created + td map[uintptr]bool // types for which the decoder has been created + cp string // codec import path + + im map[string]reflect.Type // imports to add + imn map[string]string // package names of imports to add + imc uint64 // counter for import numbers + + is map[reflect.Type]struct{} // types seen during import search + bp string // base PkgPath, for which we are generating for + + cpfx string // codec package prefix + + tm map[reflect.Type]struct{} // types for which enc/dec must be generated + ts []reflect.Type // types for which enc/dec must be generated + + xs string // top level variable/constant suffix + hn string // fn helper type name + + ti *TypeInfos + // rr *rand.Rand // random generator for file-specific types + + nx bool // no extensions +} + +// Gen will write a complete go file containing Selfer implementations for each +// type passed. All the types must be in the same package. +// +// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE. +func Gen(w io.Writer, buildTags, pkgName, uid string, noExtensions bool, + ti *TypeInfos, typ ...reflect.Type) { + // All types passed to this method do not have a codec.Selfer method implemented directly. + // codecgen already checks the AST and skips any types that define the codec.Selfer methods. + // Consequently, there's no need to check and trim them if they implement codec.Selfer + + if len(typ) == 0 { + return + } + x := genRunner{ + w: w, + t: typ, + te: make(map[uintptr]bool), + td: make(map[uintptr]bool), + im: make(map[string]reflect.Type), + imn: make(map[string]string), + is: make(map[reflect.Type]struct{}), + tm: make(map[reflect.Type]struct{}), + ts: []reflect.Type{}, + bp: genImportPath(typ[0]), + xs: uid, + ti: ti, + nx: noExtensions, + } + if x.ti == nil { + x.ti = defTypeInfos + } + if x.xs == "" { + rr := rand.New(rand.NewSource(time.Now().UnixNano())) + x.xs = strconv.FormatInt(rr.Int63n(9999), 10) + } + + // gather imports first: + x.cp = genImportPath(reflect.TypeOf(x)) + x.imn[x.cp] = genCodecPkg + for _, t := range typ { + // fmt.Printf("###########: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name()) + if genImportPath(t) != x.bp { + panic(errGenAllTypesSamePkg) + } + x.genRefPkgs(t) + } + if buildTags != "" { + x.line("// +build " + buildTags) + x.line("") + } + x.line(` + +// Code generated by codecgen - DO NOT EDIT. + +`) + x.line("package " + pkgName) + x.line("") + x.line("import (") + if x.cp != x.bp { + x.cpfx = genCodecPkg + "." 
+ x.linef("%s \"%s\"", genCodecPkg, x.cp) + } + // use a sorted set of im keys, so that we can get consistent output + imKeys := make([]string, 0, len(x.im)) + for k := range x.im { + imKeys = append(imKeys, k) + } + sort.Strings(imKeys) + for _, k := range imKeys { // for k, _ := range x.im { + if k == x.imn[k] { + x.linef("\"%s\"", k) + } else { + x.linef("%s \"%s\"", x.imn[k], k) + } + } + // add required packages + for _, k := range [...]string{"runtime", "errors", "strconv"} { // "reflect", "fmt" + if _, ok := x.im[k]; !ok { + x.line("\"" + k + "\"") + } + } + x.line(")") + x.line("") + + x.line("const (") + x.linef("// ----- content types ----") + x.linef("codecSelferCcUTF8%s = %v", x.xs, int64(cUTF8)) + x.linef("codecSelferCcRAW%s = %v", x.xs, int64(cRAW)) + x.linef("// ----- value types used ----") + for _, vt := range [...]valueType{ + valueTypeArray, valueTypeMap, valueTypeString, + valueTypeInt, valueTypeUint, valueTypeFloat} { + x.linef("codecSelferValueType%s%s = %v", vt.String(), x.xs, int64(vt)) + } + + x.linef("codecSelferBitsize%s = uint8(32 << (^uint(0) >> 63))", x.xs) + x.line(")") + x.line("var (") + x.line("errCodecSelferOnlyMapOrArrayEncodeToStruct" + x.xs + " = errors.New(`only encoded map or array can be decoded into a struct`)") + x.line(")") + x.line("") + + x.hn = "codecSelfer" + x.xs + x.line("type " + x.hn + " struct{}") + x.line("") + + x.varsfxreset() + x.line("func init() {") + x.linef("if %sGenVersion != %v {", x.cpfx, genVersion) + x.line("_, file, _, _ := runtime.Caller(0)") + x.outf(`panic("codecgen version mismatch: current: %v, need " + strconv.FormatInt(int64(%sGenVersion), 10) + ". Re-generate file: " + file)`, genVersion, x.cpfx) + // x.out(`panic(fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", `) + // x.linef(`%v, %sGenVersion, file))`, genVersion, x.cpfx) + x.linef("}") + x.line("if false { var _ byte = 0; // reference the types, but skip this branch at build/run time") + // x.line("_ = strconv.ParseInt") + var n int + // for k, t := range x.im { + for _, k := range imKeys { + t := x.im[k] + x.linef("var v%v %s.%s", n, x.imn[k], t.Name()) + n++ + } + if n > 0 { + x.out("_") + for i := 1; i < n; i++ { + x.out(", _") + } + x.out(" = v0") + for i := 1; i < n; i++ { + x.outf(", v%v", i) + } + } + x.line("} ") // close if false + x.line("}") // close init + x.line("") + + // generate rest of type info + for _, t := range typ { + x.tc = t + x.selfer(true) + x.selfer(false) + } + + for _, t := range x.ts { + rtid := rt2id(t) + // generate enc functions for all these slice/map types. + x.varsfxreset() + x.linef("func (x %s) enc%s(v %s%s, e *%sEncoder) {", x.hn, x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), x.cpfx) + x.genRequiredMethodVars(true) + switch t.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + x.encListFallback("v", t) + case reflect.Map: + x.encMapFallback("v", t) + default: + panic(errGenExpectArrayOrMap) + } + x.line("}") + x.line("") + + // generate dec functions for all these slice/map types. 
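+	// e.g. (illustrative; actual names depend on the generated suffix) for a
+	// slice type []Foo, this emits a method shaped like:
+	//   func (x codecSelfer1234) decSliceFoo(v *[]Foo, d *codec1978.Decoder) { ... }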
+ x.varsfxreset() + x.linef("func (x %s) dec%s(v *%s, d *%sDecoder) {", x.hn, x.genMethodNameT(t), x.genTypeName(t), x.cpfx) + x.genRequiredMethodVars(false) + switch t.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + x.decListFallback("v", rtid, t) + case reflect.Map: + x.decMapFallback("v", rtid, t) + default: + panic(errGenExpectArrayOrMap) + } + x.line("}") + x.line("") + } + + x.line("") +} + +func (x *genRunner) checkForSelfer(t reflect.Type, varname string) bool { + // return varname != genTopLevelVarName && t != x.tc + // the only time we checkForSelfer is if we are not at the TOP of the generated code. + return varname != genTopLevelVarName +} + +func (x *genRunner) arr2str(t reflect.Type, s string) string { + if t.Kind() == reflect.Array { + return s + } + return "" +} + +func (x *genRunner) genRequiredMethodVars(encode bool) { + x.line("var h " + x.hn) + if encode { + x.line("z, r := " + x.cpfx + "GenHelperEncoder(e)") + } else { + x.line("z, r := " + x.cpfx + "GenHelperDecoder(d)") + } + x.line("_, _, _ = h, z, r") +} + +func (x *genRunner) genRefPkgs(t reflect.Type) { + if _, ok := x.is[t]; ok { + return + } + x.is[t] = struct{}{} + tpkg, tname := genImportPath(t), t.Name() + if tpkg != "" && tpkg != x.bp && tpkg != x.cp && tname != "" && tname[0] >= 'A' && tname[0] <= 'Z' { + if _, ok := x.im[tpkg]; !ok { + x.im[tpkg] = t + if idx := strings.LastIndex(tpkg, "/"); idx < 0 { + x.imn[tpkg] = tpkg + } else { + x.imc++ + x.imn[tpkg] = "pkg" + strconv.FormatUint(x.imc, 10) + "_" + genGoIdentifier(tpkg[idx+1:], false) + } + } + } + switch t.Kind() { + case reflect.Array, reflect.Slice, reflect.Ptr, reflect.Chan: + x.genRefPkgs(t.Elem()) + case reflect.Map: + x.genRefPkgs(t.Elem()) + x.genRefPkgs(t.Key()) + case reflect.Struct: + for i := 0; i < t.NumField(); i++ { + if fname := t.Field(i).Name; fname != "" && fname[0] >= 'A' && fname[0] <= 'Z' { + x.genRefPkgs(t.Field(i).Type) + } + } + } +} + +func (x *genRunner) varsfx() string { + x.c++ + return strconv.FormatUint(x.c, 10) +} + +func (x *genRunner) varsfxreset() { + x.c = 0 +} + +func (x *genRunner) out(s string) { + _, err := io.WriteString(x.w, s) + if err != nil { + panic(err) + } +} + +func (x *genRunner) outf(s string, params ...interface{}) { + _, err := fmt.Fprintf(x.w, s, params...) + if err != nil { + panic(err) + } +} + +func (x *genRunner) line(s string) { + x.out(s) + if len(s) == 0 || s[len(s)-1] != '\n' { + x.out("\n") + } +} + +func (x *genRunner) linef(s string, params ...interface{}) { + x.outf(s, params...) + if len(s) == 0 || s[len(s)-1] != '\n' { + x.out("\n") + } +} + +func (x *genRunner) genTypeName(t reflect.Type) (n string) { + // defer func() { fmt.Printf(">>>> ####: genTypeName: t: %v, name: '%s'\n", t, n) }() + + // if the type has a PkgPath, which doesn't match the current package, + // then include it. 
+ // We cannot depend on t.String() because it includes current package, + // or t.PkgPath because it includes full import path, + // + var ptrPfx string + for t.Kind() == reflect.Ptr { + ptrPfx += "*" + t = t.Elem() + } + if tn := t.Name(); tn != "" { + return ptrPfx + x.genTypeNamePrim(t) + } + switch t.Kind() { + case reflect.Map: + return ptrPfx + "map[" + x.genTypeName(t.Key()) + "]" + x.genTypeName(t.Elem()) + case reflect.Slice: + return ptrPfx + "[]" + x.genTypeName(t.Elem()) + case reflect.Array: + return ptrPfx + "[" + strconv.FormatInt(int64(t.Len()), 10) + "]" + x.genTypeName(t.Elem()) + case reflect.Chan: + return ptrPfx + t.ChanDir().String() + " " + x.genTypeName(t.Elem()) + default: + if t == intfTyp { + return ptrPfx + "interface{}" + } else { + return ptrPfx + x.genTypeNamePrim(t) + } + } +} + +func (x *genRunner) genTypeNamePrim(t reflect.Type) (n string) { + if t.Name() == "" { + return t.String() + } else if genImportPath(t) == "" || genImportPath(t) == genImportPath(x.tc) { + return t.Name() + } else { + return x.imn[genImportPath(t)] + "." + t.Name() + // return t.String() // best way to get the package name inclusive + } +} + +func (x *genRunner) genZeroValueR(t reflect.Type) string { + // if t is a named type, w + switch t.Kind() { + case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func, + reflect.Slice, reflect.Map, reflect.Invalid: + return "nil" + case reflect.Bool: + return "false" + case reflect.String: + return `""` + case reflect.Struct, reflect.Array: + return x.genTypeName(t) + "{}" + default: // all numbers + return "0" + } +} + +func (x *genRunner) genMethodNameT(t reflect.Type) (s string) { + return genMethodNameT(t, x.tc) +} + +func (x *genRunner) selfer(encode bool) { + t := x.tc + t0 := t + // always make decode use a pointer receiver, + // and structs/arrays always use a ptr receiver (encode|decode) + isptr := !encode || t.Kind() == reflect.Array || (t.Kind() == reflect.Struct && t != timeTyp) + x.varsfxreset() + + fnSigPfx := "func (" + genTopLevelVarName + " " + if isptr { + fnSigPfx += "*" + } + fnSigPfx += x.genTypeName(t) + x.out(fnSigPfx) + + if isptr { + t = reflect.PtrTo(t) + } + if encode { + x.line(") CodecEncodeSelf(e *" + x.cpfx + "Encoder) {") + x.genRequiredMethodVars(true) + x.encVar(genTopLevelVarName, t) + } else { + x.line(") CodecDecodeSelf(d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + // do not use decVar, as there is no need to check TryDecodeAsNil + // or way to elegantly handle that, and also setting it to a + // non-nil value doesn't affect the pointer passed. 
+ // x.decVar(genTopLevelVarName, t, false) + x.dec(genTopLevelVarName, t0, true) + } + x.line("}") + x.line("") + + if encode || t0.Kind() != reflect.Struct { + return + } + + // write is containerMap + if genUseOneFunctionForDecStructMap { + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromMap(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleConsolidated) + x.line("}") + x.line("") + } else { + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromMapLenPrefix(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleLenPrefix) + x.line("}") + x.line("") + + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromMapCheckBreak(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleCheckBreak) + x.line("}") + x.line("") + } + + // write containerArray + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromArray(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructArray(genTopLevelVarName, "l", "return", rt2id(t0), t0) + x.line("}") + x.line("") + +} + +// used for chan, array, slice, map +func (x *genRunner) xtraSM(varname string, t reflect.Type, encode, isptr bool) { + var ptrPfx, addrPfx string + if isptr { + ptrPfx = "*" + } else { + addrPfx = "&" + } + if encode { + x.linef("h.enc%s((%s%s)(%s), e)", x.genMethodNameT(t), ptrPfx, x.genTypeName(t), varname) + } else { + x.linef("h.dec%s((*%s)(%s%s), d)", x.genMethodNameT(t), x.genTypeName(t), addrPfx, varname) + } + x.registerXtraT(t) +} + +func (x *genRunner) registerXtraT(t reflect.Type) { + // recursively register the types + if _, ok := x.tm[t]; ok { + return + } + var tkey reflect.Type + switch t.Kind() { + case reflect.Chan, reflect.Slice, reflect.Array: + case reflect.Map: + tkey = t.Key() + default: + return + } + x.tm[t] = struct{}{} + x.ts = append(x.ts, t) + // check if this refers to any xtra types eg. a slice of array: add the array + x.registerXtraT(t.Elem()) + if tkey != nil { + x.registerXtraT(tkey) + } +} + +// encVar will encode a variable. +// The parameter, t, is the reflect.Type of the variable itself +func (x *genRunner) encVar(varname string, t reflect.Type) { + // fmt.Printf(">>>>>> varname: %s, t: %v\n", varname, t) + var checkNil bool + switch t.Kind() { + case reflect.Ptr, reflect.Interface, reflect.Slice, reflect.Map, reflect.Chan: + checkNil = true + } + if checkNil { + x.linef("if %s == nil { r.EncodeNil() } else { ", varname) + } + + switch t.Kind() { + case reflect.Ptr: + telem := t.Elem() + tek := telem.Kind() + if tek == reflect.Array || (tek == reflect.Struct && telem != timeTyp) { + x.enc(varname, genNonPtr(t)) + break + } + i := x.varsfx() + x.line(genTempVarPfx + i + " := *" + varname) + x.enc(genTempVarPfx+i, genNonPtr(t)) + case reflect.Struct, reflect.Array: + if t == timeTyp { + x.enc(varname, t) + break + } + i := x.varsfx() + x.line(genTempVarPfx + i + " := &" + varname) + x.enc(genTempVarPfx+i, t) + default: + x.enc(varname, t) + } + + if checkNil { + x.line("}") + } + +} + +// enc will encode a variable (varname) of type t, where t represents T. 
+// if t is !time.Time and t is of kind reflect.Struct or reflect.Array, varname is of type *T +// (to prevent copying), +// else t is of type T +func (x *genRunner) enc(varname string, t reflect.Type) { + rtid := rt2id(t) + ti2 := x.ti.get(rtid, t) + // We call CodecEncodeSelf if one of the following are honored: + // - the type already implements Selfer, call that + // - the type has a Selfer implementation just created, use that + // - the type is in the list of the ones we will generate for, but it is not currently being generated + + mi := x.varsfx() + // tptr := reflect.PtrTo(t) + tk := t.Kind() + if x.checkForSelfer(t, varname) { + if tk == reflect.Array || (tk == reflect.Struct && rtid != timeTypId) { // varname is of type *T + // if tptr.Implements(selferTyp) || t.Implements(selferTyp) { + if ti2.isFlag(typeInfoFlagIsZeroerPtr) || ti2.isFlag(typeInfoFlagIsZeroer) { + x.line(varname + ".CodecEncodeSelf(e)") + return + } + } else { // varname is of type T + if ti2.cs { // t.Implements(selferTyp) { + x.line(varname + ".CodecEncodeSelf(e)") + return + } else if ti2.csp { // tptr.Implements(selferTyp) { + x.linef("%ssf%s := &%s", genTempVarPfx, mi, varname) + x.linef("%ssf%s.CodecEncodeSelf(e)", genTempVarPfx, mi) + return + } + } + + if _, ok := x.te[rtid]; ok { + x.line(varname + ".CodecEncodeSelf(e)") + return + } + } + + inlist := false + for _, t0 := range x.t { + if t == t0 { + inlist = true + if x.checkForSelfer(t, varname) { + x.line(varname + ".CodecEncodeSelf(e)") + return + } + break + } + } + + var rtidAdded bool + if t == x.tc { + x.te[rtid] = true + rtidAdded = true + } + + // check if + // - type is time.Time, RawExt, Raw + // - the type implements (Text|JSON|Binary)(Unm|M)arshal + + x.line("if false {") //start if block + defer func() { x.line("}") }() //end if block + + if t == timeTyp { + x.linef("} else if !z.EncBasicHandle().TimeNotBuiltin { r.EncodeTime(%s)", varname) + // return + } + if t == rawTyp { + x.linef("} else { z.EncRaw(%s)", varname) + return + } + if t == rawExtTyp { + x.linef("} else { r.EncodeRawExt(%s, e)", varname) + return + } + // only check for extensions if the type is named, and has a packagePath. 
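+ // Illustrative shape of the emitted extension check (temp name yyxt1 and
+ // target x.F are assumed examples, not from this file):
+ //   } else if yyxt1 := z.Extension(z.I2Rtid(x.F)); yyxt1 != nil { z.EncExtension(x.F, yyxt1)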
+ var arrayOrStruct = tk == reflect.Array || tk == reflect.Struct // meaning varname is of type *T
+ if !x.nx && genImportPath(t) != "" && t.Name() != "" {
+ yy := fmt.Sprintf("%sxt%s", genTempVarPfx, mi)
+ x.linef("} else if %s := z.Extension(z.I2Rtid(%s)); %s != nil { z.EncExtension(%s, %s) ", yy, varname, yy, varname, yy)
+ }
+ if arrayOrStruct { // varname is of type *T
+ if ti2.bm || ti2.bmp { // t.Implements(binaryMarshalerTyp) || tptr.Implements(binaryMarshalerTyp) {
+ x.linef("} else if z.EncBinary() { z.EncBinaryMarshal(%v) ", varname)
+ }
+ if ti2.jm || ti2.jmp { // t.Implements(jsonMarshalerTyp) || tptr.Implements(jsonMarshalerTyp) {
+ x.linef("} else if !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", varname)
+ } else if ti2.tm || ti2.tmp { // t.Implements(textMarshalerTyp) || tptr.Implements(textMarshalerTyp) {
+ x.linef("} else if !z.EncBinary() { z.EncTextMarshal(%v) ", varname)
+ }
+ } else { // varname is of type T
+ if ti2.bm { // t.Implements(binaryMarshalerTyp) {
+ x.linef("} else if z.EncBinary() { z.EncBinaryMarshal(%v) ", varname)
+ } else if ti2.bmp { // tptr.Implements(binaryMarshalerTyp) {
+ x.linef("} else if z.EncBinary() { z.EncBinaryMarshal(&%v) ", varname)
+ }
+ if ti2.jm { // t.Implements(jsonMarshalerTyp) {
+ x.linef("} else if !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", varname)
+ } else if ti2.jmp { // tptr.Implements(jsonMarshalerTyp) {
+ x.linef("} else if !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(&%v) ", varname)
+ } else if ti2.tm { // t.Implements(textMarshalerTyp) {
+ x.linef("} else if !z.EncBinary() { z.EncTextMarshal(%v) ", varname)
+ } else if ti2.tmp { // tptr.Implements(textMarshalerTyp) {
+ x.linef("} else if !z.EncBinary() { z.EncTextMarshal(&%v) ", varname)
+ }
+ }
+ x.line("} else {")
+
+ switch t.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ x.line("r.EncodeInt(int64(" + varname + "))")
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ x.line("r.EncodeUint(uint64(" + varname + "))")
+ case reflect.Float32:
+ x.line("r.EncodeFloat32(float32(" + varname + "))")
+ case reflect.Float64:
+ x.line("r.EncodeFloat64(float64(" + varname + "))")
+ case reflect.Bool:
+ x.line("r.EncodeBool(bool(" + varname + "))")
+ case reflect.String:
+ x.linef("if z.EncBasicHandle().StringToRaw { r.EncodeStringBytesRaw(z.BytesView(string(%s))) } else { r.EncodeStringEnc(codecSelferCcUTF8%s, string(%s)) }", varname, x.xs, varname)
+ case reflect.Chan:
+ x.xtraSM(varname, t, true, false)
+ // x.encListFallback(varname, rtid, t)
+ case reflect.Array:
+ x.xtraSM(varname, t, true, true)
+ case reflect.Slice:
+ // if nil, call dedicated function
+ // if a []uint8, call dedicated function
+ // if a known fastpath slice, call dedicated function
+ // else write encode function in-line.
+ // - if elements are primitives or Selfers, call dedicated function on each member.
+ // - else call Encoder.encode(XXX) on it.
+ if rtid == uint8SliceTypId {
+ x.line("r.EncodeStringBytesRaw([]byte(" + varname + "))")
+ } else if fastpathAV.index(rtid) != -1 {
+ g := x.newGenV(t)
+ x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", e)")
+ } else {
+ x.xtraSM(varname, t, true, false)
+ // x.encListFallback(varname, rtid, t)
+ }
+ case reflect.Map:
+ // if nil, call dedicated function
+ // if a known fastpath map, call dedicated function
+ // else write encode function in-line.
+ // - if elements are primitives or Selfers, call dedicated function on each member. + // - else call Encoder.encode(XXX) on it. + // x.line("if " + varname + " == nil { \nr.EncodeNil()\n } else { ") + if fastpathAV.index(rtid) != -1 { + g := x.newGenV(t) + x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", e)") + } else { + x.xtraSM(varname, t, true, false) + // x.encMapFallback(varname, rtid, t) + } + case reflect.Struct: + if !inlist { + delete(x.te, rtid) + x.line("z.EncFallback(" + varname + ")") + break + } + x.encStruct(varname, rtid, t) + default: + if rtidAdded { + delete(x.te, rtid) + } + x.line("z.EncFallback(" + varname + ")") + } +} + +func (x *genRunner) encZero(t reflect.Type) { + switch t.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + x.line("r.EncodeInt(0)") + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + x.line("r.EncodeUint(0)") + case reflect.Float32: + x.line("r.EncodeFloat32(0)") + case reflect.Float64: + x.line("r.EncodeFloat64(0)") + case reflect.Bool: + x.line("r.EncodeBool(false)") + case reflect.String: + x.linef(`if z.EncBasicHandle().StringToRaw { r.EncodeStringBytesRaw([]byte{}) } else { r.EncodeStringEnc(codecSelferCcUTF8%s, "") }`, x.xs) + default: + x.line("r.EncodeNil()") + } +} + +func (x *genRunner) encOmitEmptyLine(t2 reflect.StructField, varname string, buf *genBuf) { + // smartly check omitEmpty on a struct type, as it may contain uncomparable map/slice/etc. + // also, for maps/slices/arrays, check if len ! 0 (not if == zero value) + varname2 := varname + "." + t2.Name + switch t2.Type.Kind() { + case reflect.Struct: + rtid2 := rt2id(t2.Type) + ti2 := x.ti.get(rtid2, t2.Type) + // fmt.Printf(">>>> structfield: omitempty: type: %s, field: %s\n", t2.Type.Name(), t2.Name) + if ti2.rtid == timeTypId { + buf.s("!(").s(varname2).s(".IsZero())") + break + } + if ti2.isFlag(typeInfoFlagIsZeroerPtr) || ti2.isFlag(typeInfoFlagIsZeroer) { + buf.s("!(").s(varname2).s(".IsZero())") + break + } + if ti2.isFlag(typeInfoFlagComparable) { + buf.s(varname2).s(" != ").s(x.genZeroValueR(t2.Type)) + break + } + // buf.s("(") + buf.s("false") + for i, n := 0, t2.Type.NumField(); i < n; i++ { + f := t2.Type.Field(i) + if f.PkgPath != "" { // unexported + continue + } + buf.s(" || ") + x.encOmitEmptyLine(f, varname2, buf) + } + //buf.s(")") + case reflect.Bool: + buf.s(varname2) + case reflect.Map, reflect.Slice, reflect.Array, reflect.Chan: + buf.s("len(").s(varname2).s(") != 0") + default: + buf.s(varname2).s(" != ").s(x.genZeroValueR(t2.Type)) + } +} + +func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) { + // Use knowledge from structfieldinfo (mbs, encodable fields. Ignore omitempty. ) + // replicate code in kStruct i.e. for each field, deref type to non-pointer, and call x.enc on it + + // if t === type currently running selfer on, do for all + ti := x.ti.get(rtid, t) + i := x.varsfx() + sepVarname := genTempVarPfx + "sep" + i + numfieldsvar := genTempVarPfx + "q" + i + ti2arrayvar := genTempVarPfx + "r" + i + struct2arrvar := genTempVarPfx + "2arr" + i + + x.line(sepVarname + " := !z.EncBinary()") + x.linef("%s := z.EncBasicHandle().StructToArray", struct2arrvar) + x.linef("_, _ = %s, %s", sepVarname, struct2arrvar) + x.linef("const %s bool = %v // struct tag has 'toArray'", ti2arrayvar, ti.toArray) + + tisfi := ti.sfiSrc // always use sequence from file. decStruct expects same thing. 
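+ // For intuition only: for an assumed struct { A string `codec:",omitempty"`; B int },
+ // the omitEmpty pass below emits a bool array marking which fields get written, e.g.
+ //   var yyq2 = [2]bool{x.A != "", true}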
+ + // var nn int + // due to omitEmpty, we need to calculate the + // number of non-empty things we write out first. + // This is required as we need to pre-determine the size of the container, + // to support length-prefixing. + if ti.anyOmitEmpty { + x.linef("var %s = [%v]bool{ // should field at this index be written?", numfieldsvar, len(tisfi)) + + for j, si := range tisfi { + _ = j + if !si.omitEmpty() { + // x.linef("%s[%v] = true // %s", numfieldsvar, j, si.fieldName) + x.linef("true, // %s", si.fieldName) + // nn++ + continue + } + var t2 reflect.StructField + var omitline genBuf + { + t2typ := t + varname3 := varname + // go through the loop, record the t2 field explicitly, + // and gather the omit line if embedded in pointers. + for ij, ix := range si.is { + if uint8(ij) == si.nis { + break + } + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(int(ix)) + t2typ = t2.Type + varname3 = varname3 + "." + t2.Name + // do not include actual field in the omit line. + // that is done subsequently (right after - below). + if uint8(ij+1) < si.nis && t2typ.Kind() == reflect.Ptr { + omitline.s(varname3).s(" != nil && ") + } + } + } + x.encOmitEmptyLine(t2, varname, &omitline) + x.linef("%s, // %s", omitline.v(), si.fieldName) + } + x.line("}") + x.linef("_ = %s", numfieldsvar) + } + // x.linef("var %snn%s int", genTempVarPfx, i) + x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray { + x.linef("r.WriteArrayStart(%d)", len(tisfi)) + x.linef("} else {") // if not ti.toArray + if ti.anyOmitEmpty { + // nn = 0 + // x.linef("var %snn%s = %v", genTempVarPfx, i, nn) + x.linef("var %snn%s int", genTempVarPfx, i) + x.linef("for _, b := range %s { if b { %snn%s++ } }", numfieldsvar, genTempVarPfx, i) + x.linef("r.WriteMapStart(%snn%s)", genTempVarPfx, i) + x.linef("%snn%s = %v", genTempVarPfx, i, 0) + } else { + x.linef("r.WriteMapStart(%d)", len(tisfi)) + } + x.line("}") // close if not StructToArray + + for j, si := range tisfi { + i := x.varsfx() + isNilVarName := genTempVarPfx + "n" + i + var labelUsed bool + var t2 reflect.StructField + { + t2typ := t + varname3 := varname + for ij, ix := range si.is { + if uint8(ij) == si.nis { + break + } + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(int(ix)) + t2typ = t2.Type + varname3 = varname3 + "." 
+ t2.Name + if t2typ.Kind() == reflect.Ptr { + if !labelUsed { + x.line("var " + isNilVarName + " bool") + } + x.line("if " + varname3 + " == nil { " + isNilVarName + " = true ") + x.line("goto LABEL" + i) + x.line("}") + labelUsed = true + // "varname3 = new(" + x.genTypeName(t3.Elem()) + ") }") + } + } + // t2 = t.FieldByIndex(si.is) + } + if labelUsed { + x.line("LABEL" + i + ":") + } + // if the type of the field is a Selfer, or one of the ones + + x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray + if labelUsed { + x.linef("if %s { r.WriteArrayElem(); r.EncodeNil() } else { ", isNilVarName) + } + x.line("r.WriteArrayElem()") + if si.omitEmpty() { + x.linef("if %s[%v] {", numfieldsvar, j) + } + x.encVar(varname+"."+t2.Name, t2.Type) + if si.omitEmpty() { + x.linef("} else {") + x.encZero(t2.Type) + x.linef("}") + } + if labelUsed { + x.line("}") + } + + x.linef("} else {") // if not ti.toArray + + if si.omitEmpty() { + x.linef("if %s[%v] {", numfieldsvar, j) + } + x.line("r.WriteMapElemKey()") + + // emulate EncStructFieldKey + switch ti.keyType { + case valueTypeInt: + x.linef("r.EncodeInt(z.M.Int(strconv.ParseInt(`%s`, 10, 64)))", si.encName) + case valueTypeUint: + x.linef("r.EncodeUint(z.M.Uint(strconv.ParseUint(`%s`, 10, 64)))", si.encName) + case valueTypeFloat: + x.linef("r.EncodeFloat64(z.M.Float(strconv.ParseFloat(`%s`, 64)))", si.encName) + default: // string + if si.encNameAsciiAlphaNum { + x.linef(`if z.IsJSONHandle() { z.WriteStr("\"%s\"") } else { `, si.encName) + } + x.linef("r.EncodeStringEnc(codecSelferCcUTF8%s, `%s`)", x.xs, si.encName) + if si.encNameAsciiAlphaNum { + x.linef("}") + } + } + // x.linef("r.EncStructFieldKey(codecSelferValueType%s%s, `%s`)", ti.keyType.String(), x.xs, si.encName) + x.line("r.WriteMapElemValue()") + if labelUsed { + x.line("if " + isNilVarName + " { r.EncodeNil() } else { ") + x.encVar(varname+"."+t2.Name, t2.Type) + x.line("}") + } else { + x.encVar(varname+"."+t2.Name, t2.Type) + } + if si.omitEmpty() { + x.line("}") + } + x.linef("} ") // end if/else ti.toArray + } + x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray { + x.line("r.WriteArrayEnd()") + x.line("} else {") + x.line("r.WriteMapEnd()") + x.line("}") + +} + +func (x *genRunner) encListFallback(varname string, t reflect.Type) { + elemBytes := t.Elem().Kind() == reflect.Uint8 + if t.AssignableTo(uint8SliceTyp) { + x.linef("r.EncodeStringBytesRaw([]byte(%s))", varname) + return + } + if t.Kind() == reflect.Array && elemBytes { + x.linef("r.EncodeStringBytesRaw(((*[%d]byte)(%s))[:])", t.Len(), varname) + return + } + i := x.varsfx() + if t.Kind() == reflect.Chan { + type ts struct { + Label, Chan, Slice, Sfx string + } + tm, err := template.New("").Parse(genEncChanTmpl) + if err != nil { + panic(err) + } + x.linef("if %s == nil { r.EncodeNil() } else { ", varname) + x.linef("var sch%s []%s", i, x.genTypeName(t.Elem())) + err = tm.Execute(x.w, &ts{"Lsch" + i, varname, "sch" + i, i}) + if err != nil { + panic(err) + } + // x.linef("%s = sch%s", varname, i) + if elemBytes { + x.linef("r.EncodeStringBytesRaw([]byte(%s))", "sch"+i) + x.line("}") + return + } + varname = "sch" + i + } + + x.line("r.WriteArrayStart(len(" + varname + "))") + x.linef("for _, %sv%s := range %s {", genTempVarPfx, i, varname) + x.line("r.WriteArrayElem()") + + x.encVar(genTempVarPfx+"v"+i, t.Elem()) + x.line("}") + x.line("r.WriteArrayEnd()") + if t.Kind() == reflect.Chan { + x.line("}") + } +} + +func (x *genRunner) encMapFallback(varname string, t reflect.Type) { + 
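+ // Sketch of the output this emits for an assumed map field x.M (names illustrative):
+ //   r.WriteMapStart(len(x.M))
+ //   for yyk1, yyv1 := range x.M { r.WriteMapElemKey(); ...; r.WriteMapElemValue(); ... }
+ //   r.WriteMapEnd()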
// TODO: expand this to handle canonical. + i := x.varsfx() + x.line("r.WriteMapStart(len(" + varname + "))") + x.linef("for %sk%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname) + x.line("r.WriteMapElemKey()") + x.encVar(genTempVarPfx+"k"+i, t.Key()) + x.line("r.WriteMapElemValue()") + x.encVar(genTempVarPfx+"v"+i, t.Elem()) + x.line("}") + x.line("r.WriteMapEnd()") +} + +func (x *genRunner) decVarInitPtr(varname, nilvar string, t reflect.Type, si *structFieldInfo, + newbuf, nilbuf *genBuf) (t2 reflect.StructField) { + //we must accommodate anonymous fields, where the embedded field is a nil pointer in the value. + // t2 = t.FieldByIndex(si.is) + t2typ := t + varname3 := varname + t2kind := t2typ.Kind() + var nilbufed bool + if si != nil { + for ij, ix := range si.is { + if uint8(ij) == si.nis { + break + } + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(int(ix)) + t2typ = t2.Type + varname3 = varname3 + "." + t2.Name + t2kind = t2typ.Kind() + if t2kind != reflect.Ptr { + continue + } + if newbuf != nil { + newbuf.f("if %s == nil { %s = new(%s) }\n", varname3, varname3, x.genTypeName(t2typ.Elem())) + } + if nilbuf != nil { + if !nilbufed { + nilbuf.s("if true") + nilbufed = true + } + nilbuf.s(" && ").s(varname3).s(" != nil") + } + } + } + // if t2typ.Kind() == reflect.Ptr { + // varname3 = varname3 + t2.Name + // } + if nilbuf != nil { + if nilbufed { + nilbuf.s(" { ") + } + if nilvar != "" { + nilbuf.s(nilvar).s(" = true") + } else if tk := t2typ.Kind(); tk == reflect.Ptr { + if strings.IndexByte(varname3, '.') != -1 || strings.IndexByte(varname3, '[') != -1 { + nilbuf.s(varname3).s(" = nil") + } else { + nilbuf.s("*").s(varname3).s(" = ").s(x.genZeroValueR(t2typ.Elem())) + } + } else { + nilbuf.s(varname3).s(" = ").s(x.genZeroValueR(t2typ)) + } + if nilbufed { + nilbuf.s("}") + } + } + return t2 +} + +// decVar takes a variable called varname, of type t +func (x *genRunner) decVarMain(varname, rand string, t reflect.Type, checkNotNil bool) { + // We only encode as nil if a nillable value. + // This removes some of the wasted checks for TryDecodeAsNil. + // We need to think about this more, to see what happens if omitempty, etc + // cause a nil value to be stored when something is expected. + // This could happen when decoding from a struct encoded as an array. + // For that, decVar should be called with canNil=true, to force true as its value. + var varname2 string + if t.Kind() != reflect.Ptr { + if t.PkgPath() != "" || !x.decTryAssignPrimitive(varname, t, false) { + x.dec(varname, t, false) + } + } else { + if checkNotNil { + x.linef("if %s == nil { %s = new(%s) }", varname, varname, x.genTypeName(t.Elem())) + } + // Ensure we set underlying ptr to a non-nil value (so we can deref to it later). + // There's a chance of a **T in here which is nil. + var ptrPfx string + for t = t.Elem(); t.Kind() == reflect.Ptr; t = t.Elem() { + ptrPfx += "*" + if checkNotNil { + x.linef("if %s%s == nil { %s%s = new(%s)}", + ptrPfx, varname, ptrPfx, varname, x.genTypeName(t)) + } + } + // Should we create temp var if a slice/map indexing? No. dec(...) can now handle it. 
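+ // e.g. (illustrative, assumed field x.F of type **T with checkNotNil set),
+ // the loop above emits:
+ //   if x.F == nil { x.F = new(*T) }
+ //   if *x.F == nil { *x.F = new(T) }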
+ + if ptrPfx == "" { + x.dec(varname, t, true) + } else { + varname2 = genTempVarPfx + "z" + rand + x.line(varname2 + " := " + ptrPfx + varname) + x.dec(varname2, t, true) + } + } +} + +// decVar takes a variable called varname, of type t +func (x *genRunner) decVar(varname, nilvar string, t reflect.Type, canBeNil, checkNotNil bool) { + i := x.varsfx() + + // We only encode as nil if a nillable value. + // This removes some of the wasted checks for TryDecodeAsNil. + // We need to think about this more, to see what happens if omitempty, etc + // cause a nil value to be stored when something is expected. + // This could happen when decoding from a struct encoded as an array. + // For that, decVar should be called with canNil=true, to force true as its value. + + if !canBeNil { + canBeNil = genAnythingCanBeNil || !genIsImmutable(t) + } + + if canBeNil { + var buf genBuf + x.decVarInitPtr(varname, nilvar, t, nil, nil, &buf) + x.linef("if r.TryDecodeAsNil() { %s } else {", buf.buf) + } else { + x.line("// cannot be nil") + } + + x.decVarMain(varname, i, t, checkNotNil) + + if canBeNil { + x.line("} ") + } +} + +// dec will decode a variable (varname) of type t or ptrTo(t) if isptr==true. +// t is always a basetype (i.e. not of kind reflect.Ptr). +func (x *genRunner) dec(varname string, t reflect.Type, isptr bool) { + // assumptions: + // - the varname is to a pointer already. No need to take address of it + // - t is always a baseType T (not a *T, etc). + rtid := rt2id(t) + ti2 := x.ti.get(rtid, t) + // tptr := reflect.PtrTo(t) + if x.checkForSelfer(t, varname) { + if ti2.cs || ti2.csp { // t.Implements(selferTyp) || tptr.Implements(selferTyp) { + x.line(varname + ".CodecDecodeSelf(d)") + return + } + if _, ok := x.td[rtid]; ok { + x.line(varname + ".CodecDecodeSelf(d)") + return + } + } + + inlist := false + for _, t0 := range x.t { + if t == t0 { + inlist = true + if x.checkForSelfer(t, varname) { + x.line(varname + ".CodecDecodeSelf(d)") + return + } + break + } + } + + var rtidAdded bool + if t == x.tc { + x.td[rtid] = true + rtidAdded = true + } + + // check if + // - type is time.Time, Raw, RawExt + // - the type implements (Text|JSON|Binary)(Unm|M)arshal + + mi := x.varsfx() + // x.linef("%sm%s := z.DecBinary()", genTempVarPfx, mi) + // x.linef("_ = %sm%s", genTempVarPfx, mi) + x.line("if false {") //start if block + defer func() { x.line("}") }() //end if block + + var ptrPfx, addrPfx string + if isptr { + ptrPfx = "*" + } else { + addrPfx = "&" + } + if t == timeTyp { + x.linef("} else if !z.DecBasicHandle().TimeNotBuiltin { %s%v = r.DecodeTime()", ptrPfx, varname) + // return + } + if t == rawTyp { + x.linef("} else { %s%v = z.DecRaw()", ptrPfx, varname) + return + } + + if t == rawExtTyp { + x.linef("} else { r.DecodeExt(%s%v, 0, nil)", addrPfx, varname) + return + } + + // only check for extensions if the type is named, and has a packagePath. 
+ if !x.nx && genImportPath(t) != "" && t.Name() != "" {
+ // first check if extensions are configured, before doing the interface conversion
+ // x.linef("} else if z.HasExtensions() && z.DecExt(%s) {", varname)
+ yy := fmt.Sprintf("%sxt%s", genTempVarPfx, mi)
+ x.linef("} else if %s := z.Extension(z.I2Rtid(%s)); %s != nil { z.DecExtension(%s, %s) ", yy, varname, yy, varname, yy)
+ }
+
+ if ti2.bu || ti2.bup { // t.Implements(binaryUnmarshalerTyp) || tptr.Implements(binaryUnmarshalerTyp) {
+ x.linef("} else if z.DecBinary() { z.DecBinaryUnmarshal(%s%v) ", addrPfx, varname)
+ }
+ if ti2.ju || ti2.jup { // t.Implements(jsonUnmarshalerTyp) || tptr.Implements(jsonUnmarshalerTyp) {
+ x.linef("} else if !z.DecBinary() && z.IsJSONHandle() { z.DecJSONUnmarshal(%s%v)", addrPfx, varname)
+ } else if ti2.tu || ti2.tup { // t.Implements(textUnmarshalerTyp) || tptr.Implements(textUnmarshalerTyp) {
+ x.linef("} else if !z.DecBinary() { z.DecTextUnmarshal(%s%v)", addrPfx, varname)
+ }
+
+ x.line("} else {")
+
+ if x.decTryAssignPrimitive(varname, t, isptr) {
+ return
+ }
+
+ switch t.Kind() {
+ case reflect.Array, reflect.Chan:
+ x.xtraSM(varname, t, false, isptr)
+ case reflect.Slice:
+ // if a []uint8, call dedicated function
+ // if a known fastpath slice, call dedicated function
+ // else write decode function in-line.
+ // - if elements are primitives or Selfers, call dedicated function on each member.
+ // - else call Decoder.decode(XXX) on it.
+ if rtid == uint8SliceTypId {
+ x.linef("%s%s = r.DecodeBytes(%s(%s[]byte)(%s), false)",
+ ptrPfx, varname, ptrPfx, ptrPfx, varname)
+ } else if fastpathAV.index(rtid) != -1 {
+ g := x.newGenV(t)
+ x.linef("z.F.%sX(%s%s, d)", g.MethodNamePfx("Dec", false), addrPfx, varname)
+ } else {
+ x.xtraSM(varname, t, false, isptr)
+ // x.decListFallback(varname, rtid, false, t)
+ }
+ case reflect.Map:
+ // if a known fastpath map, call dedicated function
+ // else write decode function in-line.
+ // - if elements are primitives or Selfers, call dedicated function on each member.
+ // - else call Decoder.decode(XXX) on it.
+ if fastpathAV.index(rtid) != -1 {
+ g := x.newGenV(t)
+ x.linef("z.F.%sX(%s%s, d)", g.MethodNamePfx("Dec", false), addrPfx, varname)
+ } else {
+ x.xtraSM(varname, t, false, isptr)
+ // x.decMapFallback(varname, rtid, t)
+ }
+ case reflect.Struct:
+ if inlist {
+ // no need to create temp variable if isptr, or x.F or x[F]
+ if isptr || strings.IndexByte(varname, '.') != -1 || strings.IndexByte(varname, '[') != -1 {
+ x.decStruct(varname, rtid, t)
+ } else {
+ varname2 := genTempVarPfx + "j" + mi
+ x.line(varname2 + " := &" + varname)
+ x.decStruct(varname2, rtid, t)
+ }
+ } else {
+ // delete(x.td, rtid)
+ x.line("z.DecFallback(" + addrPfx + varname + ", false)")
+ }
+ default:
+ if rtidAdded {
+ delete(x.te, rtid)
+ }
+ x.line("z.DecFallback(" + addrPfx + varname + ", true)")
+ }
+}
+
+func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type, isptr bool) (done bool) {
+ // This should only be used for exact primitives (i.e. un-named types).
+ // Named types may be implementations of Selfer, Unmarshaler, etc.
+ // They should be handled by dec(...)
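+ // e.g. (illustrative, assumed plain int target x.F): the Int case below emits
+ //   x.F = (int)(z.C.IntV(r.DecodeInt64(), codecSelferBitsize1234))
+ // where 1234 stands in for the per-file suffix (x.xs).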
+ + var ptr string + if isptr { + ptr = "*" + } + switch t.Kind() { + case reflect.Int: + x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), codecSelferBitsize%s))", ptr, varname, x.genTypeName(t), x.xs) + case reflect.Int8: + x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), 8))", ptr, varname, x.genTypeName(t)) + case reflect.Int16: + x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), 16))", ptr, varname, x.genTypeName(t)) + case reflect.Int32: + x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), 32))", ptr, varname, x.genTypeName(t)) + case reflect.Int64: + x.linef("%s%s = (%s)(r.DecodeInt64())", ptr, varname, x.genTypeName(t)) + + case reflect.Uint: + x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), codecSelferBitsize%s))", ptr, varname, x.genTypeName(t), x.xs) + case reflect.Uint8: + x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), 8))", ptr, varname, x.genTypeName(t)) + case reflect.Uint16: + x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), 16))", ptr, varname, x.genTypeName(t)) + case reflect.Uint32: + x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), 32))", ptr, varname, x.genTypeName(t)) + case reflect.Uint64: + x.linef("%s%s = (%s)(r.DecodeUint64())", ptr, varname, x.genTypeName(t)) + case reflect.Uintptr: + x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), codecSelferBitsize%s))", ptr, varname, x.genTypeName(t), x.xs) + + case reflect.Float32: + x.linef("%s%s = (%s)(r.DecodeFloat32As64())", ptr, varname, x.genTypeName(t)) + case reflect.Float64: + x.linef("%s%s = (%s)(r.DecodeFloat64())", ptr, varname, x.genTypeName(t)) + + case reflect.Bool: + x.linef("%s%s = (%s)(r.DecodeBool())", ptr, varname, x.genTypeName(t)) + case reflect.String: + x.linef("%s%s = (%s)(r.DecodeString())", ptr, varname, x.genTypeName(t)) + default: + return false + } + return true +} + +func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type) { + if t.AssignableTo(uint8SliceTyp) { + x.line("*" + varname + " = r.DecodeBytes(*((*[]byte)(" + varname + ")), false)") + return + } + if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 { + x.linef("r.DecodeBytes( ((*[%d]byte)(%s))[:], true)", t.Len(), varname) + return + } + type tstruc struct { + TempVar string + Rand string + Varname string + CTyp string + Typ string + Immutable bool + Size int + } + telem := t.Elem() + ts := tstruc{genTempVarPfx, x.varsfx(), varname, x.genTypeName(t), x.genTypeName(telem), genIsImmutable(telem), int(telem.Size())} + + funcs := make(template.FuncMap) + + funcs["decLineVar"] = func(varname string) string { + x.decVar(varname, "", telem, false, true) + return "" + } + funcs["var"] = func(s string) string { + return ts.TempVar + s + ts.Rand + } + funcs["zero"] = func() string { + return x.genZeroValueR(telem) + } + funcs["isArray"] = func() bool { + return t.Kind() == reflect.Array + } + funcs["isSlice"] = func() bool { + return t.Kind() == reflect.Slice + } + funcs["isChan"] = func() bool { + return t.Kind() == reflect.Chan + } + tm, err := template.New("").Funcs(funcs).Parse(genDecListTmpl) + if err != nil { + panic(err) + } + if err = tm.Execute(x.w, &ts); err != nil { + panic(err) + } +} + +func (x *genRunner) decMapFallback(varname string, rtid uintptr, t reflect.Type) { + type tstruc struct { + TempVar string + Sfx string + Rand string + Varname string + KTyp string + Typ string + Size int + } + telem := t.Elem() + tkey := t.Key() + ts := tstruc{ + genTempVarPfx, x.xs, x.varsfx(), varname, x.genTypeName(tkey), + x.genTypeName(telem), int(telem.Size() + tkey.Size()), + } + + funcs := make(template.FuncMap) 
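+ // The helpers registered below are invoked from genDecMapTmpl; e.g. {{var "v"}}
+ // expands to a unique temp name such as yyv1 (illustrative).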
+ funcs["decElemZero"] = func() string { + return x.genZeroValueR(telem) + } + funcs["decElemKindImmutable"] = func() bool { + return genIsImmutable(telem) + } + funcs["decElemKindPtr"] = func() bool { + return telem.Kind() == reflect.Ptr + } + funcs["decElemKindIntf"] = func() bool { + return telem.Kind() == reflect.Interface + } + funcs["decLineVarK"] = func(varname string) string { + x.decVar(varname, "", tkey, false, true) + return "" + } + funcs["decLineVar"] = func(varname, decodedNilVarname string) string { + x.decVar(varname, decodedNilVarname, telem, false, true) + return "" + } + funcs["var"] = func(s string) string { + return ts.TempVar + s + ts.Rand + } + + tm, err := template.New("").Funcs(funcs).Parse(genDecMapTmpl) + if err != nil { + panic(err) + } + if err = tm.Execute(x.w, &ts); err != nil { + panic(err) + } +} + +func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintptr, t reflect.Type) { + ti := x.ti.get(rtid, t) + tisfi := ti.sfiSrc // always use sequence from file. decStruct expects same thing. + x.line("switch (" + kName + ") {") + var newbuf, nilbuf genBuf + for _, si := range tisfi { + x.line("case \"" + si.encName + "\":") + newbuf.reset() + nilbuf.reset() + t2 := x.decVarInitPtr(varname, "", t, si, &newbuf, &nilbuf) + x.linef("if r.TryDecodeAsNil() { %s } else { %s", nilbuf.buf, newbuf.buf) + x.decVarMain(varname+"."+t2.Name, x.varsfx(), t2.Type, false) + x.line("}") + } + x.line("default:") + // pass the slice here, so that the string will not escape, and maybe save allocation + x.line("z.DecStructFieldNotFound(-1, " + kName + ")") + x.line("} // end switch " + kName) +} + +func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t reflect.Type, style genStructMapStyle) { + tpfx := genTempVarPfx + ti := x.ti.get(rtid, t) + i := x.varsfx() + kName := tpfx + "s" + i + + switch style { + case genStructMapStyleLenPrefix: + x.linef("for %sj%s := 0; %sj%s < %s; %sj%s++ {", tpfx, i, tpfx, i, lenvarname, tpfx, i) + case genStructMapStyleCheckBreak: + x.linef("for %sj%s := 0; !r.CheckBreak(); %sj%s++ {", tpfx, i, tpfx, i) + default: // 0, otherwise. + x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length + x.linef("for %sj%s := 0; ; %sj%s++ {", tpfx, i, tpfx, i) + x.linef("if %shl%s { if %sj%s >= %s { break }", tpfx, i, tpfx, i, lenvarname) + x.line("} else { if r.CheckBreak() { break }; }") + } + x.line("r.ReadMapElemKey()") + + // emulate decstructfieldkey + switch ti.keyType { + case valueTypeInt: + x.linef("%s := z.StringView(strconv.AppendInt(z.DecScratchArrayBuffer()[:0], r.DecodeInt64(), 10))", kName) + case valueTypeUint: + x.linef("%s := z.StringView(strconv.AppendUint(z.DecScratchArrayBuffer()[:0], r.DecodeUint64(), 10))", kName) + case valueTypeFloat: + x.linef("%s := z.StringView(strconv.AppendFloat(z.DecScratchArrayBuffer()[:0], r.DecodeFloat64(), 'f', -1, 64))", kName) + default: // string + x.linef("%s := z.StringView(r.DecodeStringAsBytes())", kName) + } + // x.linef("%s := z.StringView(r.DecStructFieldKey(codecSelferValueType%s%s, z.DecScratchArrayBuffer()))", kName, ti.keyType.String(), x.xs) + + x.line("r.ReadMapElemValue()") + x.decStructMapSwitch(kName, varname, rtid, t) + + x.line("} // end for " + tpfx + "j" + i) + x.line("r.ReadMapEnd()") +} + +func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid uintptr, t reflect.Type) { + tpfx := genTempVarPfx + i := x.varsfx() + ti := x.ti.get(rtid, t) + tisfi := ti.sfiSrc // always use sequence from file. 
decStruct expects same thing. + x.linef("var %sj%s int", tpfx, i) + x.linef("var %sb%s bool", tpfx, i) // break + x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length + var newbuf, nilbuf genBuf + for _, si := range tisfi { + x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }", + tpfx, i, tpfx, i, tpfx, i, + tpfx, i, lenvarname, tpfx, i) + x.linef("if %sb%s { r.ReadArrayEnd(); %s }", tpfx, i, breakString) + x.line("r.ReadArrayElem()") + newbuf.reset() + nilbuf.reset() + t2 := x.decVarInitPtr(varname, "", t, si, &newbuf, &nilbuf) + x.linef("if r.TryDecodeAsNil() { %s } else { %s", nilbuf.buf, newbuf.buf) + x.decVarMain(varname+"."+t2.Name, x.varsfx(), t2.Type, false) + x.line("}") + } + // read remaining values and throw away. + x.line("for {") + x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }", + tpfx, i, tpfx, i, tpfx, i, + tpfx, i, lenvarname, tpfx, i) + x.linef("if %sb%s { break }", tpfx, i) + x.line("r.ReadArrayElem()") + x.linef(`z.DecStructFieldNotFound(%sj%s - 1, "")`, tpfx, i) + x.line("}") + x.line("r.ReadArrayEnd()") +} + +func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) { + // varname MUST be a ptr, or a struct field or a slice element. + i := x.varsfx() + x.linef("%sct%s := r.ContainerType()", genTempVarPfx, i) + x.linef("if %sct%s == codecSelferValueTypeMap%s {", genTempVarPfx, i, x.xs) + x.line(genTempVarPfx + "l" + i + " := r.ReadMapStart()") + x.linef("if %sl%s == 0 {", genTempVarPfx, i) + x.line("r.ReadMapEnd()") + if genUseOneFunctionForDecStructMap { + x.line("} else { ") + x.linef("%s.codecDecodeSelfFromMap(%sl%s, d)", varname, genTempVarPfx, i) + } else { + x.line("} else if " + genTempVarPfx + "l" + i + " > 0 { ") + x.line(varname + ".codecDecodeSelfFromMapLenPrefix(" + genTempVarPfx + "l" + i + ", d)") + x.line("} else {") + x.line(varname + ".codecDecodeSelfFromMapCheckBreak(" + genTempVarPfx + "l" + i + ", d)") + } + x.line("}") + + // else if container is array + x.linef("} else if %sct%s == codecSelferValueTypeArray%s {", genTempVarPfx, i, x.xs) + x.line(genTempVarPfx + "l" + i + " := r.ReadArrayStart()") + x.linef("if %sl%s == 0 {", genTempVarPfx, i) + x.line("r.ReadArrayEnd()") + x.line("} else { ") + x.linef("%s.codecDecodeSelfFromArray(%sl%s, d)", varname, genTempVarPfx, i) + x.line("}") + // else panic + x.line("} else { ") + x.line("panic(errCodecSelferOnlyMapOrArrayEncodeToStruct" + x.xs + ")") + x.line("} ") +} + +// -------- + +func (x *genRunner) newGenV(t reflect.Type) (v genV) { + switch t.Kind() { + case reflect.Slice, reflect.Array: + te := t.Elem() + v.Elem = x.genTypeName(te) + v.Size = int(te.Size()) + case reflect.Map: + te, tk := t.Elem(), t.Key() + v.Elem = x.genTypeName(te) + v.MapKey = x.genTypeName(tk) + v.Size = int(te.Size() + tk.Size()) + default: + panic("unexpected type for newGenV. Requires map or slice type") + } + return +} + +func (x *genV) MethodNamePfx(prefix string, prim bool) string { + var name []byte + if prefix != "" { + name = append(name, prefix...) + } + if prim { + name = append(name, genTitleCaseName(x.Primitive)...) + } else { + if x.MapKey == "" { + name = append(name, "Slice"...) + } else { + name = append(name, "Map"...) + name = append(name, genTitleCaseName(x.MapKey)...) + } + name = append(name, genTitleCaseName(x.Elem)...) + } + return string(name) + +} + +// genImportPath returns import path of a non-predeclared named typed, or an empty string otherwise. 
+//
+// This handles the misbehaviour that occurs when 1.5-style vendoring is enabled,
+// where PkgPath returns the full path, including the vendoring prefix that should have been stripped.
+// We strip it here.
+func genImportPath(t reflect.Type) (s string) {
+ s = t.PkgPath()
+ s = genStripVendor(s)
+ return
+}
+
+// A Go identifier is (letter|_)[letter|number|_]*
+func genGoIdentifier(s string, checkFirstChar bool) string {
+ b := make([]byte, 0, len(s))
+ t := make([]byte, 4)
+ var n int
+ for i, r := range s {
+ if checkFirstChar && i == 0 && !unicode.IsLetter(r) {
+ b = append(b, '_')
+ }
+ // r must be unicode_letter, unicode_digit or _
+ if unicode.IsLetter(r) || unicode.IsDigit(r) {
+ n = utf8.EncodeRune(t, r)
+ b = append(b, t[:n]...)
+ } else {
+ b = append(b, '_')
+ }
+ }
+ return string(b)
+}
+
+func genNonPtr(t reflect.Type) reflect.Type {
+ for t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ return t
+}
+
+func genTitleCaseName(s string) string {
+ switch s {
+ case "interface{}", "interface {}":
+ return "Intf"
+ default:
+ return strings.ToUpper(s[0:1]) + s[1:]
+ }
+}
+
+func genMethodNameT(t reflect.Type, tRef reflect.Type) (n string) {
+ var ptrPfx string
+ for t.Kind() == reflect.Ptr {
+ ptrPfx += "Ptrto"
+ t = t.Elem()
+ }
+ tstr := t.String()
+ if tn := t.Name(); tn != "" {
+ if tRef != nil && genImportPath(t) == genImportPath(tRef) {
+ return ptrPfx + tn
+ } else {
+ if genQNameRegex.MatchString(tstr) {
+ return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
+ } else {
+ return ptrPfx + genCustomTypeName(tstr)
+ }
+ }
+ }
+ switch t.Kind() {
+ case reflect.Map:
+ return ptrPfx + "Map" + genMethodNameT(t.Key(), tRef) + genMethodNameT(t.Elem(), tRef)
+ case reflect.Slice:
+ return ptrPfx + "Slice" + genMethodNameT(t.Elem(), tRef)
+ case reflect.Array:
+ return ptrPfx + "Array" + strconv.FormatInt(int64(t.Len()), 10) + genMethodNameT(t.Elem(), tRef)
+ case reflect.Chan:
+ var cx string
+ switch t.ChanDir() {
+ case reflect.SendDir:
+ cx = "ChanSend"
+ case reflect.RecvDir:
+ cx = "ChanRecv"
+ default:
+ cx = "Chan"
+ }
+ return ptrPfx + cx + genMethodNameT(t.Elem(), tRef)
+ default:
+ if t == intfTyp {
+ return ptrPfx + "Interface"
+ } else {
+ if tRef != nil && genImportPath(t) == genImportPath(tRef) {
+ if t.Name() != "" {
+ return ptrPfx + t.Name()
+ } else {
+ return ptrPfx + genCustomTypeName(tstr)
+ }
+ } else {
+ // best way to get the package name inclusive
+ // return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
+ // return ptrPfx + genBase64enc.EncodeToString([]byte(tstr))
+ if t.Name() != "" && genQNameRegex.MatchString(tstr) {
+ return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
+ } else {
+ return ptrPfx + genCustomTypeName(tstr)
+ }
+ }
+ }
+ }
+}
+
+// genCustomTypeName base64-encodes the t.String() value in such a way
+// that it can be used within a function name.
+func genCustomTypeName(tstr string) string {
+ len2 := genBase64enc.EncodedLen(len(tstr))
+ bufx := make([]byte, len2)
+ genBase64enc.Encode(bufx, []byte(tstr))
+ for i := len2 - 1; i >= 0; i-- {
+ if bufx[i] == '=' {
+ len2--
+ } else {
+ break
+ }
+ }
+ return string(bufx[:len2])
+}
+
+func genIsImmutable(t reflect.Type) (v bool) {
+ return isImmutableKind(t.Kind())
+}
+
+func genStripVendor(s string) string {
+ // HACK: Misbehaviour occurs in go 1.5. May have to re-visit this later.
+ // if s contains /vendor/ OR startsWith vendor/, then return everything after it.
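+ // e.g. "github.com/foo/vendor/github.com/ugorji/go/codec" -> "github.com/ugorji/go/codec"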
+ const vendorStart = "vendor/" + const vendorInline = "/vendor/" + if i := strings.LastIndex(s, vendorInline); i >= 0 { + s = s[i+len(vendorInline):] + } else if strings.HasPrefix(s, vendorStart) { + s = s[len(vendorStart):] + } + return s +} diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/helper.go b/vendor/github.com/hashicorp/go-msgpack/v2/codec/helper.go new file mode 100644 index 0000000000000..9c0ed16a93118 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/helper.go @@ -0,0 +1,2926 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// Contains code shared by both encode and decode. + +// Some shared ideas around encoding/decoding +// ------------------------------------------ +// +// If an interface{} is passed, we first do a type assertion to see if it is +// a primitive type or a map/slice of primitive types, and use a fastpath to handle it. +// +// If we start with a reflect.Value, we are already in reflect.Value land and +// will try to grab the function for the underlying Type and directly call that function. +// This is more performant than calling reflect.Value.Interface(). +// +// This still helps us bypass many layers of reflection, and give best performance. +// +// Containers +// ------------ +// Containers in the stream are either associative arrays (key-value pairs) or +// regular arrays (indexed by incrementing integers). +// +// Some streams support indefinite-length containers, and use a breaking +// byte-sequence to denote that the container has come to an end. +// +// Some streams also are text-based, and use explicit separators to denote the +// end/beginning of different values. +// +// During encode, we use a high-level condition to determine how to iterate through +// the container. That decision is based on whether the container is text-based (with +// separators) or binary (without separators). If binary, we do not even call the +// encoding of separators. +// +// During decode, we use a different high-level condition to determine how to iterate +// through the containers. That decision is based on whether the stream contained +// a length prefix, or if it used explicit breaks. If length-prefixed, we assume that +// it has to be binary, and we do not even try to read separators. +// +// Philosophy +// ------------ +// On decode, this codec will update containers appropriately: +// - If struct, update fields from stream into fields of struct. +// If field in stream not found in struct, handle appropriately (based on option). +// If a struct field has no corresponding value in the stream, leave it AS IS. +// If nil in stream, set value to nil/zero value. +// - If map, update map from stream. +// If the stream value is NIL, set the map to nil. +// - if slice, try to update up to length of array in stream. +// if container len is less than stream array length, +// and container cannot be expanded, handled (based on option). +// This means you can decode 4-element stream array into 1-element array. +// +// ------------------------------------ +// On encode, user can specify omitEmpty. This means that the value will be omitted +// if the zero value. The problem may occur during decode, where omitted values do not affect +// the value being decoded into. 
This means that if decoding into a struct with an
+// int field with current value=5, and the field is omitted in the stream, then after
+// decoding, the value will still be 5 (not 0).
+// omitEmpty only works if you guarantee that you always decode into zero-values.
+//
+// ------------------------------------
+// We could have truncated a map to remove keys not available in the stream,
+// or set values in the struct which are not in the stream to their zero values.
+// We decided against it because there is no efficient way to do it.
+// We may introduce it as an option later.
+// However, that will require enabling it for both runtime and code generation modes.
+//
+// To support truncate, we need to do 2 passes over the container:
+// map
+// - first collect all keys (e.g. in k1)
+// - for each key in stream, mark k1 that the key should not be removed
+// - after updating map, do second pass and call delete for all keys in k1 which are not marked
+// struct:
+// - for each field, track the *typeInfo s1
+// - iterate through all s1, and for each one not marked, set value to zero
+// - this involves checking the possible anonymous fields which are nil ptrs.
+// too much work.
+//
+// ------------------------------------------
+// Error Handling is done within the library using panic.
+//
+// This way, the code doesn't have to keep checking if an error has happened,
+// and we don't have to keep sending the error value along with each call
+// or storing it in the En|Decoder and checking it constantly along the way.
+//
+// The disadvantage is that small functions which use panics cannot be inlined.
+// The code accounts for that by only using panics behind an interface;
+// since interface calls cannot be inlined, this is irrelevant.
+//
+// We considered storing the error in the En|Decoder.
+// - once it has its err field set, it cannot be used again.
+// - panicking will be optional, controlled by const flag.
+// - code should always check error first and return early.
+// We eventually decided against it as it makes the code clumsier to always
+// check for these error conditions.
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+const (
+ scratchByteArrayLen = 32
+ // initCollectionCap = 16 // 32 is defensive. 16 is preferred.
+
+ // Support encoding.(Binary|Text)(Unm|M)arshaler.
+ // This constant flag will enable or disable it.
+ supportMarshalInterfaces = true
+
+ // for debugging, set this to false, to catch panic traces.
+ // Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic.
+ recoverPanicToErr = true
+
+ // arrayCacheLen is the length of the cache used in encoder or decoder for
+ // allowing zero-alloc initialization.
+ // arrayCacheLen = 8
+
+ // size of the cacheline: defaulting to value for archs: amd64, arm64, 386
+ // should use "runtime/internal/sys".CacheLineSize, but that is not exposed.
+ cacheLineSize = 64
+
+ wordSizeBits = 32 << (^uint(0) >> 63) // strconv.IntSize
+ wordSize = wordSizeBits / 8
+
+ // so structFieldInfo fits into 8 bytes
+ maxLevelsEmbedding = 14
+
+ // useFinalizers=true configures finalizers to release pool'ed resources
+ // acquired by Encoder/Decoder during their GC.
+ //
+ // Note that calling SetFinalizer is always expensive,
+ // as code must be run on the systemstack even for SetFinalizer(t, nil).
+
+ //
+ // We document that folks SHOULD call Release() when done, or they can
+ // explicitly call SetFinalizer themselves e.g.
+ // runtime.SetFinalizer(e, (*Encoder).Release)
+ // runtime.SetFinalizer(d, (*Decoder).Release)
+ useFinalizers = false
+)
+
+var oneByteArr [1]byte
+var zeroByteSlice = oneByteArr[:0:0]
+
+var codecgen bool
+
+var refBitset bitset256
+var pool pooler
+var panicv panicHdl
+
+func init() {
+ pool.init()
+
+ refBitset.set(byte(reflect.Map))
+ refBitset.set(byte(reflect.Ptr))
+ refBitset.set(byte(reflect.Func))
+ refBitset.set(byte(reflect.Chan))
+}
+
+type clsErr struct {
+ closed bool // is it closed?
+ errClosed error // error on closing
+}
+
+// type entryType uint8
+
+// const (
+// entryTypeBytes entryType = iota // make this 0, so a comparison is cheap
+// entryTypeIo
+// entryTypeBufio
+// entryTypeUnset = 255
+// )
+
+type charEncoding uint8
+
+const (
+ _ charEncoding = iota // make 0 unset
+ cUTF8
+ cUTF16LE
+ cUTF16BE
+ cUTF32LE
+ cUTF32BE
+ // Deprecated: not a true char encoding value
+ cRAW charEncoding = 255
+)
+
+// valueType is the stream type
+type valueType uint8
+
+const (
+ valueTypeUnset valueType = iota
+ valueTypeNil
+ valueTypeInt
+ valueTypeUint
+ valueTypeFloat
+ valueTypeBool
+ valueTypeString
+ valueTypeSymbol
+ valueTypeBytes
+ valueTypeMap
+ valueTypeArray
+ valueTypeTime
+ valueTypeExt
+
+ // valueTypeInvalid = 0xff
+)
+
+var valueTypeStrings = [...]string{
+ "Unset",
+ "Nil",
+ "Int",
+ "Uint",
+ "Float",
+ "Bool",
+ "String",
+ "Symbol",
+ "Bytes",
+ "Map",
+ "Array",
+ "Timestamp",
+ "Ext",
+}
+
+func (x valueType) String() string {
+ if int(x) < len(valueTypeStrings) {
+ return valueTypeStrings[x]
+ }
+ return strconv.FormatInt(int64(x), 10)
+}
+
+type seqType uint8
+
+const (
+ _ seqType = iota
+ seqTypeArray
+ seqTypeSlice
+ seqTypeChan
+)
+
+// note that containerMapStart and containerArrayStart are not sent.
+// This is because the ReadXXXStart and EncodeXXXStart already do these.
+type containerState uint8
+
+const (
+ _ containerState = iota
+
+ containerMapStart // slot left open, since Driver method already covers it
+ containerMapKey
+ containerMapValue
+ containerMapEnd
+ containerArrayStart // slot left open, since Driver methods already cover it
+ containerArrayElem
+ containerArrayEnd
+)
+
+// // sfiIdx used for tracking where a (field/enc)Name is seen in a []*structFieldInfo
+// type sfiIdx struct {
+// name string
+// index int
+// }
+
+// do not recurse if a containing type refers to an embedded type
+// which refers back to its containing type (via a pointer).
+// The second time this back-reference happens, break out,
+// so as not to cause an infinite loop.
+const rgetMaxRecursion = 2
+
+// Anecdotally, we believe most types have <= 12 fields.
+// - even Java's PMD rules set TooManyFields threshold to 15.
+// However, go has embedded fields, which should be regarded as
+// top level, allowing structs to possibly double or triple.
+// In addition, we don't want to keep creating transient arrays,
+// especially for the sfi index tracking, and the evtypes tracking.
+// +// So - try to keep typeInfoLoadArray within 2K bytes +const ( + typeInfoLoadArraySfisLen = 16 + typeInfoLoadArraySfiidxLen = 8 * 112 + typeInfoLoadArrayEtypesLen = 12 + typeInfoLoadArrayBLen = 8 * 4 +) + +type typeInfoLoad struct { + // fNames []string + // encNames []string + etypes []uintptr + sfis []structFieldInfo +} + +type typeInfoLoadArray struct { + // fNames [typeInfoLoadArrayLen]string + // encNames [typeInfoLoadArrayLen]string + sfis [typeInfoLoadArraySfisLen]structFieldInfo + sfiidx [typeInfoLoadArraySfiidxLen]byte + etypes [typeInfoLoadArrayEtypesLen]uintptr + b [typeInfoLoadArrayBLen]byte // scratch - used for struct field names +} + +// mirror json.Marshaler and json.Unmarshaler here, +// so we don't import the encoding/json package + +type jsonMarshaler interface { + MarshalJSON() ([]byte, error) +} +type jsonUnmarshaler interface { + UnmarshalJSON([]byte) error +} + +type isZeroer interface { + IsZero() bool +} + +type codecError struct { + name string + err interface{} +} + +func (e codecError) Cause() error { + switch xerr := e.err.(type) { + case nil: + return nil + case error: + return xerr + case string: + return errors.New(xerr) + case fmt.Stringer: + return errors.New(xerr.String()) + default: + return fmt.Errorf("%v", e.err) + } +} + +func (e codecError) Error() string { + return fmt.Sprintf("%s error: %v", e.name, e.err) +} + +// type byteAccepter func(byte) bool + +var ( + bigen = binary.BigEndian + structInfoFieldName = "_struct" + + mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil)) + mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil)) + intfSliceTyp = reflect.TypeOf([]interface{}(nil)) + intfTyp = intfSliceTyp.Elem() + + reflectValTyp = reflect.TypeOf((*reflect.Value)(nil)).Elem() + + stringTyp = reflect.TypeOf("") + timeTyp = reflect.TypeOf(time.Time{}) + rawExtTyp = reflect.TypeOf(RawExt{}) + rawTyp = reflect.TypeOf(Raw{}) + uintptrTyp = reflect.TypeOf(uintptr(0)) + uint8Typ = reflect.TypeOf(uint8(0)) + uint8SliceTyp = reflect.TypeOf([]uint8(nil)) + uintTyp = reflect.TypeOf(uint(0)) + intTyp = reflect.TypeOf(int(0)) + + mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem() + + binaryMarshalerTyp = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem() + binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem() + + textMarshalerTyp = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + + jsonMarshalerTyp = reflect.TypeOf((*jsonMarshaler)(nil)).Elem() + jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem() + + selferTyp = reflect.TypeOf((*Selfer)(nil)).Elem() + missingFielderTyp = reflect.TypeOf((*MissingFielder)(nil)).Elem() + iszeroTyp = reflect.TypeOf((*isZeroer)(nil)).Elem() + + uint8TypId = rt2id(uint8Typ) + uint8SliceTypId = rt2id(uint8SliceTyp) + rawExtTypId = rt2id(rawExtTyp) + rawTypId = rt2id(rawTyp) + intfTypId = rt2id(intfTyp) + timeTypId = rt2id(timeTyp) + stringTypId = rt2id(stringTyp) + + mapStrIntfTypId = rt2id(mapStrIntfTyp) + mapIntfIntfTypId = rt2id(mapIntfIntfTyp) + intfSliceTypId = rt2id(intfSliceTyp) + // mapBySliceTypId = rt2id(mapBySliceTyp) + + intBitsize = uint8(intTyp.Bits()) + uintBitsize = uint8(uintTyp.Bits()) + + // bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0} + bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} + + chkOvf checkOverflow + + errNoFieldNameToStructFieldInfo = errors.New("no field name passed to parseStructFieldInfo") +) + +var defTypeInfos = 
NewTypeInfos([]string{"codec", "json"})
+
+var immutableKindsSet = [32]bool{
+ // reflect.Invalid: ,
+ reflect.Bool: true,
+ reflect.Int: true,
+ reflect.Int8: true,
+ reflect.Int16: true,
+ reflect.Int32: true,
+ reflect.Int64: true,
+ reflect.Uint: true,
+ reflect.Uint8: true,
+ reflect.Uint16: true,
+ reflect.Uint32: true,
+ reflect.Uint64: true,
+ reflect.Uintptr: true,
+ reflect.Float32: true,
+ reflect.Float64: true,
+ reflect.Complex64: true,
+ reflect.Complex128: true,
+ // reflect.Array
+ // reflect.Chan
+ // reflect.Func: true,
+ // reflect.Interface
+ // reflect.Map
+ // reflect.Ptr
+ // reflect.Slice
+ reflect.String: true,
+ reflect.Struct: true,
+ // reflect.UnsafePointer
+}
+
+// Selfer defines methods by which a value can encode or decode itself.
+//
+// Any type which implements Selfer will be able to encode or decode itself.
+// Consequently, during (en|de)code, this takes precedence over
+// (text|binary)(M|Unm)arshal or extension support.
+//
+// By definition, it is not allowed for a Selfer to directly call Encode or Decode on itself.
+// If that is done, Encode/Decode will rightfully fail with a Stack Overflow style error.
+// For example, the snippet below will cause such an error.
+//
+// type testSelferRecur struct{}
+// func (s *testSelferRecur) CodecEncodeSelf(e *Encoder) { e.MustEncode(s) }
+// func (s *testSelferRecur) CodecDecodeSelf(d *Decoder) { d.MustDecode(s) }
+//
+// Note: *the first set of bytes of any value MUST NOT represent nil in the format*.
+// This is because, during each decode, we first check whether the next set of bytes
+// represent nil, and if so, we just set the value to nil.
+type Selfer interface {
+ CodecEncodeSelf(*Encoder)
+ CodecDecodeSelf(*Decoder)
+}
+
+// MissingFielder defines the interface allowing structs to internally decode or encode
+// values which do not map to struct fields.
+//
+// We expect that this interface is bound to a pointer type (so the mutation function works).
+//
+// A use-case is if a version of a type unexports a field, but you want compatibility between
+// both versions during encoding and decoding.
+//
+// Note that the interface is completely ignored during codecgen.
+type MissingFielder interface {
+ // CodecMissingField is called to set a missing field and value pair.
+ //
+ // It returns true if the missing field was set on the struct.
+ CodecMissingField(field []byte, value interface{}) bool
+
+ // CodecMissingFields returns the set of fields which are not struct fields
+ CodecMissingFields() map[string]interface{}
+}
+
+// MapBySlice is a tag interface that denotes that the wrapped slice should encode as a map in the stream.
+// The slice contains a sequence of key-value pairs.
+// This affords storing a map in a specific sequence in the stream.
+//
+// Example usage:
+//
+// type T1 []string // or []int or []Point or any other "slice" type
+// func (_ T1) MapBySlice() {} // T1 now implements MapBySlice, and will be encoded as a map
+// type T2 struct { KeyValues T1 }
+//
+// var kvs = []string{"one", "1", "two", "2", "three", "3"}
+// var v2 = T2{ KeyValues: T1(kvs) }
+// // v2 will be encoded like the map: {"KeyValues": {"one": "1", "two": "2", "three": "3"} }
+//
+// The support of MapBySlice affords the following:
+// - A slice type which implements MapBySlice will be encoded as a map
+// - A slice can be decoded from a map in the stream
+// - It MUST be a slice type (not a pointer receiver) that implements MapBySlice
+type MapBySlice interface {
+ MapBySlice()
+}
+
+// BasicHandle encapsulates the common options and extension functions.
+//
+// Deprecated: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED.
+type BasicHandle struct {
+ // BasicHandle is always a part of a different type.
+ // It doesn't have to fit into its own cache lines.
+
+ // TypeInfos is used to get the type info for any type.
+ //
+ // If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json
+ TypeInfos *TypeInfos
+
+ // Note: BasicHandle is not comparable, due to these slices here (extHandle, intf2impls).
+ // If *[]T is used instead, this becomes comparable, at the cost of extra indirection.
+ // These slices are used all the time, so keep as slices (not pointers).
+
+ extHandle
+
+ intf2impls
+
+ inited uint32
+ _ uint32 // padding
+
+ // ---- cache line
+
+ RPCOptions
+
+ // TimeNotBuiltin configures whether time.Time should be treated as a builtin type.
+ //
+ // All Handlers should know how to encode/decode time.Time as part of the core
+ // format specification, or as a standard extension defined by the format.
+ //
+ // However, users can elect to handle time.Time as a custom extension, or via the
+ // standard library's encoding.Binary(M|Unm)arshaler or Text(M|Unm)arshaler interface.
+ // To elect this behavior, users can set TimeNotBuiltin=true.
+ // Note: Setting TimeNotBuiltin=true can be used to enable the legacy behavior
+ // (for Cbor and Msgpack), where time.Time was not a builtin supported type.
+ TimeNotBuiltin bool
+
+ // ExplicitRelease configures whether Release() is implicitly called after an encode or
+ // decode call.
+ //
+ // If you will hold onto an Encoder or Decoder for re-use, by calling Reset(...)
+ // on it or calling (Must)Encode repeatedly into a given []byte or io.Writer,
+ // then you do not want it to be implicitly closed after each Encode/Decode call.
+ // Doing so will unnecessarily return resources to the shared pool, only for you to
+ // grab them right after again to do another Encode/Decode call.
+ //
+ // Instead, you configure ExplicitRelease=true, and you explicitly call Release() when
+ // you are truly done.
+ //
+ // As an alternative, you can explicitly set a finalizer - so its resources
+ // are returned to the shared pool before it is garbage-collected. Do it as below:
+ // runtime.SetFinalizer(e, (*Encoder).Release)
+ // runtime.SetFinalizer(d, (*Decoder).Release)
+ ExplicitRelease bool
+
+ be bool // is handle a binary encoding?
+ js bool // is handle a javascript handler?
+ n byte // first letter of handle name + _ uint16 // padding + + // ---- cache line + + DecodeOptions + + // ---- cache line + + EncodeOptions + + // noBuiltInTypeChecker + + rtidFns atomicRtidFnSlice + mu sync.Mutex + // r []uintptr // rtids mapped to s above +} + +// basicHandle returns an initialized BasicHandle from the Handle. +func basicHandle(hh Handle) (x *BasicHandle) { + x = hh.getBasicHandle() + // ** We need to simulate once.Do, to ensure no data race within the block. + // ** Consequently, below would not work. + // if atomic.CompareAndSwapUint32(&x.inited, 0, 1) { + // x.be = hh.isBinary() + // _, x.js = hh.(*JsonHandle) + // x.n = hh.Name()[0] + // } + + // simulate once.Do using our own stored flag and mutex as a CompareAndSwap + // is not sufficient, since a race condition can occur within init(Handle) function. + // init is made noinline, so that this function can be inlined by its caller. + if atomic.LoadUint32(&x.inited) == 0 { + x.init(hh) + } + return +} + +//go:noinline +func (x *BasicHandle) init(hh Handle) { + // make it uninlineable, as it is called at most once + x.mu.Lock() + if x.inited == 0 { + x.be = hh.isBinary() + _, x.js = hh.(*JsonHandle) + x.n = hh.Name()[0] + atomic.StoreUint32(&x.inited, 1) + } + x.mu.Unlock() +} + +func (x *BasicHandle) getBasicHandle() *BasicHandle { + return x +} + +func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) { + if x.TypeInfos == nil { + return defTypeInfos.get(rtid, rt) + } + return x.TypeInfos.get(rtid, rt) +} + +func findFn(s []codecRtidFn, rtid uintptr) (i uint, fn *codecFn) { + // binary search. adapted from sort/search.go. + // Note: we use goto (instead of for loop) so this can be inlined. + + // h, i, j := 0, 0, len(s) + var h uint // var h, i uint + var j = uint(len(s)) +LOOP: + if i < j { + h = i + (j-i)/2 + if s[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + goto LOOP + } + if i < uint(len(s)) && s[i].rtid == rtid { + fn = s[i].fn + } + return +} + +func (x *BasicHandle) fn(rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *codecFn) { + rtid := rt2id(rt) + sp := x.rtidFns.load() + if sp != nil { + if _, fn = findFn(sp, rtid); fn != nil { + // xdebugf("<<<< %c: found fn for %v in rtidfns of size: %v", c.n, rt, len(sp)) + return + } + } + c := x + // xdebugf("#### for %c: load fn for %v in rtidfns of size: %v", c.n, rt, len(sp)) + fn = new(codecFn) + fi := &(fn.i) + ti := c.getTypeInfo(rtid, rt) + fi.ti = ti + + rk := reflect.Kind(ti.kind) + + if checkCodecSelfer && (ti.cs || ti.csp) { + fn.fe = (*Encoder).selferMarshal + fn.fd = (*Decoder).selferUnmarshal + fi.addrF = true + fi.addrD = ti.csp + fi.addrE = ti.csp + } else if rtid == timeTypId && !c.TimeNotBuiltin { + fn.fe = (*Encoder).kTime + fn.fd = (*Decoder).kTime + } else if rtid == rawTypId { + fn.fe = (*Encoder).raw + fn.fd = (*Decoder).raw + } else if rtid == rawExtTypId { + fn.fe = (*Encoder).rawExt + fn.fd = (*Decoder).rawExt + fi.addrF = true + fi.addrD = true + fi.addrE = true + } else if xfFn := c.getExt(rtid); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.fe = (*Encoder).ext + fn.fd = (*Decoder).ext + fi.addrF = true + fi.addrD = true + if rk == reflect.Struct || rk == reflect.Array { + fi.addrE = true + } + } else if supportMarshalInterfaces && c.be && (ti.bm || ti.bmp) && (ti.bu || ti.bup) { + fn.fe = (*Encoder).binaryMarshal + fn.fd = (*Decoder).binaryUnmarshal + fi.addrF = true + fi.addrD = ti.bup + fi.addrE = ti.bmp + } else if supportMarshalInterfaces && !c.be && c.js && (ti.jm || 
ti.jmp) && (ti.ju || ti.jup) { + //If JSON, we should check JSONMarshal before textMarshal + fn.fe = (*Encoder).jsonMarshal + fn.fd = (*Decoder).jsonUnmarshal + fi.addrF = true + fi.addrD = ti.jup + fi.addrE = ti.jmp + } else if supportMarshalInterfaces && !c.be && (ti.tm || ti.tmp) && (ti.tu || ti.tup) { + fn.fe = (*Encoder).textMarshal + fn.fd = (*Decoder).textUnmarshal + fi.addrF = true + fi.addrD = ti.tup + fi.addrE = ti.tmp + } else { + if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) { + if ti.pkgpath == "" { // un-named slice or map + if idx := fastpathAV.index(rtid); idx != -1 { + fn.fe = fastpathAV[idx].encfn + fn.fd = fastpathAV[idx].decfn + fi.addrD = true + fi.addrF = false + } + } else { + // use mapping for underlying type if there + var rtu reflect.Type + if rk == reflect.Map { + rtu = reflect.MapOf(ti.key, ti.elem) + } else { + rtu = reflect.SliceOf(ti.elem) + } + rtuid := rt2id(rtu) + if idx := fastpathAV.index(rtuid); idx != -1 { + xfnf := fastpathAV[idx].encfn + xrt := fastpathAV[idx].rt + fn.fe = func(e *Encoder, xf *codecFnInfo, xrv reflect.Value) { + xfnf(e, xf, xrv.Convert(xrt)) + } + fi.addrD = true + fi.addrF = false // meaning it can be an address(ptr) or a value + xfnf2 := fastpathAV[idx].decfn + fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) { + if xrv.Kind() == reflect.Ptr { + xfnf2(d, xf, xrv.Convert(reflect.PtrTo(xrt))) + } else { + xfnf2(d, xf, xrv.Convert(xrt)) + } + } + } + } + } + if fn.fe == nil && fn.fd == nil { + switch rk { + case reflect.Bool: + fn.fe = (*Encoder).kBool + fn.fd = (*Decoder).kBool + case reflect.String: + fn.fe = (*Encoder).kString + fn.fd = (*Decoder).kString + case reflect.Int: + fn.fd = (*Decoder).kInt + fn.fe = (*Encoder).kInt + case reflect.Int8: + fn.fe = (*Encoder).kInt8 + fn.fd = (*Decoder).kInt8 + case reflect.Int16: + fn.fe = (*Encoder).kInt16 + fn.fd = (*Decoder).kInt16 + case reflect.Int32: + fn.fe = (*Encoder).kInt32 + fn.fd = (*Decoder).kInt32 + case reflect.Int64: + fn.fe = (*Encoder).kInt64 + fn.fd = (*Decoder).kInt64 + case reflect.Uint: + fn.fd = (*Decoder).kUint + fn.fe = (*Encoder).kUint + case reflect.Uint8: + fn.fe = (*Encoder).kUint8 + fn.fd = (*Decoder).kUint8 + case reflect.Uint16: + fn.fe = (*Encoder).kUint16 + fn.fd = (*Decoder).kUint16 + case reflect.Uint32: + fn.fe = (*Encoder).kUint32 + fn.fd = (*Decoder).kUint32 + case reflect.Uint64: + fn.fe = (*Encoder).kUint64 + fn.fd = (*Decoder).kUint64 + case reflect.Uintptr: + fn.fe = (*Encoder).kUintptr + fn.fd = (*Decoder).kUintptr + case reflect.Float32: + fn.fe = (*Encoder).kFloat32 + fn.fd = (*Decoder).kFloat32 + case reflect.Float64: + fn.fe = (*Encoder).kFloat64 + fn.fd = (*Decoder).kFloat64 + case reflect.Invalid: + fn.fe = (*Encoder).kInvalid + fn.fd = (*Decoder).kErr + case reflect.Chan: + fi.seq = seqTypeChan + fn.fe = (*Encoder).kSlice + fn.fd = (*Decoder).kSlice + case reflect.Slice: + fi.seq = seqTypeSlice + fn.fe = (*Encoder).kSlice + fn.fd = (*Decoder).kSlice + case reflect.Array: + fi.seq = seqTypeArray + fn.fe = (*Encoder).kSlice + fi.addrF = false + fi.addrD = false + rt2 := reflect.SliceOf(ti.elem) + fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) { + d.h.fn(rt2, true, false).fd(d, xf, xrv.Slice(0, xrv.Len())) + } + // fn.fd = (*Decoder).kArray + case reflect.Struct: + if ti.anyOmitEmpty || ti.mf || ti.mfp { + fn.fe = (*Encoder).kStruct + } else { + fn.fe = (*Encoder).kStructNoOmitempty + } + fn.fd = (*Decoder).kStruct + case reflect.Map: + fn.fe = (*Encoder).kMap + fn.fd = 
(*Decoder).kMap + case reflect.Interface: + // encode: reflect.Interface are handled already by preEncodeValue + fn.fd = (*Decoder).kInterface + fn.fe = (*Encoder).kErr + default: + // reflect.Ptr and reflect.Interface are handled already by preEncodeValue + fn.fe = (*Encoder).kErr + fn.fd = (*Decoder).kErr + } + } + } + + c.mu.Lock() + var sp2 []codecRtidFn + sp = c.rtidFns.load() + if sp == nil { + sp2 = []codecRtidFn{{rtid, fn}} + c.rtidFns.store(sp2) + // xdebugf(">>>> adding rt: %v to rtidfns of size: %v", rt, len(sp2)) + // xdebugf(">>>> loading stored rtidfns of size: %v", len(c.rtidFns.load())) + } else { + idx, fn2 := findFn(sp, rtid) + if fn2 == nil { + sp2 = make([]codecRtidFn, len(sp)+1) + copy(sp2, sp[:idx]) + copy(sp2[idx+1:], sp[idx:]) + sp2[idx] = codecRtidFn{rtid, fn} + c.rtidFns.store(sp2) + // xdebugf(">>>> adding rt: %v to rtidfns of size: %v", rt, len(sp2)) + + } + } + c.mu.Unlock() + return +} + +// Handle defines a specific encoding format. It also stores any runtime state +// used during an Encoding or Decoding session e.g. stored state about Types, etc. +// +// Once a handle is configured, it can be shared across multiple Encoders and Decoders. +// +// Note that a Handle is NOT safe for concurrent modification. +// Consequently, do not modify it after it is configured if shared among +// multiple Encoders and Decoders in different goroutines. +// +// Consequently, the typical usage model is that a Handle is pre-configured +// before first time use, and not modified while in use. +// Such a pre-configured Handle is safe for concurrent access. +type Handle interface { + Name() string + // return the basic handle. It may not have been inited. + // Prefer to use basicHandle() helper function that ensures it has been inited. + getBasicHandle() *BasicHandle + recreateEncDriver(encDriver) bool + newEncDriver(w *Encoder) encDriver + newDecDriver(r *Decoder) decDriver + isBinary() bool + hasElemSeparators() bool + // IsBuiltinType(rtid uintptr) bool +} + +// Raw represents raw formatted bytes. +// We "blindly" store it during encode and retrieve the raw bytes during decode. +// Note: it is dangerous during encode, so we may gate the behaviour +// behind an Encode flag which must be explicitly set. +type Raw []byte + +// RawExt represents raw unprocessed extension data. +// Some codecs will decode extension data as a *RawExt +// if there is no registered extension for the tag. +// +// Only one of Data or Value is nil. +// If Data is nil, then the content of the RawExt is in the Value. +type RawExt struct { + Tag uint64 + // Data is the []byte which represents the raw ext. If nil, ext is exposed in Value. + // Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of types + Data []byte + // Value represents the extension, if Data is nil. + // Value is used by codecs (e.g. cbor, json) which leverage the format to do + // custom serialization of the types. + Value interface{} +} + +// BytesExt handles custom (de)serialization of types to/from []byte. +// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types. +type BytesExt interface { + // WriteExt converts a value to a []byte. + // + // Note: v is a pointer iff the registered extension type is a struct or array kind. + WriteExt(v interface{}) []byte + + // ReadExt updates a value from a []byte. + // + // Note: dst is always a pointer kind to the registered extension type. 
+ ReadExt(dst interface{}, src []byte) +} + +// InterfaceExt handles custom (de)serialization of types to/from another interface{} value. +// The Encoder or Decoder will then handle the further (de)serialization of that known type. +// +// It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of types. +type InterfaceExt interface { + // ConvertExt converts a value into a simpler interface for easy encoding + // e.g. convert time.Time to int64. + // + // Note: v is a pointer iff the registered extension type is a struct or array kind. + ConvertExt(v interface{}) interface{} + + // UpdateExt updates a value from a simpler interface for easy decoding + // e.g. convert int64 to time.Time. + // + // Note: dst is always a pointer kind to the registered extension type. + UpdateExt(dst interface{}, src interface{}) +} + +// Ext handles custom (de)serialization of custom types / extensions. +type Ext interface { + BytesExt + InterfaceExt +} + +// addExtWrapper is a wrapper implementation to support former AddExt exported method. +type addExtWrapper struct { + encFn func(reflect.Value) ([]byte, error) + decFn func(reflect.Value, []byte) error +} + +func (x addExtWrapper) WriteExt(v interface{}) []byte { + bs, err := x.encFn(reflect.ValueOf(v)) + if err != nil { + panic(err) + } + return bs +} + +func (x addExtWrapper) ReadExt(v interface{}, bs []byte) { + if err := x.decFn(reflect.ValueOf(v), bs); err != nil { + panic(err) + } +} + +func (x addExtWrapper) ConvertExt(v interface{}) interface{} { + return x.WriteExt(v) +} + +func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) { + x.ReadExt(dest, v.([]byte)) +} + +type extWrapper struct { + BytesExt + InterfaceExt +} + +type bytesExtFailer struct{} + +func (bytesExtFailer) WriteExt(v interface{}) []byte { + panicv.errorstr("BytesExt.WriteExt is not supported") + return nil +} +func (bytesExtFailer) ReadExt(v interface{}, bs []byte) { + panicv.errorstr("BytesExt.ReadExt is not supported") +} + +type interfaceExtFailer struct{} + +func (interfaceExtFailer) ConvertExt(v interface{}) interface{} { + panicv.errorstr("InterfaceExt.ConvertExt is not supported") + return nil +} +func (interfaceExtFailer) UpdateExt(dest interface{}, v interface{}) { + panicv.errorstr("InterfaceExt.UpdateExt is not supported") +} + +type binaryEncodingType struct{} + +func (binaryEncodingType) isBinary() bool { return true } + +type textEncodingType struct{} + +func (textEncodingType) isBinary() bool { return false } + +// noBuiltInTypes is embedded into many types which do not support builtins +// e.g. msgpack, simple, cbor. + +// type noBuiltInTypeChecker struct{} +// func (noBuiltInTypeChecker) IsBuiltinType(rt uintptr) bool { return false } +// type noBuiltInTypes struct{ noBuiltInTypeChecker } + +type noBuiltInTypes struct{} + +func (noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {} +func (noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {} + +// type noStreamingCodec struct{} +// func (noStreamingCodec) CheckBreak() bool { return false } +// func (noStreamingCodec) hasElemSeparators() bool { return false } + +type noElemSeparators struct{} + +func (noElemSeparators) hasElemSeparators() (v bool) { return } +func (noElemSeparators) recreateEncDriver(e encDriver) (v bool) { return } + +// bigenHelper. +// Users must already slice the x completely, because we will not reslice. +type bigenHelper struct { + x []byte // must be correctly sliced to appropriate len. slicing is a cost. 
+	w *encWriterSwitch
+}
+
+func (z bigenHelper) writeUint16(v uint16) {
+	bigen.PutUint16(z.x, v)
+	z.w.writeb(z.x)
+}
+
+func (z bigenHelper) writeUint32(v uint32) {
+	bigen.PutUint32(z.x, v)
+	z.w.writeb(z.x)
+}
+
+func (z bigenHelper) writeUint64(v uint64) {
+	bigen.PutUint64(z.x, v)
+	z.w.writeb(z.x)
+}
+
+type extTypeTagFn struct {
+	rtid     uintptr
+	rtidptr  uintptr
+	rt       reflect.Type
+	tag      uint64
+	ext      Ext
+	_padding [1]uint64 // padding
+}
+
+type extHandle []extTypeTagFn
+
+// AddExt registers an encode and decode function for a reflect.Type.
+// To deregister an Ext, call AddExt with nil encfn and/or nil decfn.
+//
+// Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead.
+func (o *extHandle) AddExt(rt reflect.Type, tag byte,
+	encfn func(reflect.Value) ([]byte, error),
+	decfn func(reflect.Value, []byte) error) (err error) {
+	if encfn == nil || decfn == nil {
+		return o.SetExt(rt, uint64(tag), nil)
+	}
+	return o.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn})
+}
+
+// SetExt will set the extension for a tag and reflect.Type.
+// Note that the type must be a named type, and specifically not a pointer or interface.
+// An error is returned if that is not honored.
+// To deregister an ext, call SetExt with nil Ext.
+//
+// Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead.
+func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) {
+	// o is a pointer, because we may need to initialize it
+	rk := rt.Kind()
+	for rk == reflect.Ptr {
+		rt = rt.Elem()
+		rk = rt.Kind()
+	}
+
+	if rt.PkgPath() == "" || rk == reflect.Interface { // || rk == reflect.Ptr {
+		return fmt.Errorf("codec.Handle.SetExt: Takes named type, not a pointer or interface: %v", rt)
+	}
+
+	rtid := rt2id(rt)
+	switch rtid {
+	case timeTypId, rawTypId, rawExtTypId:
+		// all natively supported types, so cannot have an extension
+		return // TODO: should we silently ignore, or return an error???
+	}
+	o2 := *o
+	for i := range o2 {
+		v := &o2[i]
+		if v.rtid == rtid {
+			v.tag, v.ext = tag, ext
+			return
+		}
+	}
+	rtidptr := rt2id(reflect.PtrTo(rt))
+	*o = append(o2, extTypeTagFn{rtid, rtidptr, rt, tag, ext, [1]uint64{}})
+	return
+}
+
+func (o extHandle) getExt(rtid uintptr) (v *extTypeTagFn) {
+	for i := range o {
+		v = &o[i]
+		if v.rtid == rtid || v.rtidptr == rtid {
+			return
+		}
+	}
+	return nil
+}
+
+func (o extHandle) getExtForTag(tag uint64) (v *extTypeTagFn) {
+	for i := range o {
+		v = &o[i]
+		if v.tag == tag {
+			return
+		}
+	}
+	return nil
+}
+
+type intf2impl struct {
+	rtid uintptr // for intf
+	impl reflect.Type
+	// _ [1]uint64 // padding // not-needed, as *intf2impl is never returned.
+}
+
+type intf2impls []intf2impl
+
+// Intf2Impl maps an interface to an implementing type.
+// This allows us to support inferring the concrete type
+// and populating it when passed an interface.
+// e.g. var v io.Reader can be decoded as a bytes.Buffer, etc.
+//
+// Passing a nil impl will clear the mapping.
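+//
+// A usage sketch (illustrative; JsonHandle stands in for any Handle, whose
+// embedded BasicHandle promotes this method):
+//
+//	var h JsonHandle
+//	// decode any field declared as io.Reader into a *bytes.Buffer
+//	if err := h.Intf2Impl(
+//		reflect.TypeOf((*io.Reader)(nil)).Elem(), // the interface type
+//		reflect.TypeOf(&bytes.Buffer{}),          // a concrete implementation
+//	); err != nil {
+//		panic(err)
+//	}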
+func (o *intf2impls) Intf2Impl(intf, impl reflect.Type) (err error) { + if impl != nil && !impl.Implements(intf) { + return fmt.Errorf("Intf2Impl: %v does not implement %v", impl, intf) + } + rtid := rt2id(intf) + o2 := *o + for i := range o2 { + v := &o2[i] + if v.rtid == rtid { + v.impl = impl + return + } + } + *o = append(o2, intf2impl{rtid, impl}) + return +} + +func (o intf2impls) intf2impl(rtid uintptr) (rv reflect.Value) { + for i := range o { + v := &o[i] + if v.rtid == rtid { + if v.impl == nil { + return + } + if v.impl.Kind() == reflect.Ptr { + return reflect.New(v.impl.Elem()) + } + return reflect.New(v.impl).Elem() + } + } + return +} + +type structFieldInfoFlag uint8 + +const ( + _ structFieldInfoFlag = 1 << iota + structFieldInfoFlagReady + structFieldInfoFlagOmitEmpty +) + +func (x *structFieldInfoFlag) flagSet(f structFieldInfoFlag) { + *x = *x | f +} + +func (x *structFieldInfoFlag) flagClr(f structFieldInfoFlag) { + *x = *x &^ f +} + +func (x structFieldInfoFlag) flagGet(f structFieldInfoFlag) bool { + return x&f != 0 +} + +func (x structFieldInfoFlag) omitEmpty() bool { + return x.flagGet(structFieldInfoFlagOmitEmpty) +} + +func (x structFieldInfoFlag) ready() bool { + return x.flagGet(structFieldInfoFlagReady) +} + +type structFieldInfo struct { + encName string // encode name + fieldName string // field name + + is [maxLevelsEmbedding]uint16 // (recursive/embedded) field index in struct + nis uint8 // num levels of embedding. if 1, then it's not embedded. + + encNameAsciiAlphaNum bool // the encName only contains ascii alphabet and numbers + structFieldInfoFlag + _ [1]byte // padding +} + +func (si *structFieldInfo) setToZeroValue(v reflect.Value) { + if v, valid := si.field(v, false); valid { + v.Set(reflect.Zero(v.Type())) + } +} + +// rv returns the field of the struct. 
+// If anonymous, it returns an Invalid +func (si *structFieldInfo) field(v reflect.Value, update bool) (rv2 reflect.Value, valid bool) { + // replicate FieldByIndex + for i, x := range si.is { + if uint8(i) == si.nis { + break + } + if v, valid = baseStructRv(v, update); !valid { + return + } + v = v.Field(int(x)) + } + + return v, true +} + +// func (si *structFieldInfo) fieldval(v reflect.Value, update bool) reflect.Value { +// v, _ = si.field(v, update) +// return v +// } + +func parseStructInfo(stag string) (toArray, omitEmpty bool, keytype valueType) { + keytype = valueTypeString // default + if stag == "" { + return + } + for i, s := range strings.Split(stag, ",") { + if i == 0 { + } else { + switch s { + case "omitempty": + omitEmpty = true + case "toarray": + toArray = true + case "int": + keytype = valueTypeInt + case "uint": + keytype = valueTypeUint + case "float": + keytype = valueTypeFloat + // case "bool": + // keytype = valueTypeBool + case "string": + keytype = valueTypeString + } + } + } + return +} + +func (si *structFieldInfo) parseTag(stag string) { + // if fname == "" { + // panic(errNoFieldNameToStructFieldInfo) + // } + + if stag == "" { + return + } + for i, s := range strings.Split(stag, ",") { + if i == 0 { + if s != "" { + si.encName = s + } + } else { + switch s { + case "omitempty": + si.flagSet(structFieldInfoFlagOmitEmpty) + // si.omitEmpty = true + // case "toarray": + // si.toArray = true + } + } + } +} + +type sfiSortedByEncName []*structFieldInfo + +func (p sfiSortedByEncName) Len() int { return len(p) } +func (p sfiSortedByEncName) Less(i, j int) bool { return p[uint(i)].encName < p[uint(j)].encName } +func (p sfiSortedByEncName) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } + +const structFieldNodeNumToCache = 4 + +type structFieldNodeCache struct { + rv [structFieldNodeNumToCache]reflect.Value + idx [structFieldNodeNumToCache]uint32 + num uint8 +} + +func (x *structFieldNodeCache) get(key uint32) (fv reflect.Value, valid bool) { + for i, k := range &x.idx { + if uint8(i) == x.num { + return // break + } + if key == k { + return x.rv[i], true + } + } + return +} + +func (x *structFieldNodeCache) tryAdd(fv reflect.Value, key uint32) { + if x.num < structFieldNodeNumToCache { + x.rv[x.num] = fv + x.idx[x.num] = key + x.num++ + return + } +} + +type structFieldNode struct { + v reflect.Value + cache2 structFieldNodeCache + cache3 structFieldNodeCache + update bool +} + +func (x *structFieldNode) field(si *structFieldInfo) (fv reflect.Value) { + // return si.fieldval(x.v, x.update) + // Note: we only cache if nis=2 or nis=3 i.e. up to 2 levels of embedding + // This mostly saves us time on the repeated calls to v.Elem, v.Field, etc. 
+ var valid bool + switch si.nis { + case 1: + fv = x.v.Field(int(si.is[0])) + case 2: + if fv, valid = x.cache2.get(uint32(si.is[0])); valid { + fv = fv.Field(int(si.is[1])) + return + } + fv = x.v.Field(int(si.is[0])) + if fv, valid = baseStructRv(fv, x.update); !valid { + return + } + x.cache2.tryAdd(fv, uint32(si.is[0])) + fv = fv.Field(int(si.is[1])) + case 3: + var key uint32 = uint32(si.is[0])<<16 | uint32(si.is[1]) + if fv, valid = x.cache3.get(key); valid { + fv = fv.Field(int(si.is[2])) + return + } + fv = x.v.Field(int(si.is[0])) + if fv, valid = baseStructRv(fv, x.update); !valid { + return + } + fv = fv.Field(int(si.is[1])) + if fv, valid = baseStructRv(fv, x.update); !valid { + return + } + x.cache3.tryAdd(fv, key) + fv = fv.Field(int(si.is[2])) + default: + fv, _ = si.field(x.v, x.update) + } + return +} + +func baseStructRv(v reflect.Value, update bool) (v2 reflect.Value, valid bool) { + for v.Kind() == reflect.Ptr { + if v.IsNil() { + if !update { + return + } + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + return v, true +} + +type typeInfoFlag uint8 + +const ( + typeInfoFlagComparable = 1 << iota + typeInfoFlagIsZeroer + typeInfoFlagIsZeroerPtr +) + +// typeInfo keeps information about each (non-ptr) type referenced in the encode/decode sequence. +// +// During an encode/decode sequence, we work as below: +// - If base is a built in type, en/decode base value +// - If base is registered as an extension, en/decode base value +// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method +// - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method +// - Else decode appropriately based on the reflect.Kind +type typeInfo struct { + rt reflect.Type + elem reflect.Type + pkgpath string + + rtid uintptr + // rv0 reflect.Value // saved zero value, used if immutableKind + + numMeth uint16 // number of methods + kind uint8 + chandir uint8 + + anyOmitEmpty bool // true if a struct, and any of the fields are tagged "omitempty" + toArray bool // whether this (struct) type should be encoded as an array + keyType valueType // if struct, how is the field name stored in a stream? default is string + mbs bool // base type (T or *T) is a MapBySlice + + // ---- cpu cache line boundary? + sfiSort []*structFieldInfo // sorted. Used when enc/dec struct to map. + sfiSrc []*structFieldInfo // unsorted. Used when enc/dec struct to array. + + key reflect.Type + + // ---- cpu cache line boundary? + // sfis []structFieldInfo // all sfi, in src order, as created. + sfiNamesSort []byte // all names, with indexes into the sfiSort + + // format of marshal type fields below: [btj][mu]p? OR csp? + + bm bool // T is a binaryMarshaler + bmp bool // *T is a binaryMarshaler + bu bool // T is a binaryUnmarshaler + bup bool // *T is a binaryUnmarshaler + tm bool // T is a textMarshaler + tmp bool // *T is a textMarshaler + tu bool // T is a textUnmarshaler + tup bool // *T is a textUnmarshaler + + jm bool // T is a jsonMarshaler + jmp bool // *T is a jsonMarshaler + ju bool // T is a jsonUnmarshaler + jup bool // *T is a jsonUnmarshaler + cs bool // T is a Selfer + csp bool // *T is a Selfer + mf bool // T is a MissingFielder + mfp bool // *T is a MissingFielder + + // other flags, with individual bits representing if set. 
+	flags              typeInfoFlag
+	infoFieldOmitempty bool
+
+	_ [6]byte   // padding
+	_ [2]uint64 // padding
+}
+
+func (ti *typeInfo) isFlag(f typeInfoFlag) bool {
+	return ti.flags&f != 0
+}
+
+func (ti *typeInfo) indexForEncName(name []byte) (index int16) {
+	var sn []byte
+	if len(name)+2 <= 32 {
+		var buf [32]byte // should not escape to heap
+		sn = buf[:len(name)+2]
+	} else {
+		sn = make([]byte, len(name)+2)
+	}
+	copy(sn[1:], name)
+	sn[0], sn[len(sn)-1] = tiSep2(name), 0xff
+	j := bytes.Index(ti.sfiNamesSort, sn)
+	if j < 0 {
+		return -1
+	}
+	index = int16(uint16(ti.sfiNamesSort[j+len(sn)+1]) | uint16(ti.sfiNamesSort[j+len(sn)])<<8)
+	return
+}
+
+type rtid2ti struct {
+	rtid uintptr
+	ti   *typeInfo
+}
+
+// TypeInfos caches typeInfo for each type on first inspection.
+//
+// It is configured with a set of tag keys, which are used to get
+// configuration for the type.
+type TypeInfos struct {
+	// infos: formerly map[uintptr]*typeInfo, now *[]rtid2ti, 2 words expected
+	infos atomicTypeInfoSlice
+	mu    sync.Mutex
+	tags  []string
+	_     [2]uint64 // padding
+}
+
+// NewTypeInfos creates a TypeInfos given a set of struct tag keys.
+//
+// This allows users to customize the struct tag keys which contain configuration
+// of their types.
+func NewTypeInfos(tags []string) *TypeInfos {
+	return &TypeInfos{tags: tags}
+}
+
+func (x *TypeInfos) structTag(t reflect.StructTag) (s string) {
+	// check for tags: codec, json, in that order.
+	// this allows seamless support for many configured structs.
+	for _, x := range x.tags {
+		s = t.Get(x)
+		if s != "" {
+			return s
+		}
+	}
+	return
+}
+
+func findTypeInfo(s []rtid2ti, rtid uintptr) (i uint, ti *typeInfo) {
+	// binary search. adapted from sort/search.go.
+	// Note: we use goto (instead of for loop) so this can be inlined.
+
+	// if sp == nil {
+	//   return -1, nil
+	// }
+	// s := *sp
+
+	// h, i, j := 0, 0, len(s)
+	var h uint // var h, i uint
+	var j = uint(len(s))
+LOOP:
+	if i < j {
+		h = i + (j-i)/2
+		if s[h].rtid < rtid {
+			i = h + 1
+		} else {
+			j = h
+		}
+		goto LOOP
+	}
+	if i < uint(len(s)) && s[i].rtid == rtid {
+		ti = s[i].ti
+	}
+	return
+}
+
+func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
+	sp := x.infos.load()
+	if sp != nil {
+		_, pti = findTypeInfo(sp, rtid)
+		if pti != nil {
+			return
+		}
+	}
+
+	rk := rt.Kind()
+
+	if rk == reflect.Ptr { // || (rk == reflect.Interface && rtid != intfTypId) {
+		panicv.errorf("invalid kind passed to TypeInfos.get: %v - %v", rk, rt)
+	}
+
+	// do not hold lock while computing this.
+	// it may lead to duplication, but that's ok.
+ ti := typeInfo{ + rt: rt, + rtid: rtid, + kind: uint8(rk), + pkgpath: rt.PkgPath(), + keyType: valueTypeString, // default it - so it's never 0 + } + // ti.rv0 = reflect.Zero(rt) + + // ti.comparable = rt.Comparable() + ti.numMeth = uint16(rt.NumMethod()) + + ti.bm, ti.bmp = implIntf(rt, binaryMarshalerTyp) + ti.bu, ti.bup = implIntf(rt, binaryUnmarshalerTyp) + ti.tm, ti.tmp = implIntf(rt, textMarshalerTyp) + ti.tu, ti.tup = implIntf(rt, textUnmarshalerTyp) + ti.jm, ti.jmp = implIntf(rt, jsonMarshalerTyp) + ti.ju, ti.jup = implIntf(rt, jsonUnmarshalerTyp) + ti.cs, ti.csp = implIntf(rt, selferTyp) + ti.mf, ti.mfp = implIntf(rt, missingFielderTyp) + + b1, b2 := implIntf(rt, iszeroTyp) + if b1 { + ti.flags |= typeInfoFlagIsZeroer + } + if b2 { + ti.flags |= typeInfoFlagIsZeroerPtr + } + if rt.Comparable() { + ti.flags |= typeInfoFlagComparable + } + + switch rk { + case reflect.Struct: + var omitEmpty bool + if f, ok := rt.FieldByName(structInfoFieldName); ok { + ti.toArray, omitEmpty, ti.keyType = parseStructInfo(x.structTag(f.Tag)) + ti.infoFieldOmitempty = omitEmpty + } else { + ti.keyType = valueTypeString + } + pp, pi := &pool.tiload, pool.tiload.Get() // pool.tiLoad() + pv := pi.(*typeInfoLoadArray) + pv.etypes[0] = ti.rtid + // vv := typeInfoLoad{pv.fNames[:0], pv.encNames[:0], pv.etypes[:1], pv.sfis[:0]} + vv := typeInfoLoad{pv.etypes[:1], pv.sfis[:0]} + x.rget(rt, rtid, omitEmpty, nil, &vv) + // ti.sfis = vv.sfis + ti.sfiSrc, ti.sfiSort, ti.sfiNamesSort, ti.anyOmitEmpty = rgetResolveSFI(rt, vv.sfis, pv) + pp.Put(pi) + case reflect.Map: + ti.elem = rt.Elem() + ti.key = rt.Key() + case reflect.Slice: + ti.mbs, _ = implIntf(rt, mapBySliceTyp) + ti.elem = rt.Elem() + case reflect.Chan: + ti.elem = rt.Elem() + ti.chandir = uint8(rt.ChanDir()) + case reflect.Array, reflect.Ptr: + ti.elem = rt.Elem() + } + // sfi = sfiSrc + + x.mu.Lock() + sp = x.infos.load() + var sp2 []rtid2ti + if sp == nil { + pti = &ti + sp2 = []rtid2ti{{rtid, pti}} + x.infos.store(sp2) + } else { + var idx uint + idx, pti = findTypeInfo(sp, rtid) + if pti == nil { + pti = &ti + sp2 = make([]rtid2ti, len(sp)+1) + copy(sp2, sp[:idx]) + copy(sp2[idx+1:], sp[idx:]) + sp2[idx] = rtid2ti{rtid, pti} + x.infos.store(sp2) + } + } + x.mu.Unlock() + return +} + +func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr, omitEmpty bool, + indexstack []uint16, pv *typeInfoLoad) { + // Read up fields and store how to access the value. + // + // It uses go's rules for message selectors, + // which say that the field with the shallowest depth is selected. + // + // Note: we consciously use slices, not a map, to simulate a set. 
+	// Typically, types have < 16 fields,
+	// and iteration using equals is faster than maps there
+	flen := rt.NumField()
+	if flen > (1<<maxLevelsEmbedding - 1) {
+		panicv.errorf("codec: types with > %v fields are not supported - has %v fields",
+			(1<<maxLevelsEmbedding - 1), flen)
+	}
+LOOP:
+	for j, jlen := uint16(0), uint16(flen); j < jlen; j++ {
+		f := rt.Field(int(j))
+		fkind := f.Type.Kind()
+		// skip if a func type, or is unexported, or structTag value == "-"
+		switch fkind {
+		case reflect.Func, reflect.Chan, reflect.UnsafePointer:
+			continue LOOP
+		}
+
+		isUnexported := f.PkgPath != ""
+		if isUnexported && !f.Anonymous {
+			continue
+		}
+		stag := x.structTag(f.Tag)
+		if stag == "-" {
+			continue
+		}
+		var si structFieldInfo
+		var parsed bool
+		// if anonymous and no struct tag (or it's blank),
+		// and a struct (or pointer to struct), inline it.
+		if f.Anonymous && fkind != reflect.Interface {
+			ft := f.Type
+			for ft.Kind() == reflect.Ptr {
+				ft = ft.Elem()
+			}
+			isStruct := ft.Kind() == reflect.Struct
+
+			// Ignore embedded fields of unexported non-struct types.
+			if isUnexported && !isStruct {
+				continue
+			}
+			doInline := stag == ""
+			if !doInline {
+				si.parseTag(stag)
+				parsed = true
+				doInline = si.encName == ""
+			}
+			if doInline && isStruct {
+				// if etypes contains this, don't call rget again (as fields are already seen here).
+				// We cannot recurse forever; so stop once a type is seen rgetMaxRecursion times.
+				ftid := rt2id(ft)
+				processIt := true
+				numk := 0
+				for _, k := range pv.etypes {
+					if k == ftid {
+						numk++
+						if numk == rgetMaxRecursion {
+							processIt = false
+							break
+						}
+					}
+				}
+				if processIt {
+					pv.etypes = append(pv.etypes, ftid)
+					indexstack2 := make([]uint16, len(indexstack)+1)
+					copy(indexstack2, indexstack)
+					indexstack2[len(indexstack)] = j
+					x.rget(ft, ftid, omitEmpty, indexstack2, pv)
+				}
+				continue
+			}
+		}
+
+		// after the anonymous dance: if an unexported field, skip
+		if isUnexported {
+			continue
+		}
+
+		if f.Name == "" {
+			panic(errNoFieldNameToStructFieldInfo)
+		}
+
+		if !parsed {
+			si.encName = f.Name
+			si.parseTag(stag)
+			parsed = true
+		} else if si.encName == "" {
+			si.encName = f.Name
+		}
+		si.encNameAsciiAlphaNum = true
+		for i := len(si.encName) - 1; i >= 0; i-- { // bounds-check elimination
+			b := si.encName[i]
+			if (b >= '0' && b <= '9') || (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') {
+				continue
+			}
+			si.encNameAsciiAlphaNum = false
+			break
+		}
+		si.fieldName = f.Name
+		si.flagSet(structFieldInfoFlagReady)
+
+		// pv.encNames = append(pv.encNames, si.encName)
+
+		// si.ikind = int(f.Type.Kind())
+		if len(indexstack) > maxLevelsEmbedding-1 {
+			panicv.errorf("codec: only supports up to %v depth of embedding - type has %v depth",
+				maxLevelsEmbedding-1, len(indexstack))
+		}
+		si.nis = uint8(len(indexstack)) + 1
+		copy(si.is[:], indexstack)
+		si.is[len(indexstack)] = j
+
+		if omitEmpty {
+			si.flagSet(structFieldInfoFlagOmitEmpty)
+		}
+		pv.sfis = append(pv.sfis, si)
+	}
+}
+
+func tiSep(name string) uint8 {
+	// (xn[0]%64) // (between 192-255 - outside ascii BMP)
+	// return 0xfe - (name[0] & 63)
+	// return 0xfe - (name[0] & 63) - uint8(len(name))
+	// return 0xfe - (name[0] & 63) - uint8(len(name)&63)
+	// return ((0xfe - (name[0] & 63)) & 0xf8) | (uint8(len(name) & 0x07))
+	return 0xfe - (name[0] & 63) - uint8(len(name)&63)
+}
+
+func tiSep2(name []byte) uint8 {
+	return 0xfe - (name[0] & 63) - uint8(len(name)&63)
+}
+
+// resolves the struct field info got from a call to rget.
+// Returns a trimmed, unsorted and sorted []*structFieldInfo.
+func rgetResolveSFI(rt reflect.Type, x []structFieldInfo, pv *typeInfoLoadArray) (
+	y, z []*structFieldInfo, ss []byte, anyOmitEmpty bool) {
+	sa := pv.sfiidx[:0]
+	sn := pv.b[:]
+	n := len(x)
+
+	var xn string
+	var ui uint16
+	var sep byte
+
+	for i := range x {
+		ui = uint16(i)
+		xn = x[i].encName // fieldName or encName? use encName for now.
+		if len(xn)+2 > cap(pv.b) {
+			sn = make([]byte, len(xn)+2)
+		} else {
+			sn = sn[:len(xn)+2]
+		}
+		// use a custom sep, so that misses are less frequent,
+		// since the sep (first char in search) is as unique as first char in field name.
+		sep = tiSep(xn)
+		sn[0], sn[len(sn)-1] = sep, 0xff
+		copy(sn[1:], xn)
+		j := bytes.Index(sa, sn)
+		if j == -1 {
+			sa = append(sa, sep)
+			sa = append(sa, xn...)
+ sa = append(sa, 0xff, byte(ui>>8), byte(ui)) + } else { + index := uint16(sa[j+len(sn)+1]) | uint16(sa[j+len(sn)])<<8 + // one of them must be reset to nil, + // and the index updated appropriately to the other one + if x[i].nis == x[index].nis { + } else if x[i].nis < x[index].nis { + sa[j+len(sn)], sa[j+len(sn)+1] = byte(ui>>8), byte(ui) + if x[index].ready() { + x[index].flagClr(structFieldInfoFlagReady) + n-- + } + } else { + if x[i].ready() { + x[i].flagClr(structFieldInfoFlagReady) + n-- + } + } + } + + } + var w []structFieldInfo + sharingArray := len(x) <= typeInfoLoadArraySfisLen // sharing array with typeInfoLoadArray + if sharingArray { + w = make([]structFieldInfo, n) + } + + // remove all the nils (non-ready) + y = make([]*structFieldInfo, n) + n = 0 + var sslen int + for i := range x { + if !x[i].ready() { + continue + } + if !anyOmitEmpty && x[i].omitEmpty() { + anyOmitEmpty = true + } + if sharingArray { + w[n] = x[i] + y[n] = &w[n] + } else { + y[n] = &x[i] + } + sslen = sslen + len(x[i].encName) + 4 + n++ + } + if n != len(y) { + panicv.errorf("failure reading struct %v - expecting %d of %d valid fields, got %d", + rt, len(y), len(x), n) + } + + z = make([]*structFieldInfo, len(y)) + copy(z, y) + sort.Sort(sfiSortedByEncName(z)) + + sharingArray = len(sa) <= typeInfoLoadArraySfiidxLen + if sharingArray { + ss = make([]byte, 0, sslen) + } else { + ss = sa[:0] // reuse the newly made sa array if necessary + } + for i := range z { + xn = z[i].encName + sep = tiSep(xn) + ui = uint16(i) + ss = append(ss, sep) + ss = append(ss, xn...) + ss = append(ss, 0xff, byte(ui>>8), byte(ui)) + } + return +} + +func implIntf(rt, iTyp reflect.Type) (base bool, indir bool) { + return rt.Implements(iTyp), reflect.PtrTo(rt).Implements(iTyp) +} + +// isEmptyStruct is only called from isEmptyValue, and checks if a struct is empty: +// - does it implement IsZero() bool +// - is it comparable, and can i compare directly using == +// - if checkStruct, then walk through the encodable fields +// and check if they are empty or not. +func isEmptyStruct(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool { + // v is a struct kind - no need to check again. + // We only check isZero on a struct kind, to reduce the amount of times + // that we lookup the rtid and typeInfo for each type as we walk the tree. + + vt := v.Type() + rtid := rt2id(vt) + if tinfos == nil { + tinfos = defTypeInfos + } + ti := tinfos.get(rtid, vt) + if ti.rtid == timeTypId { + return rv2i(v).(time.Time).IsZero() + } + if ti.isFlag(typeInfoFlagIsZeroerPtr) && v.CanAddr() { + return rv2i(v.Addr()).(isZeroer).IsZero() + } + if ti.isFlag(typeInfoFlagIsZeroer) { + return rv2i(v).(isZeroer).IsZero() + } + if ti.isFlag(typeInfoFlagComparable) { + return rv2i(v) == rv2i(reflect.Zero(vt)) + } + if !checkStruct { + return false + } + // We only care about what we can encode/decode, + // so that is what we use to check omitEmpty. + for _, si := range ti.sfiSrc { + sfv, valid := si.field(v, false) + if valid && !isEmptyValue(sfv, tinfos, deref, checkStruct) { + return false + } + } + return true +} + +// func roundFloat(x float64) float64 { +// t := math.Trunc(x) +// if math.Abs(x-t) >= 0.5 { +// return t + math.Copysign(1, x) +// } +// return t +// } + +func panicToErr(h errDecorator, err *error) { + // Note: This method MUST be called directly from defer i.e. defer panicToErr ... 
+ // else it seems the recover is not fully handled + if recoverPanicToErr { + if x := recover(); x != nil { + // fmt.Printf("panic'ing with: %v\n", x) + // debug.PrintStack() + panicValToErr(h, x, err) + } + } +} + +func panicValToErr(h errDecorator, v interface{}, err *error) { + switch xerr := v.(type) { + case nil: + case error: + switch xerr { + case nil: + case io.EOF, io.ErrUnexpectedEOF, errEncoderNotInitialized, errDecoderNotInitialized: + // treat as special (bubble up) + *err = xerr + default: + h.wrapErr(xerr, err) + } + case string: + if xerr != "" { + h.wrapErr(xerr, err) + } + case fmt.Stringer: + if xerr != nil { + h.wrapErr(xerr, err) + } + default: + h.wrapErr(v, err) + } +} + +func isImmutableKind(k reflect.Kind) (v bool) { + // return immutableKindsSet[k] + // since we know reflect.Kind is in range 0..31, then use the k%32 == k constraint + return immutableKindsSet[k%reflect.Kind(len(immutableKindsSet))] // bounds-check-elimination +} + +// ---- + +type codecFnInfo struct { + ti *typeInfo + xfFn Ext + xfTag uint64 + seq seqType + addrD bool + addrF bool // if addrD, this says whether decode function can take a value or a ptr + addrE bool +} + +// codecFn encapsulates the captured variables and the encode function. +// This way, we only do some calculations one times, and pass to the +// code block that should be called (encapsulated in a function) +// instead of executing the checks every time. +type codecFn struct { + i codecFnInfo + fe func(*Encoder, *codecFnInfo, reflect.Value) + fd func(*Decoder, *codecFnInfo, reflect.Value) + _ [1]uint64 // padding +} + +type codecRtidFn struct { + rtid uintptr + fn *codecFn +} + +// ---- + +// these "checkOverflow" functions must be inlinable, and not call anybody. +// Overflow means that the value cannot be represented without wrapping/overflow. +// Overflow=false does not mean that the value can be represented without losing precision +// (especially for floating point). + +type checkOverflow struct{} + +// func (checkOverflow) Float16(f float64) (overflow bool) { +// panicv.errorf("unimplemented") +// if f < 0 { +// f = -f +// } +// return math.MaxFloat32 < f && f <= math.MaxFloat64 +// } + +func (checkOverflow) Float32(v float64) (overflow bool) { + if v < 0 { + v = -v + } + return math.MaxFloat32 < v && v <= math.MaxFloat64 +} +func (checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) { + if bitsize == 0 || bitsize >= 64 || v == 0 { + return + } + if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc { + overflow = true + } + return +} +func (checkOverflow) Int(v int64, bitsize uint8) (overflow bool) { + if bitsize == 0 || bitsize >= 64 || v == 0 { + return + } + if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc { + overflow = true + } + return +} +func (checkOverflow) SignedInt(v uint64) (overflow bool) { + //e.g. 
-128 to 127 for int8
+	pos := (v >> 63) == 0
+	ui2 := v & 0x7fffffffffffffff
+	if pos {
+		if ui2 > math.MaxInt64 {
+			overflow = true
+		}
+	} else {
+		if ui2 > math.MaxInt64-1 {
+			overflow = true
+		}
+	}
+	return
+}
+
+func (x checkOverflow) Float32V(v float64) float64 {
+	if x.Float32(v) {
+		panicv.errorf("float32 overflow: %v", v)
+	}
+	return v
+}
+func (x checkOverflow) UintV(v uint64, bitsize uint8) uint64 {
+	if x.Uint(v, bitsize) {
+		panicv.errorf("uint64 overflow: %v", v)
+	}
+	return v
+}
+func (x checkOverflow) IntV(v int64, bitsize uint8) int64 {
+	if x.Int(v, bitsize) {
+		panicv.errorf("int64 overflow: %v", v)
+	}
+	return v
+}
+func (x checkOverflow) SignedIntV(v uint64) int64 {
+	if x.SignedInt(v) {
+		panicv.errorf("uint64 to int64 overflow: %v", v)
+	}
+	return int64(v)
+}
+
+// ------------------ SORT -----------------
+
+func isNaN(f float64) bool { return f != f }
+
+// -----------------------
+
+type ioFlusher interface {
+	Flush() error
+}
+
+type ioPeeker interface {
+	Peek(int) ([]byte, error)
+}
+
+type ioBuffered interface {
+	Buffered() int
+}
+
+// -----------------------
+
+type intSlice []int64
+type uintSlice []uint64
+
+// type uintptrSlice []uintptr
+type floatSlice []float64
+type boolSlice []bool
+type stringSlice []string
+
+// type bytesSlice [][]byte
+
+func (p intSlice) Len() int           { return len(p) }
+func (p intSlice) Less(i, j int) bool { return p[uint(i)] < p[uint(j)] }
+func (p intSlice) Swap(i, j int)      { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
+
+func (p uintSlice) Len() int           { return len(p) }
+func (p uintSlice) Less(i, j int) bool { return p[uint(i)] < p[uint(j)] }
+func (p uintSlice) Swap(i, j int)      { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
+
+// func (p uintptrSlice) Len() int           { return len(p) }
+// func (p uintptrSlice) Less(i, j int) bool { return p[uint(i)] < p[uint(j)] }
+// func (p uintptrSlice) Swap(i, j int)      { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
+
+func (p floatSlice) Len() int { return len(p) }
+func (p floatSlice) Less(i, j int) bool {
+	return p[uint(i)] < p[uint(j)] || isNaN(p[uint(i)]) && !isNaN(p[uint(j)])
+}
+func (p floatSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
+
+func (p stringSlice) Len() int           { return len(p) }
+func (p stringSlice) Less(i, j int) bool { return p[uint(i)] < p[uint(j)] }
+func (p stringSlice) Swap(i, j int)      { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
+
+// func (p bytesSlice) Len() int           { return len(p) }
+// func (p bytesSlice) Less(i, j int) bool { return bytes.Compare(p[uint(i)], p[uint(j)]) == -1 }
+// func (p bytesSlice) Swap(i, j int)      { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
+
+func (p boolSlice) Len() int           { return len(p) }
+func (p boolSlice) Less(i, j int) bool { return !p[uint(i)] && p[uint(j)] }
+func (p boolSlice) Swap(i, j int)      { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
+
+// ---------------------
+
+type sfiRv struct {
+	v *structFieldInfo
+	r reflect.Value
+}
+
+type intRv struct {
+	v int64
+	r reflect.Value
+}
+type intRvSlice []intRv
+type uintRv struct {
+	v uint64
+	r reflect.Value
+}
+type uintRvSlice []uintRv
+type floatRv struct {
+	v float64
+	r reflect.Value
+}
+type floatRvSlice []floatRv
+type boolRv struct {
+	v bool
+	r reflect.Value
+}
+type boolRvSlice []boolRv
+type stringRv struct {
+	v string
+	r reflect.Value
+}
+type stringRvSlice []stringRv
+type bytesRv struct {
+	v []byte
+	r reflect.Value
+}
+type bytesRvSlice []bytesRv
+type timeRv struct {
+	v time.Time
+	r reflect.Value
+}
+type timeRvSlice []timeRv
+
+func (p intRvSlice) Len() int           { return len(p) }
+func (p intRvSlice) Less(i, j int) bool { return p[uint(i)].v < p[uint(j)].v }
+func (p intRvSlice) Swap(i, j int)      { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
+
+func (p uintRvSlice) Len() int           { return len(p) }
+func (p uintRvSlice) Less(i, j int) bool { return p[uint(i)].v < p[uint(j)].v }
+func (p uintRvSlice) Swap(i, j int)      { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
+
+func (p floatRvSlice) Len() int { return len(p) }
+func (p floatRvSlice) Less(i, j int) bool {
+	return p[uint(i)].v < p[uint(j)].v || isNaN(p[uint(i)].v) && !isNaN(p[uint(j)].v)
+}
+func (p floatRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
+
+func (p stringRvSlice) Len() int           { return len(p) }
+func (p stringRvSlice) Less(i, j int) bool { return p[uint(i)].v < p[uint(j)].v }
+func (p stringRvSlice) Swap(i, j int)      { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
+
+func (p bytesRvSlice) Len() int           { return len(p) }
+func (p bytesRvSlice) Less(i, j int) bool { return bytes.Compare(p[uint(i)].v, p[uint(j)].v) == -1 }
+func (p bytesRvSlice) Swap(i, j int)      { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
+
+func (p boolRvSlice) Len() int           { return len(p) }
+func (p boolRvSlice) Less(i, j int) bool { return !p[uint(i)].v && p[uint(j)].v }
+func (p boolRvSlice) Swap(i, j int)      { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
+
+func (p timeRvSlice) Len() int           { return len(p) }
+func (p timeRvSlice) Less(i, j int) bool { return p[uint(i)].v.Before(p[uint(j)].v) }
+func (p timeRvSlice) Swap(i, j int)      { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
+
+// -----------------
+
+type bytesI struct {
+	v []byte
+	i interface{}
+}
+
+type bytesISlice []bytesI
+
+func (p bytesISlice) Len() int           { return len(p) }
+func (p bytesISlice) Less(i, j int) bool { return bytes.Compare(p[uint(i)].v, p[uint(j)].v) == -1 }
+func (p bytesISlice) Swap(i, j int)      { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
+
+// -----------------
+
+type set []uintptr
+
+func (s *set) add(v uintptr) (exists bool) {
+	// e.ci is always nil, or len >= 1
+	x := *s
+	if x == nil {
+		x = make([]uintptr, 1, 8)
+		x[0] = v
+		*s = x
+		return
+	}
+	// typically, length will be 1. make this perform.
+	if len(x) == 1 {
+		if j := x[0]; j == 0 {
+			x[0] = v
+		} else if j == v {
+			exists = true
+		} else {
+			x = append(x, v)
+			*s = x
+		}
+		return
+	}
+	// check if it exists
+	for _, j := range x {
+		if j == v {
+			exists = true
+			return
+		}
+	}
+	// try to replace a "deleted" slot
+	for i, j := range x {
+		if j == 0 {
+			x[i] = v
+			return
+		}
+	}
+	// if unable to replace deleted slot, just append it.
+	x = append(x, v)
+	*s = x
+	return
+}
+
+func (s *set) remove(v uintptr) (exists bool) {
+	x := *s
+	if len(x) == 0 {
+		return
+	}
+	if len(x) == 1 {
+		if x[0] == v {
+			x[0] = 0
+		}
+		return
+	}
+	for i, j := range x {
+		if j == v {
+			exists = true
+			x[i] = 0 // set it to 0, as way to delete it.
+			// copy(x[i:], x[i+1:])
+			// x = x[:len(x)-1]
+			return
+		}
+	}
+	return
+}
+
+// ------
+
+// bitset types are better than [256]bool, because they permit the whole
+// bitset array to fit on a single cache line and use less memory.
+//
+// Also, since pos is a byte (0-255), there are no bounds checks on indexing (cheap).
+//
+// We previously had bitset128 [16]byte, and bitset32 [4]byte, but those introduced
+// bounds checking, so we discarded them, and everyone uses bitset256.
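+//
+// For example (illustrative): position 0x41 (65) maps to bit 1 of byte 8,
+// since 65>>3 == 8 and 65&7 == 1:
+//
+//	var bs bitset256
+//	bs.set(0x41)
+//	_ = bs.isset(0x41) // true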
+// +// given x > 0 and n > 0 and x is exactly 2^n, then pos/x === pos>>n AND pos%x === pos&(x-1). +// consequently, pos/32 === pos>>5, pos/16 === pos>>4, pos/8 === pos>>3, pos%8 == pos&7 + +type bitset256 [32]byte + +func (x *bitset256) isset(pos byte) bool { + return x[pos>>3]&(1<<(pos&7)) != 0 +} + +// func (x *bitset256) issetv(pos byte) byte { +// return x[pos>>3] & (1 << (pos & 7)) +// } + +func (x *bitset256) set(pos byte) { + x[pos>>3] |= (1 << (pos & 7)) +} + +// func (x *bitset256) unset(pos byte) { +// x[pos>>3] &^= (1 << (pos & 7)) +// } + +// type bit2set256 [64]byte + +// func (x *bit2set256) set(pos byte, v1, v2 bool) { +// var pos2 uint8 = (pos & 3) << 1 // returning 0, 2, 4 or 6 +// if v1 { +// x[pos>>2] |= 1 << (pos2 + 1) +// } +// if v2 { +// x[pos>>2] |= 1 << pos2 +// } +// } +// func (x *bit2set256) get(pos byte) uint8 { +// var pos2 uint8 = (pos & 3) << 1 // returning 0, 2, 4 or 6 +// return x[pos>>2] << (6 - pos2) >> 6 // 11000000 -> 00000011 +// } + +// ------------ + +type pooler struct { + // function-scoped pooled resources + tiload sync.Pool // for type info loading + sfiRv8, sfiRv16, sfiRv32, sfiRv64, sfiRv128 sync.Pool // for struct encoding + + // lifetime-scoped pooled resources + // dn sync.Pool // for decNaked + buf1k, buf2k, buf4k, buf8k, buf16k, buf32k, buf64k sync.Pool // for [N]byte +} + +func (p *pooler) init() { + p.tiload.New = func() interface{} { return new(typeInfoLoadArray) } + + p.sfiRv8.New = func() interface{} { return new([8]sfiRv) } + p.sfiRv16.New = func() interface{} { return new([16]sfiRv) } + p.sfiRv32.New = func() interface{} { return new([32]sfiRv) } + p.sfiRv64.New = func() interface{} { return new([64]sfiRv) } + p.sfiRv128.New = func() interface{} { return new([128]sfiRv) } + + // p.dn.New = func() interface{} { x := new(decNaked); x.init(); return x } + + p.buf1k.New = func() interface{} { return new([1 * 1024]byte) } + p.buf2k.New = func() interface{} { return new([2 * 1024]byte) } + p.buf4k.New = func() interface{} { return new([4 * 1024]byte) } + p.buf8k.New = func() interface{} { return new([8 * 1024]byte) } + p.buf16k.New = func() interface{} { return new([16 * 1024]byte) } + p.buf32k.New = func() interface{} { return new([32 * 1024]byte) } + p.buf64k.New = func() interface{} { return new([64 * 1024]byte) } + +} + +// func (p *pooler) sfiRv8() (sp *sync.Pool, v interface{}) { +// return &p.strRv8, p.strRv8.Get() +// } +// func (p *pooler) sfiRv16() (sp *sync.Pool, v interface{}) { +// return &p.strRv16, p.strRv16.Get() +// } +// func (p *pooler) sfiRv32() (sp *sync.Pool, v interface{}) { +// return &p.strRv32, p.strRv32.Get() +// } +// func (p *pooler) sfiRv64() (sp *sync.Pool, v interface{}) { +// return &p.strRv64, p.strRv64.Get() +// } +// func (p *pooler) sfiRv128() (sp *sync.Pool, v interface{}) { +// return &p.strRv128, p.strRv128.Get() +// } + +// func (p *pooler) bytes1k() (sp *sync.Pool, v interface{}) { +// return &p.buf1k, p.buf1k.Get() +// } +// func (p *pooler) bytes2k() (sp *sync.Pool, v interface{}) { +// return &p.buf2k, p.buf2k.Get() +// } +// func (p *pooler) bytes4k() (sp *sync.Pool, v interface{}) { +// return &p.buf4k, p.buf4k.Get() +// } +// func (p *pooler) bytes8k() (sp *sync.Pool, v interface{}) { +// return &p.buf8k, p.buf8k.Get() +// } +// func (p *pooler) bytes16k() (sp *sync.Pool, v interface{}) { +// return &p.buf16k, p.buf16k.Get() +// } +// func (p *pooler) bytes32k() (sp *sync.Pool, v interface{}) { +// return &p.buf32k, p.buf32k.Get() +// } +// func (p *pooler) bytes64k() (sp *sync.Pool, v 
interface{}) { +// return &p.buf64k, p.buf64k.Get() +// } + +// func (p *pooler) tiLoad() (sp *sync.Pool, v interface{}) { +// return &p.tiload, p.tiload.Get() +// } + +// func (p *pooler) decNaked() (sp *sync.Pool, v interface{}) { +// return &p.dn, p.dn.Get() +// } + +// func (p *pooler) decNaked() (v *decNaked, f func(*decNaked) ) { +// sp := &(p.dn) +// vv := sp.Get() +// return vv.(*decNaked), func(x *decNaked) { sp.Put(vv) } +// } +// func (p *pooler) decNakedGet() (v interface{}) { +// return p.dn.Get() +// } +// func (p *pooler) tiLoadGet() (v interface{}) { +// return p.tiload.Get() +// } +// func (p *pooler) decNakedPut(v interface{}) { +// p.dn.Put(v) +// } +// func (p *pooler) tiLoadPut(v interface{}) { +// p.tiload.Put(v) +// } + +// ---------------------------------------------------- + +type panicHdl struct{} + +func (panicHdl) errorv(err error) { + if err != nil { + panic(err) + } +} + +func (panicHdl) errorstr(message string) { + if message != "" { + panic(message) + } +} + +func (panicHdl) errorf(format string, params ...interface{}) { + if format == "" { + } else if len(params) == 0 { + panic(format) + } else { + panic(fmt.Sprintf(format, params...)) + } +} + +// ---------------------------------------------------- + +type errDecorator interface { + wrapErr(in interface{}, out *error) +} + +type errDecoratorDef struct{} + +func (errDecoratorDef) wrapErr(v interface{}, e *error) { *e = fmt.Errorf("%v", v) } + +// ---------------------------------------------------- + +type must struct{} + +func (must) String(s string, err error) string { + if err != nil { + panicv.errorv(err) + } + return s +} +func (must) Int(s int64, err error) int64 { + if err != nil { + panicv.errorv(err) + } + return s +} +func (must) Uint(s uint64, err error) uint64 { + if err != nil { + panicv.errorv(err) + } + return s +} +func (must) Float(s float64, err error) float64 { + if err != nil { + panicv.errorv(err) + } + return s +} + +// ------------------- + +type bytesBufPooler struct { + pool *sync.Pool + poolbuf interface{} +} + +func (z *bytesBufPooler) end() { + if z.pool != nil { + z.pool.Put(z.poolbuf) + z.pool, z.poolbuf = nil, nil + } +} + +func (z *bytesBufPooler) get(bufsize int) (buf []byte) { + // ensure an end is called first (if necessary) + if z.pool != nil { + z.pool.Put(z.poolbuf) + z.pool, z.poolbuf = nil, nil + } + + if bufsize <= 1*1024 { + z.pool, z.poolbuf = &pool.buf1k, pool.buf1k.Get() // pool.bytes1k() + buf = z.poolbuf.(*[1 * 1024]byte)[:] + } else if bufsize <= 2*1024 { + z.pool, z.poolbuf = &pool.buf2k, pool.buf2k.Get() // pool.bytes2k() + buf = z.poolbuf.(*[2 * 1024]byte)[:] + } else if bufsize <= 4*1024 { + z.pool, z.poolbuf = &pool.buf4k, pool.buf4k.Get() // pool.bytes4k() + buf = z.poolbuf.(*[4 * 1024]byte)[:] + } else if bufsize <= 8*1024 { + z.pool, z.poolbuf = &pool.buf8k, pool.buf8k.Get() // pool.bytes8k() + buf = z.poolbuf.(*[8 * 1024]byte)[:] + } else if bufsize <= 16*1024 { + z.pool, z.poolbuf = &pool.buf16k, pool.buf16k.Get() // pool.bytes16k() + buf = z.poolbuf.(*[16 * 1024]byte)[:] + } else if bufsize <= 32*1024 { + z.pool, z.poolbuf = &pool.buf32k, pool.buf32k.Get() // pool.bytes32k() + buf = z.poolbuf.(*[32 * 1024]byte)[:] + } else { + z.pool, z.poolbuf = &pool.buf64k, pool.buf64k.Get() // pool.bytes64k() + buf = z.poolbuf.(*[64 * 1024]byte)[:] + } + return +} + +// ---------------- + +type sfiRvPooler struct { + pool *sync.Pool + poolv interface{} +} + +func (z *sfiRvPooler) end() { + if z.pool != nil { + z.pool.Put(z.poolv) + z.pool, z.poolv = nil, nil + 
	}
+}
+
+func (z *sfiRvPooler) get(newlen int) (fkvs []sfiRv) {
+	if newlen < 0 { // bounds-check-elimination
+		// cannot happen // here for bounds-check-elimination
+	} else if newlen <= 8 {
+		z.pool, z.poolv = &pool.sfiRv8, pool.sfiRv8.Get() // pool.sfiRv8()
+		fkvs = z.poolv.(*[8]sfiRv)[:newlen]
+	} else if newlen <= 16 {
+		z.pool, z.poolv = &pool.sfiRv16, pool.sfiRv16.Get() // pool.sfiRv16()
+		fkvs = z.poolv.(*[16]sfiRv)[:newlen]
+	} else if newlen <= 32 {
+		z.pool, z.poolv = &pool.sfiRv32, pool.sfiRv32.Get() // pool.sfiRv32()
+		fkvs = z.poolv.(*[32]sfiRv)[:newlen]
+	} else if newlen <= 64 {
+		z.pool, z.poolv = &pool.sfiRv64, pool.sfiRv64.Get() // pool.sfiRv64()
+		fkvs = z.poolv.(*[64]sfiRv)[:newlen]
+	} else if newlen <= 128 {
+		z.pool, z.poolv = &pool.sfiRv128, pool.sfiRv128.Get() // pool.sfiRv128()
+		fkvs = z.poolv.(*[128]sfiRv)[:newlen]
+	} else {
+		fkvs = make([]sfiRv, newlen)
+	}
+	return
+}
+
+// safe-mode optimizations
+
+const safeMode = true
+
+// stringView returns a view of the []byte as a string.
+// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
+// In regular safe mode, it is an allocation and copy.
+//
+// Usage: Always maintain a reference to v while result of this call is in use,
+//        and call keepAlive4BytesView(v) at point where done with view.
+func stringView(v []byte) string {
+	return string(v)
+}
+
+// bytesView returns a view of the string as a []byte.
+// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
+// In regular safe mode, it is an allocation and copy.
+//
+// Usage: Always maintain a reference to v while result of this call is in use,
+//        and call keepAlive4BytesView(v) at point where done with view.
+func bytesView(v string) []byte {
+	return []byte(v)
+}
+
+func definitelyNil(v interface{}) bool {
+	// this is a best-effort option.
+	// We just return false, so we don't unnecessarily incur the cost of reflection this early.
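+	// (An unsafe-mode variant of this helper could instead inspect the
+	// interface's data word directly; in this safe-mode build we conservatively
+	// answer false and let reflection handle nil-ness later.)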
+ return false +} + +func rv2i(rv reflect.Value) interface{} { + return rv.Interface() +} + +func rt2id(rt reflect.Type) uintptr { + return reflect.ValueOf(rt).Pointer() +} + +func i2rtid(i interface{}) uintptr { + return reflect.ValueOf(reflect.TypeOf(i)).Pointer() +} + +// -------------------------- + +func isEmptyValue(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool { + switch v.Kind() { + case reflect.Invalid: + return true + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + if deref { + if v.IsNil() { + return true + } + return isEmptyValue(v.Elem(), tinfos, deref, checkStruct) + } + return v.IsNil() + case reflect.Struct: + return isEmptyStruct(v, tinfos, deref, checkStruct) + } + return false +} + +// -------------------------- +type atomicClsErr struct { + v atomic.Value +} + +func (x *atomicClsErr) load() (e clsErr) { + if i := x.v.Load(); i != nil { + e = i.(clsErr) + } + return +} + +func (x *atomicClsErr) store(p clsErr) { + x.v.Store(p) +} + +// -------------------------- +type atomicTypeInfoSlice struct { // expected to be 2 words + v atomic.Value +} + +func (x *atomicTypeInfoSlice) load() (e []rtid2ti) { + if i := x.v.Load(); i != nil { + e = i.([]rtid2ti) + } + return +} + +func (x *atomicTypeInfoSlice) store(p []rtid2ti) { + x.v.Store(p) +} + +// -------------------------- +type atomicRtidFnSlice struct { // expected to be 2 words + v atomic.Value +} + +func (x *atomicRtidFnSlice) load() (e []codecRtidFn) { + if i := x.v.Load(); i != nil { + e = i.([]codecRtidFn) + } + return +} + +func (x *atomicRtidFnSlice) store(p []codecRtidFn) { + x.v.Store(p) +} + +// -------------------------- +func (n *decNaked) ru() reflect.Value { + return reflect.ValueOf(&n.u).Elem() +} +func (n *decNaked) ri() reflect.Value { + return reflect.ValueOf(&n.i).Elem() +} +func (n *decNaked) rf() reflect.Value { + return reflect.ValueOf(&n.f).Elem() +} +func (n *decNaked) rl() reflect.Value { + return reflect.ValueOf(&n.l).Elem() +} +func (n *decNaked) rs() reflect.Value { + return reflect.ValueOf(&n.s).Elem() +} +func (n *decNaked) rt() reflect.Value { + return reflect.ValueOf(&n.t).Elem() +} +func (n *decNaked) rb() reflect.Value { + return reflect.ValueOf(&n.b).Elem() +} + +// -------------------------- +func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) { + rv.SetBytes(d.rawBytes()) +} + +func (d *Decoder) kString(f *codecFnInfo, rv reflect.Value) { + rv.SetString(d.d.DecodeString()) +} + +func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) { + rv.SetBool(d.d.DecodeBool()) +} + +func (d *Decoder) kTime(f *codecFnInfo, rv reflect.Value) { + rv.Set(reflect.ValueOf(d.d.DecodeTime())) +} + +func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) { + fv := d.d.DecodeFloat64() + if chkOvf.Float32(fv) { + d.errorf("float32 overflow: %v", fv) + } + rv.SetFloat(fv) +} + +func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) { + rv.SetFloat(d.d.DecodeFloat64()) +} + +func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) +} + +func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) { + 
rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 8)) +} + +func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 16)) +} + +func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 32)) +} + +func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(d.d.DecodeInt64()) +} + +func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) +} + +func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) +} + +func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 8)) +} + +func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 16)) +} + +func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 32)) +} + +func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(d.d.DecodeUint64()) +} + +// ---------------- + +func (e *Encoder) kBool(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeBool(rv.Bool()) +} + +func (e *Encoder) kTime(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeTime(rv2i(rv).(time.Time)) +} + +func (e *Encoder) kString(f *codecFnInfo, rv reflect.Value) { + s := rv.String() + if e.h.StringToRaw { + e.e.EncodeStringBytesRaw(bytesView(s)) + } else { + e.e.EncodeStringEnc(cUTF8, s) + } +} + +func (e *Encoder) kFloat64(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeFloat64(rv.Float()) +} + +func (e *Encoder) kFloat32(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeFloat32(float32(rv.Float())) +} + +func (e *Encoder) kInt(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeInt(rv.Int()) +} + +func (e *Encoder) kInt8(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeInt(rv.Int()) +} + +func (e *Encoder) kInt16(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeInt(rv.Int()) +} + +func (e *Encoder) kInt32(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeInt(rv.Int()) +} + +func (e *Encoder) kInt64(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeInt(rv.Int()) +} + +func (e *Encoder) kUint(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) +} + +func (e *Encoder) kUint8(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) +} + +func (e *Encoder) kUint16(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) +} + +func (e *Encoder) kUint32(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) +} + +func (e *Encoder) kUint64(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) +} + +func (e *Encoder) kUintptr(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) +} + +func makeMapReflect(t reflect.Type, size int) reflect.Value { + if size < 0 { + return reflect.MakeMapWithSize(t, 4) + } + return reflect.MakeMapWithSize(t, size) +} diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/helper_internal.go b/vendor/github.com/hashicorp/go-msgpack/v2/codec/helper_internal.go new file mode 100644 index 0000000000000..4f5eb84362e2b --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/helper_internal.go @@ -0,0 +1,89 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// All non-std package dependencies live in this file, +// so porting to different environment is easy (just update functions). 
+
+func pruneSignExt(v []byte, pos bool) (n int) {
+	if len(v) < 2 {
+	} else if pos && v[0] == 0 {
+		for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ {
+		}
+	} else if !pos && v[0] == 0xff {
+		for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ {
+		}
+	}
+	return
+}
+
+// growCap returns a new capacity for a slice, given the following:
+//   - oldCap: current capacity
+//   - unit: in-memory size of an element
+//   - num: number of elements to add
+func growCap(oldCap, unit, num int) (newCap int) {
+	// appendslice logic (if cap < 1024, *2, else *1.25):
+	// leads to many copy calls, especially when copying bytes.
+	// bytes.Buffer model (2*cap + n): much better for bytes.
+	// A smarter way is to take the byte-size of the appended element (type) into account.
+
+	// maintain 3 thresholds:
+	// t1: if cap <= t1, newcap = 2x
+	// t2: if cap <= t2, newcap = 1.75x
+	// t3: if cap <= t3, newcap = 1.5x
+	// else newcap = 1.25x
+	//
+	// t1, t2, t3 >= 1024 always.
+	// i.e. if unit size >= 16, then always do 2x or 1.25x (i.e. t1, t2, t3 are all the same)
+	//
+	// With this, appending for bytes increases by:
+	// 100% up to 4K
+	// 75% up to 8K
+	// 50% up to 16K
+	// 25% beyond that
+	//
+	// e.g. for unit=1 and num=0, oldCap=6000 falls in (t1,t2], so
+	// newCap = 7*6000/4 = 10500, rounded up to 10560 (the next multiple of 64).
+
+	// unit can be 0 e.g. for struct{}{}; handle that appropriately
+	var t1, t2, t3 int // thresholds
+	if unit <= 1 {
+		t1, t2, t3 = 4*1024, 8*1024, 16*1024
+	} else if unit < 16 {
+		t3 = 16 / unit * 1024
+		t1 = t3 * 1 / 4
+		t2 = t3 * 2 / 4
+	} else {
+		t1, t2, t3 = 1024, 1024, 1024
+	}
+
+	var x int // temporary variable
+
+	// x is multiplier here: one of 5, 6, 7 or 8; incr of 25%, 50%, 75% or 100% respectively
+	if oldCap <= t1 { // [0,t1]
+		x = 8
+	} else if oldCap > t3 { // (t3,infinity]
+		x = 5
+	} else if oldCap <= t2 { // (t1,t2]
+		x = 7
+	} else { // (t2,t3]
+		x = 6
+	}
+	newCap = x * oldCap / 4
+
+	if num > 0 {
+		newCap += num
+	}
+
+	// ensure newCap is a multiple of 64 (if it is > 64) or 16.
+	if newCap > 64 {
+		if x = newCap % 64; x != 0 {
+			x = newCap / 64
+			newCap = 64 * (x + 1)
+		}
+	} else {
+		if x = newCap % 16; x != 0 {
+			x = newCap / 16
+			newCap = 16 * (x + 1)
+		}
+	}
+	return
+}
diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/json.go b/vendor/github.com/hashicorp/go-msgpack/v2/codec/json.go
new file mode 100644
index 0000000000000..5fc375f9286e2
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/json.go
@@ -0,0 +1,1491 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// By default, this json support uses base64 encoding for bytes, because you cannot
+// store and read any arbitrary string in json (only unicode).
+// However, the user can configure how to encode/decode bytes.
+//
+// This library specifically supports UTF-8 for encoding and decoding only.
+//
+// Note that the library will happily encode/decode things which are not valid
+// json e.g. a map[int64]string. We do it for consistency. With valid json,
+// we will encode and decode appropriately.
+// Users can specify their map type if necessary to force it.
+//
+// Note:
+// - we cannot use strconv.Quote and strconv.Unquote because json quotes/unquotes differently.
+//   We implement it here.
+
+// Top-level methods of json(Enc|Dec)Driver (which are implementations of (en|de)cDriver)
+// MUST NOT call one another.
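+
+// For illustration (a sketch, assuming this fork keeps upstream's
+// NewEncoderBytes constructor and (*Encoder).MustEncode helper): with the
+// base64 default described above, a []byte value encodes as a quoted
+// base64 string.
+//
+//	var out []byte
+//	enc := NewEncoderBytes(&out, &JsonHandle{})
+//	enc.MustEncode([]byte("hi")) // out == `"aGk="`
+//
+// Setting JsonHandle.RawBytesExt (declared further below) overrides that
+// base64 default.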
+ +import ( + "bytes" + "encoding/base64" + "math" + "reflect" + "strconv" + "time" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +//-------------------------------- + +var jsonLiterals = [...]byte{ + '"', 't', 'r', 'u', 'e', '"', + '"', 'f', 'a', 'l', 's', 'e', '"', + '"', 'n', 'u', 'l', 'l', '"', +} + +const ( + jsonLitTrueQ = 0 + jsonLitTrue = 1 + jsonLitFalseQ = 6 + jsonLitFalse = 7 + // jsonLitNullQ = 13 + jsonLitNull = 14 +) + +var ( + jsonLiteral4True = jsonLiterals[jsonLitTrue+1 : jsonLitTrue+4] + jsonLiteral4False = jsonLiterals[jsonLitFalse+1 : jsonLitFalse+5] + jsonLiteral4Null = jsonLiterals[jsonLitNull+1 : jsonLitNull+4] +) + +const ( + jsonU4Chk2 = '0' + jsonU4Chk1 = 'a' - 10 + jsonU4Chk0 = 'A' - 10 + + jsonScratchArrayLen = 64 +) + +const ( + // If !jsonValidateSymbols, decoding will be faster, by skipping some checks: + // - If we see first character of null, false or true, + // do not validate subsequent characters. + // - e.g. if we see a n, assume null and skip next 3 characters, + // and do not validate they are ull. + // P.S. Do not expect a significant decoding boost from this. + jsonValidateSymbols = true + + jsonSpacesOrTabsLen = 128 + + jsonAlwaysReturnInternString = false +) + +var ( + // jsonTabs and jsonSpaces are used as caches for indents + jsonTabs, jsonSpaces [jsonSpacesOrTabsLen]byte + + jsonCharHtmlSafeSet bitset256 + jsonCharSafeSet bitset256 + jsonCharWhitespaceSet bitset256 + jsonNumSet bitset256 +) + +func init() { + var i byte + for i = 0; i < jsonSpacesOrTabsLen; i++ { + jsonSpaces[i] = ' ' + jsonTabs[i] = '\t' + } + + // populate the safe values as true: note: ASCII control characters are (0-31) + // jsonCharSafeSet: all true except (0-31) " \ + // jsonCharHtmlSafeSet: all true except (0-31) " \ < > & + for i = 32; i < utf8.RuneSelf; i++ { + switch i { + case '"', '\\': + case '<', '>', '&': + jsonCharSafeSet.set(i) // = true + default: + jsonCharSafeSet.set(i) + jsonCharHtmlSafeSet.set(i) + } + } + for i = 0; i <= utf8.RuneSelf; i++ { + switch i { + case ' ', '\t', '\r', '\n': + jsonCharWhitespaceSet.set(i) + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-': + jsonNumSet.set(i) + } + } +} + +// ---------------- + +type jsonEncDriverTypical struct { + jsonEncDriver +} + +func (e *jsonEncDriverTypical) typical() {} + +func (e *jsonEncDriverTypical) WriteArrayStart(length int) { + e.w.writen1('[') + e.c = containerArrayStart +} + +func (e *jsonEncDriverTypical) WriteArrayElem() { + if e.c != containerArrayStart { + e.w.writen1(',') + } + e.c = containerArrayElem +} + +func (e *jsonEncDriverTypical) WriteArrayEnd() { + e.w.writen1(']') + e.c = containerArrayEnd +} + +func (e *jsonEncDriverTypical) WriteMapStart(length int) { + e.w.writen1('{') + e.c = containerMapStart +} + +func (e *jsonEncDriverTypical) WriteMapElemKey() { + if e.c != containerMapStart { + e.w.writen1(',') + } + e.c = containerMapKey +} + +func (e *jsonEncDriverTypical) WriteMapElemValue() { + e.w.writen1(':') + e.c = containerMapValue +} + +func (e *jsonEncDriverTypical) WriteMapEnd() { + e.w.writen1('}') + e.c = containerMapEnd +} + +func (e *jsonEncDriverTypical) EncodeBool(b bool) { + if b { + e.w.writeb(jsonLiterals[jsonLitTrue : jsonLitTrue+4]) + } else { + e.w.writeb(jsonLiterals[jsonLitFalse : jsonLitFalse+5]) + } +} + +func (e *jsonEncDriverTypical) EncodeFloat64(f float64) { + fmt, prec := jsonFloatStrconvFmtPrec(f) + e.w.writeb(strconv.AppendFloat(e.b[:0], f, fmt, prec, 64)) +} + +func (e *jsonEncDriverTypical) EncodeInt(v int64) { + 
e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) +} + +func (e *jsonEncDriverTypical) EncodeUint(v uint64) { + e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) +} + +func (e *jsonEncDriverTypical) EncodeFloat32(f float32) { + e.EncodeFloat64(float64(f)) +} + +// func (e *jsonEncDriverTypical) atEndOfEncode() { +// if e.tw { +// e.w.writen1(' ') +// } +// } + +// ---------------- + +type jsonEncDriverGeneric struct { + jsonEncDriver + // ds string // indent string + di int8 // indent per + d bool // indenting? + dt bool // indent using tabs + dl uint16 // indent level + ks bool // map key as string + is byte // integer as string + _ byte // padding + _ [2]uint64 // padding +} + +// indent is done as below: +// - newline and indent are added before each mapKey or arrayElem +// - newline and indent are added before each ending, +// except there was no entry (so we can have {} or []) + +func (e *jsonEncDriverGeneric) reset() { + e.jsonEncDriver.reset() + e.d, e.dt, e.dl, e.di = false, false, 0, 0 + if e.h.Indent > 0 { + e.d = true + e.di = int8(e.h.Indent) + } else if e.h.Indent < 0 { + e.d = true + e.dt = true + e.di = int8(-e.h.Indent) + } + e.ks = e.h.MapKeyAsString + e.is = e.h.IntegerAsString +} + +func (e *jsonEncDriverGeneric) WriteArrayStart(length int) { + if e.d { + e.dl++ + } + e.w.writen1('[') + e.c = containerArrayStart +} + +func (e *jsonEncDriverGeneric) WriteArrayElem() { + if e.c != containerArrayStart { + e.w.writen1(',') + } + if e.d { + e.writeIndent() + } + e.c = containerArrayElem +} + +func (e *jsonEncDriverGeneric) WriteArrayEnd() { + if e.d { + e.dl-- + if e.c != containerArrayStart { + e.writeIndent() + } + } + e.w.writen1(']') + e.c = containerArrayEnd +} + +func (e *jsonEncDriverGeneric) WriteMapStart(length int) { + if e.d { + e.dl++ + } + e.w.writen1('{') + e.c = containerMapStart +} + +func (e *jsonEncDriverGeneric) WriteMapElemKey() { + if e.c != containerMapStart { + e.w.writen1(',') + } + if e.d { + e.writeIndent() + } + e.c = containerMapKey +} + +func (e *jsonEncDriverGeneric) WriteMapElemValue() { + if e.d { + e.w.writen2(':', ' ') + } else { + e.w.writen1(':') + } + e.c = containerMapValue +} + +func (e *jsonEncDriverGeneric) WriteMapEnd() { + if e.d { + e.dl-- + if e.c != containerMapStart { + e.writeIndent() + } + } + e.w.writen1('}') + e.c = containerMapEnd +} + +func (e *jsonEncDriverGeneric) writeIndent() { + e.w.writen1('\n') + x := int(e.di) * int(e.dl) + if e.dt { + for x > jsonSpacesOrTabsLen { + e.w.writeb(jsonTabs[:]) + x -= jsonSpacesOrTabsLen + } + e.w.writeb(jsonTabs[:x]) + } else { + for x > jsonSpacesOrTabsLen { + e.w.writeb(jsonSpaces[:]) + x -= jsonSpacesOrTabsLen + } + e.w.writeb(jsonSpaces[:x]) + } +} + +func (e *jsonEncDriverGeneric) EncodeBool(b bool) { + if e.ks && e.c == containerMapKey { + if b { + e.w.writeb(jsonLiterals[jsonLitTrueQ : jsonLitTrueQ+6]) + } else { + e.w.writeb(jsonLiterals[jsonLitFalseQ : jsonLitFalseQ+7]) + } + } else { + if b { + e.w.writeb(jsonLiterals[jsonLitTrue : jsonLitTrue+4]) + } else { + e.w.writeb(jsonLiterals[jsonLitFalse : jsonLitFalse+5]) + } + } +} + +func (e *jsonEncDriverGeneric) EncodeFloat64(f float64) { + // instead of using 'g', specify whether to use 'e' or 'f' + fmt, prec := jsonFloatStrconvFmtPrec(f) + + var blen int + if e.ks && e.c == containerMapKey { + blen = 2 + len(strconv.AppendFloat(e.b[1:1], f, fmt, prec, 64)) + e.b[0] = '"' + e.b[blen-1] = '"' + } else { + blen = len(strconv.AppendFloat(e.b[:0], f, fmt, prec, 64)) + } + e.w.writeb(e.b[:blen]) +} + +func (e *jsonEncDriverGeneric) 
EncodeInt(v int64) { + x := e.is + if x == 'A' || x == 'L' && (v > 1<<53 || v < -(1<<53)) || (e.ks && e.c == containerMapKey) { + blen := 2 + len(strconv.AppendInt(e.b[1:1], v, 10)) + e.b[0] = '"' + e.b[blen-1] = '"' + e.w.writeb(e.b[:blen]) + return + } + e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) +} + +func (e *jsonEncDriverGeneric) EncodeUint(v uint64) { + x := e.is + if x == 'A' || x == 'L' && v > 1<<53 || (e.ks && e.c == containerMapKey) { + blen := 2 + len(strconv.AppendUint(e.b[1:1], v, 10)) + e.b[0] = '"' + e.b[blen-1] = '"' + e.w.writeb(e.b[:blen]) + return + } + e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) +} + +func (e *jsonEncDriverGeneric) EncodeFloat32(f float32) { + // e.encodeFloat(float64(f), 32) + // always encode all floats as IEEE 64-bit floating point. + // It also ensures that we can decode in full precision even if into a float32, + // as what is written is always to float64 precision. + e.EncodeFloat64(float64(f)) +} + +// func (e *jsonEncDriverGeneric) atEndOfEncode() { +// if e.tw { +// if e.d { +// e.w.writen1('\n') +// } else { +// e.w.writen1(' ') +// } +// } +// } + +// -------------------- + +type jsonEncDriver struct { + noBuiltInTypes + e *Encoder + h *JsonHandle + w *encWriterSwitch + se extWrapper + // ---- cpu cache line boundary? + bs []byte // scratch + // ---- cpu cache line boundary? + // scratch: encode time, etc. + // include scratch buffer and padding, but leave space for containerstate + b [jsonScratchArrayLen + 8 + 8 - 1]byte + c containerState + // _ [2]uint64 // padding +} + +func (e *jsonEncDriver) EncodeNil() { + // We always encode nil as just null (never in quotes) + // This allows us to easily decode if a nil in the json stream + // ie if initial token is n. + e.w.writeb(jsonLiterals[jsonLitNull : jsonLitNull+4]) + + // if e.h.MapKeyAsString && e.c == containerMapKey { + // e.w.writeb(jsonLiterals[jsonLitNullQ : jsonLitNullQ+6]) + // } else { + // e.w.writeb(jsonLiterals[jsonLitNull : jsonLitNull+4]) + // } +} + +func (e *jsonEncDriver) EncodeTime(t time.Time) { + // Do NOT use MarshalJSON, as it allocates internally. 
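+	// (time.Time.MarshalJSON builds and returns a fresh []byte on every call)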
+ // instead, we call AppendFormat directly, using our scratch buffer (e.b) + if t.IsZero() { + e.EncodeNil() + } else { + e.b[0] = '"' + b := t.AppendFormat(e.b[1:1], time.RFC3339Nano) + e.b[len(b)+1] = '"' + e.w.writeb(e.b[:len(b)+2]) + } + // v, err := t.MarshalJSON(); if err != nil { e.e.error(err) } e.w.writeb(v) +} + +func (e *jsonEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) { + if v := ext.ConvertExt(rv); v == nil { + e.EncodeNil() + } else { + en.encode(v) + } +} + +func (e *jsonEncDriver) EncodeRawExt(re *RawExt, en *Encoder) { + // only encodes re.Value (never re.Data) + if re.Value == nil { + e.EncodeNil() + } else { + en.encode(re.Value) + } +} + +func (e *jsonEncDriver) EncodeString(c charEncoding, v string) { + e.quoteStr(v) +} + +func (e *jsonEncDriver) EncodeStringEnc(c charEncoding, v string) { + e.quoteStr(v) +} + +func (e *jsonEncDriver) EncodeStringBytes(c charEncoding, v []byte) { + // if encoding raw bytes and RawBytesExt is configured, use it to encode + if v == nil { + e.EncodeNil() + return + } + if c == cRAW { + if e.se.InterfaceExt != nil { + e.EncodeExt(v, 0, &e.se, e.e) + return + } + + slen := base64.StdEncoding.EncodedLen(len(v)) + 2 + if cap(e.bs) >= slen { + e.bs = e.bs[:slen] + } else { + e.bs = make([]byte, slen) + } + e.bs[0] = '"' + base64.StdEncoding.Encode(e.bs[1:], v) + e.bs[slen-1] = '"' + e.w.writeb(e.bs) + } else { + e.quoteStr(stringView(v)) + } +} + +func (e *jsonEncDriver) EncodeStringBytesRaw(v []byte) { + // if encoding raw bytes and RawBytesExt is configured, use it to encode + if v == nil { + e.EncodeNil() + return + } + if e.se.InterfaceExt != nil { + e.EncodeExt(v, 0, &e.se, e.e) + return + } + + slen := base64.StdEncoding.EncodedLen(len(v)) + 2 + if cap(e.bs) >= slen { + e.bs = e.bs[:slen] + } else { + e.bs = make([]byte, slen) + } + e.bs[0] = '"' + base64.StdEncoding.Encode(e.bs[1:], v) + e.bs[slen-1] = '"' + e.w.writeb(e.bs) +} + +func (e *jsonEncDriver) EncodeAsis(v []byte) { + e.w.writeb(v) +} + +func (e *jsonEncDriver) quoteStr(s string) { + // adapted from std pkg encoding/json + const hex = "0123456789abcdef" + w := e.w + htmlasis := e.h.HTMLCharsAsIs + w.writen1('"') + var start int + for i, slen := 0, len(s); i < slen; { + // encode all bytes < 0x20 (except \r, \n). + // also encode < > & to prevent security holes when served to some browsers. + if b := s[i]; b < utf8.RuneSelf { + // if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + // if (htmlasis && jsonCharSafeSet.isset(b)) || jsonCharHtmlSafeSet.isset(b) { + if jsonCharHtmlSafeSet.isset(b) || (htmlasis && jsonCharSafeSet.isset(b)) { + i++ + continue + } + if start < i { + w.writestr(s[start:i]) + } + switch b { + case '\\', '"': + w.writen2('\\', b) + case '\n': + w.writen2('\\', 'n') + case '\r': + w.writen2('\\', 'r') + case '\b': + w.writen2('\\', 'b') + case '\f': + w.writen2('\\', 'f') + case '\t': + w.writen2('\\', 't') + default: + w.writestr(`\u00`) + w.writen2(hex[b>>4], hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRuneInString(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + w.writestr(s[start:i]) + } + w.writestr(`\ufffd`) + i += size + start = i + continue + } + // U+2028 is LINE SEPARATOR. U+2029 is PARAGRAPH SEPARATOR. + // Both technically valid JSON, but bomb on JSONP, so fix here unconditionally. 
+ if c == '\u2028' || c == '\u2029' { + if start < i { + w.writestr(s[start:i]) + } + w.writestr(`\u202`) + w.writen1(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + w.writestr(s[start:]) + } + w.writen1('"') +} + +func (e *jsonEncDriver) atEndOfEncode() { + // if e.c == 0 { // scalar written, output space + // e.w.writen1(' ') + // } else if e.h.TermWhitespace { // container written, output new-line + // e.w.writen1('\n') + // } + if e.h.TermWhitespace { + if e.c == 0 { // scalar written, output space + e.w.writen1(' ') + } else { // container written, output new-line + e.w.writen1('\n') + } + } + + // e.c = 0 +} + +type jsonDecDriver struct { + noBuiltInTypes + d *Decoder + h *JsonHandle + r *decReaderSwitch + se extWrapper + + // ---- writable fields during execution --- *try* to keep in sep cache line + + c containerState + // tok is used to store the token read right after skipWhiteSpace. + tok uint8 + fnull bool // found null from appendStringAsBytes + bs []byte // scratch. Initialized from b. Used for parsing strings or numbers. + bstr [8]byte // scratch used for string \UXXX parsing + // ---- cpu cache line boundary? + b [jsonScratchArrayLen]byte // scratch 1, used for parsing strings or numbers or time.Time + b2 [jsonScratchArrayLen]byte // scratch 2, used only for readUntil, decNumBytes + + // _ [3]uint64 // padding + // n jsonNum +} + +// func jsonIsWS(b byte) bool { +// // return b == ' ' || b == '\t' || b == '\r' || b == '\n' +// return jsonCharWhitespaceSet.isset(b) +// } + +func (d *jsonDecDriver) uncacheRead() { + if d.tok != 0 { + d.r.unreadn1() + d.tok = 0 + } +} + +func (d *jsonDecDriver) ReadMapStart() int { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + const xc uint8 = '{' + if d.tok != xc { + d.d.errorf("read map - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerMapStart + return -1 +} + +func (d *jsonDecDriver) ReadArrayStart() int { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + const xc uint8 = '[' + if d.tok != xc { + d.d.errorf("read array - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerArrayStart + return -1 +} + +func (d *jsonDecDriver) CheckBreak() bool { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + return d.tok == '}' || d.tok == ']' +} + +// For the ReadXXX methods below, we could just delegate to helper functions +// readContainerState(c containerState, xc uint8, check bool) +// - ReadArrayElem would become: +// readContainerState(containerArrayElem, ',', d.c != containerArrayStart) +// +// However, until mid-stack inlining comes in go1.11 which supports inlining of +// one-liners, we explicitly write them all 5 out to elide the extra func call. +// +// TODO: For Go 1.11, if inlined, consider consolidating these. 
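+//
+// For reference, the consolidated helper sketched above would look roughly
+// like this (illustrative only; it mirrors the bodies written out below):
+//
+//	func (d *jsonDecDriver) readContainerState(c containerState, xc uint8, check bool) {
+//		if d.tok == 0 {
+//			d.tok = d.r.skip(&jsonCharWhitespaceSet)
+//		}
+//		if check {
+//			if d.tok != xc {
+//				d.d.errorf("expect char '%c' but got char '%c'", xc, d.tok)
+//			}
+//			d.tok = 0
+//		}
+//		d.c = c
+//	}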
+ +func (d *jsonDecDriver) ReadArrayElem() { + const xc uint8 = ',' + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.c != containerArrayStart { + if d.tok != xc { + d.d.errorf("read array element - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + } + d.c = containerArrayElem +} + +func (d *jsonDecDriver) ReadArrayEnd() { + const xc uint8 = ']' + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.tok != xc { + d.d.errorf("read array end - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerArrayEnd +} + +func (d *jsonDecDriver) ReadMapElemKey() { + const xc uint8 = ',' + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.c != containerMapStart { + if d.tok != xc { + d.d.errorf("read map key - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + } + d.c = containerMapKey +} + +func (d *jsonDecDriver) ReadMapElemValue() { + const xc uint8 = ':' + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.tok != xc { + d.d.errorf("read map value - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerMapValue +} + +func (d *jsonDecDriver) ReadMapEnd() { + const xc uint8 = '}' + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.tok != xc { + d.d.errorf("read map end - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerMapEnd +} + +// func (d *jsonDecDriver) readLit(length, fromIdx uint8) { +// // length here is always less than 8 (literals are: null, true, false) +// bs := d.r.readx(int(length)) +// d.tok = 0 +// if jsonValidateSymbols && !bytes.Equal(bs, jsonLiterals[fromIdx:fromIdx+length]) { +// d.d.errorf("expecting %s: got %s", jsonLiterals[fromIdx:fromIdx+length], bs) +// } +// } + +func (d *jsonDecDriver) readLit4True() { + bs := d.r.readx(3) + d.tok = 0 + if jsonValidateSymbols && !bytes.Equal(bs, jsonLiteral4True) { + d.d.errorf("expecting %s: got %s", jsonLiteral4True, bs) + } +} + +func (d *jsonDecDriver) readLit4False() { + bs := d.r.readx(4) + d.tok = 0 + if jsonValidateSymbols && !bytes.Equal(bs, jsonLiteral4False) { + d.d.errorf("expecting %s: got %s", jsonLiteral4False, bs) + } +} + +func (d *jsonDecDriver) readLit4Null() { + bs := d.r.readx(3) + d.tok = 0 + if jsonValidateSymbols && !bytes.Equal(bs, jsonLiteral4Null) { + d.d.errorf("expecting %s: got %s", jsonLiteral4Null, bs) + } +} + +func (d *jsonDecDriver) TryDecodeAsNil() bool { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + // we shouldn't try to see if "null" was here, right? 
+ // only the plain string: `null` denotes a nil (ie not quotes) + if d.tok == 'n' { + d.readLit4Null() + return true + } + return false +} + +func (d *jsonDecDriver) DecodeBool() (v bool) { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + fquot := d.c == containerMapKey && d.tok == '"' + if fquot { + d.tok = d.r.readn1() + } + switch d.tok { + case 'f': + d.readLit4False() + // v = false + case 't': + d.readLit4True() + v = true + default: + d.d.errorf("decode bool: got first char %c", d.tok) + // v = false // "unreachable" + } + if fquot { + d.r.readn1() + } + return +} + +func (d *jsonDecDriver) DecodeTime() (t time.Time) { + // read string, and pass the string into json.unmarshal + d.appendStringAsBytes() + if d.fnull { + return + } + t, err := time.Parse(time.RFC3339, stringView(d.bs)) + if err != nil { + d.d.errorv(err) + } + return +} + +func (d *jsonDecDriver) ContainerType() (vt valueType) { + // check container type by checking the first char + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + + // optimize this, so we don't do 4 checks but do one computation. + // return jsonContainerSet[d.tok] + + // ContainerType is mostly called for Map and Array, + // so this conditional is good enough (max 2 checks typically) + if b := d.tok; b == '{' { + return valueTypeMap + } else if b == '[' { + return valueTypeArray + } else if b == 'n' { + return valueTypeNil + } else if b == '"' { + return valueTypeString + } + return valueTypeUnset +} + +func (d *jsonDecDriver) decNumBytes() (bs []byte) { + // stores num bytes in d.bs + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.tok == '"' { + bs = d.r.readUntil(d.b2[:0], '"') + bs = bs[:len(bs)-1] + } else { + d.r.unreadn1() + bs = d.r.readTo(d.bs[:0], &jsonNumSet) + } + d.tok = 0 + return bs +} + +func (d *jsonDecDriver) DecodeUint64() (u uint64) { + bs := d.decNumBytes() + if len(bs) == 0 { + return + } + n, neg, badsyntax, overflow := jsonParseInteger(bs) + if overflow { + d.d.errorf("overflow parsing unsigned integer: %s", bs) + } else if neg { + d.d.errorf("minus found parsing unsigned integer: %s", bs) + } else if badsyntax { + // fallback: try to decode as float, and cast + n = d.decUint64ViaFloat(stringView(bs)) + } + return n +} + +func (d *jsonDecDriver) DecodeInt64() (i int64) { + const cutoff = uint64(1 << uint(64-1)) + bs := d.decNumBytes() + if len(bs) == 0 { + return + } + n, neg, badsyntax, overflow := jsonParseInteger(bs) + if overflow { + d.d.errorf("overflow parsing integer: %s", bs) + } else if badsyntax { + // d.d.errorf("invalid syntax for integer: %s", bs) + // fallback: try to decode as float, and cast + if neg { + n = d.decUint64ViaFloat(stringView(bs[1:])) + } else { + n = d.decUint64ViaFloat(stringView(bs)) + } + } + if neg { + if n > cutoff { + d.d.errorf("overflow parsing integer: %s", bs) + } + i = -(int64(n)) + } else { + if n >= cutoff { + d.d.errorf("overflow parsing integer: %s", bs) + } + i = int64(n) + } + return +} + +func (d *jsonDecDriver) decUint64ViaFloat(s string) (u uint64) { + if len(s) == 0 { + return + } + f, err := strconv.ParseFloat(s, 64) + if err != nil { + d.d.errorf("invalid syntax for integer: %s", s) + // d.d.errorv(err) + } + fi, ff := math.Modf(f) + if ff > 0 { + d.d.errorf("fractional part found parsing integer: %s", s) + } else if fi > float64(math.MaxUint64) { + d.d.errorf("overflow parsing integer: %s", s) + } + return uint64(fi) +} + +func (d *jsonDecDriver) DecodeFloat64() (f float64) { + bs := d.decNumBytes() + if len(bs) == 0 
{ + return + } + f, err := strconv.ParseFloat(stringView(bs), 64) + if err != nil { + d.d.errorv(err) + } + return +} + +func (d *jsonDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if ext == nil { + re := rv.(*RawExt) + re.Tag = xtag + d.d.decode(&re.Value) + } else { + var v interface{} + d.d.decode(&v) + ext.UpdateExt(rv, v) + } + return +} + +func (d *jsonDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { + // if decoding into raw bytes, and the RawBytesExt is configured, use it to decode. + if d.se.InterfaceExt != nil { + bsOut = bs + d.DecodeExt(&bsOut, 0, &d.se) + return + } + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + // check if an "array" of uint8's (see ContainerType for how to infer if an array) + if d.tok == '[' { + bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d) + return + } + d.appendStringAsBytes() + // base64 encodes []byte{} as "", and we encode nil []byte as null. + // Consequently, base64 should decode null as a nil []byte, and "" as an empty []byte{}. + // appendStringAsBytes returns a zero-len slice for both, so as not to reset d.bs. + // However, it sets a fnull field to true, so we can check if a null was found. + if len(d.bs) == 0 { + if d.fnull { + return nil + } + return []byte{} + } + bs0 := d.bs + slen := base64.StdEncoding.DecodedLen(len(bs0)) + if slen <= cap(bs) { + bsOut = bs[:slen] + } else if zerocopy && slen <= cap(d.b2) { + bsOut = d.b2[:slen] + } else { + bsOut = make([]byte, slen) + } + slen2, err := base64.StdEncoding.Decode(bsOut, bs0) + if err != nil { + d.d.errorf("error decoding base64 binary '%s': %v", bs0, err) + return nil + } + if slen != slen2 { + bsOut = bsOut[:slen2] + } + return +} + +func (d *jsonDecDriver) DecodeString() (s string) { + d.appendStringAsBytes() + return d.bsToString() +} + +func (d *jsonDecDriver) DecodeStringAsBytes() (s []byte) { + d.appendStringAsBytes() + return d.bs +} + +func (d *jsonDecDriver) appendStringAsBytes() { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + + d.fnull = false + if d.tok != '"' { + // d.d.errorf("expect char '%c' but got char '%c'", '"', d.tok) + // handle non-string scalar: null, true, false or a number + switch d.tok { + case 'n': + d.readLit4Null() + d.bs = d.bs[:0] + d.fnull = true + case 'f': + d.readLit4False() + d.bs = d.bs[:5] + copy(d.bs, "false") + case 't': + d.readLit4True() + d.bs = d.bs[:4] + copy(d.bs, "true") + default: + // try to parse a valid number + bs := d.decNumBytes() + if len(bs) <= cap(d.bs) { + d.bs = d.bs[:len(bs)] + } else { + d.bs = make([]byte, len(bs)) + } + copy(d.bs, bs) + } + return + } + + d.tok = 0 + r := d.r + var cs = r.readUntil(d.b2[:0], '"') + var cslen = uint(len(cs)) + var c uint8 + v := d.bs[:0] + // append on each byte seen can be expensive, so we just + // keep track of where we last read a contiguous set of + // non-special bytes (using cursor variable), + // and when we see a special byte + // e.g. end-of-slice, " or \, + // we will append the full range into the v slice before proceeding + var i, cursor uint + for { + if i == cslen { + v = append(v, cs[cursor:]...) + cs = r.readUntil(d.b2[:0], '"') + cslen = uint(len(cs)) + i, cursor = 0, 0 + } + c = cs[i] + if c == '"' { + v = append(v, cs[cursor:i]...) + break + } + if c != '\\' { + i++ + continue + } + v = append(v, cs[cursor:i]...) 
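+		// cs[i] is now a backslash: everything before it has been flushed to v;
+		// step past it and dispatch on the escape character that follows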
+		i++
+		c = cs[i]
+		switch c {
+		case '"', '\\', '/', '\'':
+			v = append(v, c)
+		case 'b':
+			v = append(v, '\b')
+		case 'f':
+			v = append(v, '\f')
+		case 'n':
+			v = append(v, '\n')
+		case 'r':
+			v = append(v, '\r')
+		case 't':
+			v = append(v, '\t')
+		case 'u':
+			var r rune
+			var rr uint32
+			if cslen < i+4 {
+				d.d.errorf("need at least 4 more bytes for unicode sequence")
+			}
+			var j uint
+			for _, c = range cs[i+1 : i+5] { // bounds-check-elimination
+				// best to use explicit if-else
+				// - not a table, etc which involve memory loads, array lookup with bounds checks, etc
+				if c >= '0' && c <= '9' {
+					rr = rr*16 + uint32(c-jsonU4Chk2)
+				} else if c >= 'a' && c <= 'f' {
+					rr = rr*16 + uint32(c-jsonU4Chk1)
+				} else if c >= 'A' && c <= 'F' {
+					rr = rr*16 + uint32(c-jsonU4Chk0)
+				} else {
+					r = unicode.ReplacementChar
+					i += 4
+					goto encode_rune
+				}
+			}
+			r = rune(rr)
+			i += 4
+			if utf16.IsSurrogate(r) {
+				if len(cs) >= int(i+6) {
+					var cx = cs[i+1:][:6:6] // [:6] affords bounds-check-elimination
+					if cx[0] == '\\' && cx[1] == 'u' {
+						i += 2
+						var rr1 uint32
+						for j = 2; j < 6; j++ {
+							c = cx[j]
+							// accumulate into rr1 (the low surrogate), not rr,
+							// which still holds the high surrogate's value
+							if c >= '0' && c <= '9' {
+								rr1 = rr1*16 + uint32(c-jsonU4Chk2)
+							} else if c >= 'a' && c <= 'f' {
+								rr1 = rr1*16 + uint32(c-jsonU4Chk1)
+							} else if c >= 'A' && c <= 'F' {
+								rr1 = rr1*16 + uint32(c-jsonU4Chk0)
+							} else {
+								r = unicode.ReplacementChar
+								i += 4
+								goto encode_rune
+							}
+						}
+						r = utf16.DecodeRune(r, rune(rr1))
+						i += 4
+						goto encode_rune
+					}
+				}
+				r = unicode.ReplacementChar
+			}
+		encode_rune:
+			w2 := utf8.EncodeRune(d.bstr[:], r)
+			v = append(v, d.bstr[:w2]...)
+		default:
+			d.d.errorf("unsupported escaped value: %c", c)
+		}
+		i++
+		cursor = i
+	}
+	d.bs = v
+}
+
+func (d *jsonDecDriver) nakedNum(z *decNaked, bs []byte) (err error) {
+	const cutoff = uint64(1 << uint(64-1))
+
+	var n uint64
+	var neg, badsyntax, overflow bool
+
+	if len(bs) == 0 {
+		if d.h.PreferFloat {
+			z.v = valueTypeFloat
+			z.f = 0
+		} else if d.h.SignedInteger {
+			z.v = valueTypeInt
+			z.i = 0
+		} else {
+			z.v = valueTypeUint
+			z.u = 0
+		}
+		return
+	}
+	if d.h.PreferFloat {
+		goto F
+	}
+	n, neg, badsyntax, overflow = jsonParseInteger(bs)
+	if badsyntax || overflow {
+		goto F
+	}
+	if neg {
+		if n > cutoff {
+			goto F
+		}
+		z.v = valueTypeInt
+		z.i = -(int64(n))
+	} else if d.h.SignedInteger {
+		if n >= cutoff {
+			goto F
+		}
+		z.v = valueTypeInt
+		z.i = int64(n)
+	} else {
+		z.v = valueTypeUint
+		z.u = n
+	}
+	return
+F:
+	z.v = valueTypeFloat
+	z.f, err = strconv.ParseFloat(stringView(bs), 64)
+	return
+}
+
+func (d *jsonDecDriver) bsToString() string {
+	// if x := d.s.sc; x != nil && x.so && x.st == '}' { // map key
+	if jsonAlwaysReturnInternString || d.c == containerMapKey {
+		return d.d.string(d.bs)
+	}
+	return string(d.bs)
+}
+
+func (d *jsonDecDriver) DecodeNaked() {
+	z := d.d.naked()
+	// var decodeFurther bool
+
+	if d.tok == 0 {
+		d.tok = d.r.skip(&jsonCharWhitespaceSet)
+	}
+	switch d.tok {
+	case 'n':
+		d.readLit4Null()
+		z.v = valueTypeNil
+	case 'f':
+		d.readLit4False()
+		z.v = valueTypeBool
+		z.b = false
+	case 't':
+		d.readLit4True()
+		z.v = valueTypeBool
+		z.b = true
+	case '{':
+		z.v = valueTypeMap // don't consume. kInterfaceNaked will call ReadMapStart
+	case '[':
+		z.v = valueTypeArray // don't consume. kInterfaceNaked will call ReadArrayStart
+	case '"':
+		// if a string, and MapKeyAsString, then try to decode it as a nil, bool or number first
+		d.appendStringAsBytes()
+		if len(d.bs) > 0 && d.c == containerMapKey && d.h.MapKeyAsString {
+			switch stringView(d.bs) {
+			case "null":
+				z.v = valueTypeNil
+			case "true":
+				z.v = valueTypeBool
+				z.b = true
+			case "false":
+				z.v = valueTypeBool
+				z.b = false
+			default:
+				// check if a number: float, int or uint
+				if err := d.nakedNum(z, d.bs); err != nil {
+					z.v = valueTypeString
+					z.s = d.bsToString()
+				}
+			}
+		} else {
+			z.v = valueTypeString
+			z.s = d.bsToString()
+		}
+	default: // number
+		bs := d.decNumBytes()
+		if len(bs) == 0 {
+			d.d.errorf("decode number from empty string")
+			return
+		}
+		if err := d.nakedNum(z, bs); err != nil {
+			d.d.errorf("decode number from %s: %v", bs, err)
+			return
+		}
+	}
+	// if decodeFurther {
+	// 	d.s.sc.retryRead()
+	// }
+}
+
+//----------------------
+
+// JsonHandle is a handle for JSON encoding format.
+//
+// Json is comprehensively supported:
+// - decodes numbers into interface{} as int, uint or float64
+//   based on how the number looks and some config parameters e.g. PreferFloat, SignedInteger, etc.
+// - decode integers from float formatted numbers e.g. 1.27e+8
+// - decode any json value (numbers, bool, etc) from quoted strings
+// - configurable way to encode/decode []byte:
+//   by default, encodes and decodes []byte using base64 Std Encoding
+// - UTF-8 support for encoding and decoding
+//
+// It has better performance than the json library in the standard library,
+// by leveraging the performance improvements of the codec library.
+//
+// In addition, it doesn't read more bytes than necessary during a decode, which allows
+// reading multiple values from a stream containing json and non-json content.
+// For example, a user can read a json value, then a cbor value, then a msgpack value,
+// all from the same stream in sequence.
+//
+// Note that, when decoding quoted strings, invalid UTF-8 or invalid UTF-16 surrogate pairs are
+// not treated as an error. Instead, they are replaced by the Unicode replacement character U+FFFD.
+type JsonHandle struct {
+	textEncodingType
+	BasicHandle
+
+	// Indent indicates how a value is encoded.
+	// - If positive, indent by that number of spaces.
+	// - If negative, indent by that number of tabs.
+	Indent int8
+
+	// IntegerAsString controls how integers (signed and unsigned) are encoded.
+	//
+	// Per the JSON Spec, JSON numbers are 64-bit floating point numbers.
+	// Consequently, integers > 2^53 cannot be represented as a JSON number without losing precision.
+	// This can be mitigated by configuring how to encode integers.
+	//
+	// IntegerAsString interprets the following values:
+	// - if 'L', then encode integers > 2^53 as a json string.
+	// - if 'A', then encode all integers as a json string
+	//   containing the exact integer representation as a decimal.
+	// - else encode all integers as a json number (default)
+	IntegerAsString byte
+
+	// HTMLCharsAsIs controls how to encode some special characters to html: < > &
+	//
+	// By default, we encode them as \uXXXX
+	// to prevent security holes when served from some browsers.
+	HTMLCharsAsIs bool
+
+	// PreferFloat says that we will default to decoding a number as a float.
+	// If not set, we will examine the characters of the number and decode as an
+	// integer type if it doesn't have any of the characters [.eE].
+ PreferFloat bool + + // TermWhitespace says that we add a whitespace character + // at the end of an encoding. + // + // The whitespace is important, especially if using numbers in a context + // where multiple items are written to a stream. + TermWhitespace bool + + // MapKeyAsString says to encode all map keys as strings. + // + // Use this to enforce strict json output. + // The only caveat is that nil value is ALWAYS written as null (never as "null") + MapKeyAsString bool + + // _ [2]byte // padding + + // Note: below, we store hardly-used items e.g. RawBytesExt is cached in the (en|de)cDriver. + + // RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way. + // If not configured, raw bytes are encoded to/from base64 text. + RawBytesExt InterfaceExt + + _ [2]uint64 // padding +} + +// Name returns the name of the handle: json +func (h *JsonHandle) Name() string { return "json" } +func (h *JsonHandle) hasElemSeparators() bool { return true } +func (h *JsonHandle) typical() bool { + return h.Indent == 0 && !h.MapKeyAsString && h.IntegerAsString != 'A' && h.IntegerAsString != 'L' +} + +type jsonTypical interface { + typical() +} + +func (h *JsonHandle) recreateEncDriver(ed encDriver) (v bool) { + _, v = ed.(jsonTypical) + return v != h.typical() +} + +// SetInterfaceExt sets an extension +func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { + return h.SetExt(rt, tag, &extWrapper{bytesExtFailer{}, ext}) +} + +func (h *JsonHandle) newEncDriver(e *Encoder) (ee encDriver) { + var hd *jsonEncDriver + if h.typical() { + var v jsonEncDriverTypical + ee = &v + hd = &v.jsonEncDriver + } else { + var v jsonEncDriverGeneric + ee = &v + hd = &v.jsonEncDriver + } + hd.e, hd.h, hd.bs = e, h, hd.b[:0] + hd.se.BytesExt = bytesExtFailer{} + ee.reset() + return +} + +func (h *JsonHandle) newDecDriver(d *Decoder) decDriver { + // d := jsonDecDriver{r: r.(*bytesDecReader), h: h} + hd := jsonDecDriver{d: d, h: h} + hd.se.BytesExt = bytesExtFailer{} + hd.bs = hd.b[:0] + hd.reset() + return &hd +} + +func (e *jsonEncDriver) reset() { + e.w = e.e.w + e.se.InterfaceExt = e.h.RawBytesExt + if e.bs != nil { + e.bs = e.bs[:0] + } + e.c = 0 +} + +func (d *jsonDecDriver) reset() { + d.r = d.d.r + d.se.InterfaceExt = d.h.RawBytesExt + if d.bs != nil { + d.bs = d.bs[:0] + } + d.c, d.tok = 0, 0 + // d.n.reset() +} + +func jsonFloatStrconvFmtPrec(f float64) (fmt byte, prec int) { + prec = -1 + var abs = math.Abs(f) + if abs != 0 && (abs < 1e-6 || abs >= 1e21) { + fmt = 'e' + } else { + fmt = 'f' + // set prec to 1 iff mod is 0. + // better than using jsonIsFloatBytesB2 to check if a . or E in the float bytes. + // this ensures that every float has an e or .0 in it. + if abs <= 1 { + if abs == 0 || abs == 1 { + prec = 1 + } + } else if _, mod := math.Modf(abs); mod == 0 { + prec = 1 + } + } + return +} + +// custom-fitted version of strconv.Parse(Ui|I)nt. +// Also ensures we don't have to search for .eE to determine if a float or not. +// Note: s CANNOT be a zero-length slice. 
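+//
+// Illustrative results, derived from the implementation below:
+//
+//	jsonParseInteger([]byte("123")) // n=123, neg=false
+//	jsonParseInteger([]byte("-12")) // n=12, neg=true
+//	jsonParseInteger([]byte("1.5")) // badSyntax=true; callers fall back to float parsing
+//	jsonParseInteger([]byte("18446744073709551616")) // overflow=true (the value is 1<<64)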
+func jsonParseInteger(s []byte) (n uint64, neg, badSyntax, overflow bool) { + const maxUint64 = (1<<64 - 1) + const cutoff = maxUint64/10 + 1 + + if len(s) == 0 { // bounds-check-elimination + // treat empty string as zero value + // badSyntax = true + return + } + switch s[0] { + case '+': + s = s[1:] + case '-': + s = s[1:] + neg = true + } + for _, c := range s { + if c < '0' || c > '9' { + badSyntax = true + return + } + // unsigned integers don't overflow well on multiplication, so check cutoff here + // e.g. (maxUint64-5)*10 doesn't overflow well ... + if n >= cutoff { + overflow = true + return + } + n *= 10 + n1 := n + uint64(c-'0') + if n1 < n || n1 > maxUint64 { + overflow = true + return + } + n = n1 + } + return +} + +var _ decDriver = (*jsonDecDriver)(nil) +var _ encDriver = (*jsonEncDriverGeneric)(nil) +var _ encDriver = (*jsonEncDriverTypical)(nil) +var _ jsonTypical = (*jsonEncDriverTypical)(nil) diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/mammoth-test.go.tmpl b/vendor/github.com/hashicorp/go-msgpack/v2/codec/mammoth-test.go.tmpl new file mode 100644 index 0000000000000..c598cc73a5eba --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/mammoth-test.go.tmpl @@ -0,0 +1,154 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// Code generated from mammoth-test.go.tmpl - DO NOT EDIT. + +package codec + +import "testing" +import "fmt" +import "reflect" + +// TestMammoth has all the different paths optimized in fast-path +// It has all the primitives, slices and maps. +// +// For each of those types, it has a pointer and a non-pointer field. + +func init() { _ = fmt.Printf } // so we can include fmt as needed + +type TestMammoth struct { + +{{range .Values }}{{if .Primitive }}{{/* +*/}}{{ .MethodNamePfx "F" true }} {{ .Primitive }} +{{ .MethodNamePfx "Fptr" true }} *{{ .Primitive }} +{{end}}{{end}} + +{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/* +*/}}{{ .MethodNamePfx "F" false }} []{{ .Elem }} +{{ .MethodNamePfx "Fptr" false }} *[]{{ .Elem }} +{{end}}{{end}}{{end}} + +{{range .Values }}{{if not .Primitive }}{{if .MapKey }}{{/* +*/}}{{ .MethodNamePfx "F" false }} map[{{ .MapKey }}]{{ .Elem }} +{{ .MethodNamePfx "Fptr" false }} *map[{{ .MapKey }}]{{ .Elem }} +{{end}}{{end}}{{end}} + +} + +{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/* +*/}} type {{ .MethodNamePfx "typMbs" false }} []{{ .Elem }} +func (_ {{ .MethodNamePfx "typMbs" false }}) MapBySlice() { } +{{end}}{{end}}{{end}} + +{{range .Values }}{{if not .Primitive }}{{if .MapKey }}{{/* +*/}} type {{ .MethodNamePfx "typMap" false }} map[{{ .MapKey }}]{{ .Elem }} +{{end}}{{end}}{{end}} + +func doTestMammothSlices(t *testing.T, h Handle) { +{{range $i, $e := .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/* +*/}} + var v{{$i}}va [8]{{ .Elem }} + for _, v := range [][]{{ .Elem }}{ nil, {}, { {{ nonzerocmd .Elem }}, {{ zerocmd .Elem }}, {{ zerocmd .Elem }}, {{ nonzerocmd .Elem }} } } { {{/* + // fmt.Printf(">>>> running mammoth slice v{{$i}}: %v\n", v) + // - encode value to some []byte + // - decode into a length-wise-equal []byte + // - check if equal to initial slice + // - encode ptr to the value + // - check if encode bytes are same + // - decode into ptrs to: nil, then 1-elem slice, equal-length, then large len slice + // - decode into non-addressable slice of equal length, then larger len + // - for each decode, compare elem-by-elem to the original 
slice + // - + // - rinse and repeat for a MapBySlice version + // - + */}} + var v{{$i}}v1, v{{$i}}v2 []{{ .Elem }} + v{{$i}}v1 = v + bs{{$i}} := testMarshalErr(v{{$i}}v1, h, t, "enc-slice-v{{$i}}") + if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) } + testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}") + if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) } + testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-slice-v{{$i}}-noaddr") // non-addressable value + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-noaddr") + // ... + bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-slice-v{{$i}}-p") + v{{$i}}v2 = nil + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p") + v{{$i}}va = [8]{{ .Elem }}{} // clear the array + v{{$i}}v2 = v{{$i}}va[:1:1] + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-1") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-1") + v{{$i}}va = [8]{{ .Elem }}{} // clear the array + v{{$i}}v2 = v{{$i}}va[:len(v{{$i}}v1):len(v{{$i}}v1)] + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-len") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-len") + v{{$i}}va = [8]{{ .Elem }}{} // clear the array + v{{$i}}v2 = v{{$i}}va[:] + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-cap") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-cap") + if len(v{{$i}}v1) > 1 { + v{{$i}}va = [8]{{ .Elem }}{} // clear the array + testUnmarshalErr((&v{{$i}}va)[:len(v{{$i}}v1)], bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-len-noaddr") + testDeepEqualErr(v{{$i}}v1, v{{$i}}va[:len(v{{$i}}v1)], t, "equal-slice-v{{$i}}-p-len-noaddr") + v{{$i}}va = [8]{{ .Elem }}{} // clear the array + testUnmarshalErr((&v{{$i}}va)[:], bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-cap-noaddr") + testDeepEqualErr(v{{$i}}v1, v{{$i}}va[:len(v{{$i}}v1)], t, "equal-slice-v{{$i}}-p-cap-noaddr") + } + // ... 
+ var v{{$i}}v3, v{{$i}}v4 {{ .MethodNamePfx "typMbs" false }} + v{{$i}}v2 = nil + if v != nil { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) } + v{{$i}}v3 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v1) + v{{$i}}v4 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v2) + bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom") + testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom") + testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom") + bs{{$i}} = testMarshalErr(&v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom-p") + v{{$i}}v2 = nil + v{{$i}}v4 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v2) + testUnmarshalErr(&v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom-p") + testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom-p") + } +{{end}}{{end}}{{end}} +} + +func doTestMammothMaps(t *testing.T, h Handle) { +{{range $i, $e := .Values }}{{if not .Primitive }}{{if .MapKey }}{{/* +*/}} + for _, v := range []map[{{ .MapKey }}]{{ .Elem }}{ nil, {}, { {{ nonzerocmd .MapKey }}:{{ zerocmd .Elem }} {{if ne "bool" .MapKey}}, {{ nonzerocmd .MapKey }}:{{ nonzerocmd .Elem }} {{end}} } } { + // fmt.Printf(">>>> running mammoth map v{{$i}}: %v\n", v) + var v{{$i}}v1, v{{$i}}v2 map[{{ .MapKey }}]{{ .Elem }} + v{{$i}}v1 = v + bs{{$i}} := testMarshalErr(v{{$i}}v1, h, t, "enc-map-v{{$i}}") + if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map + testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}") + if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map + testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-map-v{{$i}}-noaddr") // decode into non-addressable map value + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-noaddr") + if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-p-len") + bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-map-v{{$i}}-p") + v{{$i}}v2 = nil + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-nil") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-p-nil") + // ... + if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map + var v{{$i}}v3, v{{$i}}v4 {{ .MethodNamePfx "typMap" false }} + v{{$i}}v3 = {{ .MethodNamePfx "typMap" false }}(v{{$i}}v1) + v{{$i}}v4 = {{ .MethodNamePfx "typMap" false }}(v{{$i}}v2) + bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-map-v{{$i}}-custom") + testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len") + testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-map-v{{$i}}-p-len") + } +{{end}}{{end}}{{end}} + +} + +func doTestMammothMapsAndSlices(t *testing.T, h Handle) { + doTestMammothSlices(t, h) + doTestMammothMaps(t, h) +} diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/mammoth2-test.go.tmpl b/vendor/github.com/hashicorp/go-msgpack/v2/codec/mammoth2-test.go.tmpl new file mode 100644 index 0000000000000..3b546f3e40b6d --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/mammoth2-test.go.tmpl @@ -0,0 +1,92 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
+ +// Code generated from mammoth2-test.go.tmpl - DO NOT EDIT. + +package codec + +// Increase codecoverage by covering all the codecgen paths, in fast-path and gen-helper.go.... +// +// Add: +// - test file for creating a mammoth generated file as _mammoth_generated.go +// - generate a second mammoth files in a different file: mammoth2_generated_test.go +// - mammoth-test.go.tmpl will do this +// - run codecgen on it, into mammoth2_codecgen_generated_test.go (no build tags) +// - as part of TestMammoth, run it also +// - this will cover all the codecgen, gen-helper, etc in one full run +// - check in mammoth* files into github also +// - then +// +// Now, add some types: +// - some that implement BinaryMarshal, TextMarshal, JSONMarshal, and one that implements none of it +// - create a wrapper type that includes TestMammoth2, with it in slices, and maps, and the custom types +// - this wrapper object is what we work encode/decode (so that the codecgen methods are called) + + +// import "encoding/binary" +import "fmt" + +type TestMammoth2 struct { + +{{range .Values }}{{if .Primitive }}{{/* +*/}}{{ .MethodNamePfx "F" true }} {{ .Primitive }} +{{ .MethodNamePfx "Fptr" true }} *{{ .Primitive }} +{{end}}{{end}} + +{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/* +*/}}{{ .MethodNamePfx "F" false }} []{{ .Elem }} +{{ .MethodNamePfx "Fptr" false }} *[]{{ .Elem }} +{{end}}{{end}}{{end}} + +{{range .Values }}{{if not .Primitive }}{{if .MapKey }}{{/* +*/}}{{ .MethodNamePfx "F" false }} map[{{ .MapKey }}]{{ .Elem }} +{{ .MethodNamePfx "Fptr" false }} *map[{{ .MapKey }}]{{ .Elem }} +{{end}}{{end}}{{end}} + +} + +// ----------- + +type testMammoth2Binary uint64 +func (x testMammoth2Binary) MarshalBinary() (data []byte, err error) { +data = make([]byte, 8) +bigen.PutUint64(data, uint64(x)) +return +} +func (x *testMammoth2Binary) UnmarshalBinary(data []byte) (err error) { +*x = testMammoth2Binary(bigen.Uint64(data)) +return +} + +type testMammoth2Text uint64 +func (x testMammoth2Text) MarshalText() (data []byte, err error) { +data = []byte(fmt.Sprintf("%b", uint64(x))) +return +} +func (x *testMammoth2Text) UnmarshalText(data []byte) (err error) { +_, err = fmt.Sscanf(string(data), "%b", (*uint64)(x)) +return +} + +type testMammoth2Json uint64 +func (x testMammoth2Json) MarshalJSON() (data []byte, err error) { +data = []byte(fmt.Sprintf("%v", uint64(x))) +return +} +func (x *testMammoth2Json) UnmarshalJSON(data []byte) (err error) { +_, err = fmt.Sscanf(string(data), "%v", (*uint64)(x)) +return +} + +type testMammoth2Basic [4]uint64 + +type TestMammoth2Wrapper struct { + V TestMammoth2 + T testMammoth2Text + B testMammoth2Binary + J testMammoth2Json + C testMammoth2Basic + M map[testMammoth2Basic]TestMammoth2 + L []TestMammoth2 + A [4]int64 +} diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/msgpack.go b/vendor/github.com/hashicorp/go-msgpack/v2/codec/msgpack.go new file mode 100644 index 0000000000000..99c8a13fd85f7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/msgpack.go @@ -0,0 +1,1150 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +/* +MSGPACK + +Msgpack-c implementation powers the c, c++, python, ruby, etc libraries. +We need to maintain compatibility with it and how it encodes integer values +without caring about the type. 
+ +For compatibility with behaviour of msgpack-c reference implementation: + - Go intX (>0) and uintX + IS ENCODED AS + msgpack +ve fixnum, unsigned + - Go intX (<0) + IS ENCODED AS + msgpack -ve fixnum, signed +*/ + +package codec + +import ( + "fmt" + "io" + "math" + "net/rpc" + "reflect" + "time" +) + +const ( + mpPosFixNumMin byte = 0x00 + mpPosFixNumMax byte = 0x7f + mpFixMapMin byte = 0x80 + mpFixMapMax byte = 0x8f + mpFixArrayMin byte = 0x90 + mpFixArrayMax byte = 0x9f + mpFixStrMin byte = 0xa0 + mpFixStrMax byte = 0xbf + mpNil byte = 0xc0 + _ byte = 0xc1 + mpFalse byte = 0xc2 + mpTrue byte = 0xc3 + mpFloat byte = 0xca + mpDouble byte = 0xcb + mpUint8 byte = 0xcc + mpUint16 byte = 0xcd + mpUint32 byte = 0xce + mpUint64 byte = 0xcf + mpInt8 byte = 0xd0 + mpInt16 byte = 0xd1 + mpInt32 byte = 0xd2 + mpInt64 byte = 0xd3 + + // extensions below + mpBin8 byte = 0xc4 + mpBin16 byte = 0xc5 + mpBin32 byte = 0xc6 + mpExt8 byte = 0xc7 + mpExt16 byte = 0xc8 + mpExt32 byte = 0xc9 + mpFixExt1 byte = 0xd4 + mpFixExt2 byte = 0xd5 + mpFixExt4 byte = 0xd6 + mpFixExt8 byte = 0xd7 + mpFixExt16 byte = 0xd8 + + mpStr8 byte = 0xd9 // new + mpStr16 byte = 0xda + mpStr32 byte = 0xdb + + mpArray16 byte = 0xdc + mpArray32 byte = 0xdd + + mpMap16 byte = 0xde + mpMap32 byte = 0xdf + + mpNegFixNumMin byte = 0xe0 + mpNegFixNumMax byte = 0xff +) + +var mpTimeExtTag int8 = -1 +var mpTimeExtTagU = uint8(mpTimeExtTag) + +// var mpdesc = map[byte]string{ +// mpPosFixNumMin: "PosFixNumMin", +// mpPosFixNumMax: "PosFixNumMax", +// mpFixMapMin: "FixMapMin", +// mpFixMapMax: "FixMapMax", +// mpFixArrayMin: "FixArrayMin", +// mpFixArrayMax: "FixArrayMax", +// mpFixStrMin: "FixStrMin", +// mpFixStrMax: "FixStrMax", +// mpNil: "Nil", +// mpFalse: "False", +// mpTrue: "True", +// mpFloat: "Float", +// mpDouble: "Double", +// mpUint8: "Uint8", +// mpUint16: "Uint16", +// mpUint32: "Uint32", +// mpUint64: "Uint64", +// mpInt8: "Int8", +// mpInt16: "Int16", +// mpInt32: "Int32", +// mpInt64: "Int64", +// mpBin8: "Bin8", +// mpBin16: "Bin16", +// mpBin32: "Bin32", +// mpExt8: "Ext8", +// mpExt16: "Ext16", +// mpExt32: "Ext32", +// mpFixExt1: "FixExt1", +// mpFixExt2: "FixExt2", +// mpFixExt4: "FixExt4", +// mpFixExt8: "FixExt8", +// mpFixExt16: "FixExt16", +// mpStr8: "Str8", +// mpStr16: "Str16", +// mpStr32: "Str32", +// mpArray16: "Array16", +// mpArray32: "Array32", +// mpMap16: "Map16", +// mpMap32: "Map32", +// mpNegFixNumMin: "NegFixNumMin", +// mpNegFixNumMax: "NegFixNumMax", +// } + +func mpdesc(bd byte) string { + switch bd { + case mpNil: + return "nil" + case mpFalse: + return "false" + case mpTrue: + return "true" + case mpFloat, mpDouble: + return "float" + case mpUint8, mpUint16, mpUint32, mpUint64: + return "uint" + case mpInt8, mpInt16, mpInt32, mpInt64: + return "int" + default: + switch { + case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: + return "int" + case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: + return "int" + case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: + return "string|bytes" + case bd == mpBin8, bd == mpBin16, bd == mpBin32: + return "bytes" + case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: + return "array" + case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: + return "map" + case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: + return "ext" + default: + return "unknown" + } + } +} + +// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec 
+// that the backend RPC service takes multiple arguments, which have been arranged +// in sequence in the slice. +// +// The Codec then passes it AS-IS to the rpc service (without wrapping it in an +// array of 1 element). +type MsgpackSpecRpcMultiArgs []interface{} + +// A MsgpackContainer type specifies the different types of msgpackContainers. +type msgpackContainerType struct { + fixCutoff uint8 + bFixMin, b8, b16, b32 byte + // hasFixMin, has8, has8Always bool +} + +var ( + msgpackContainerRawLegacy = msgpackContainerType{ + 32, mpFixStrMin, 0, mpStr16, mpStr32, + } + msgpackContainerStr = msgpackContainerType{ + 32, mpFixStrMin, mpStr8, mpStr16, mpStr32, // true, true, false, + } + msgpackContainerBin = msgpackContainerType{ + 0, 0, mpBin8, mpBin16, mpBin32, // false, true, true, + } + msgpackContainerList = msgpackContainerType{ + 16, mpFixArrayMin, 0, mpArray16, mpArray32, // true, false, false, + } + msgpackContainerMap = msgpackContainerType{ + 16, mpFixMapMin, 0, mpMap16, mpMap32, // true, false, false, + } +) + +//--------------------------------------------- + +type msgpackEncDriver struct { + noBuiltInTypes + encDriverNoopContainerWriter + // encNoSeparator + e *Encoder + w *encWriterSwitch + h *MsgpackHandle + x [8]byte + // _ [3]uint64 // padding +} + +func (e *msgpackEncDriver) EncodeNil() { + e.w.writen1(mpNil) +} + +func (e *msgpackEncDriver) EncodeInt(i int64) { + if e.h.PositiveIntUnsigned && i >= 0 { + e.EncodeUint(uint64(i)) + } else if i > math.MaxInt8 { + if i <= math.MaxInt16 { + e.w.writen1(mpInt16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) + } else if i <= math.MaxInt32 { + e.w.writen1(mpInt32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) + } else { + e.w.writen1(mpInt64) + bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) + } + } else if i >= -32 { + if e.h.NoFixedNum { + e.w.writen2(mpInt8, byte(i)) + } else { + e.w.writen1(byte(i)) + } + } else if i >= math.MinInt8 { + e.w.writen2(mpInt8, byte(i)) + } else if i >= math.MinInt16 { + e.w.writen1(mpInt16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) + } else if i >= math.MinInt32 { + e.w.writen1(mpInt32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) + } else { + e.w.writen1(mpInt64) + bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) + } +} + +func (e *msgpackEncDriver) EncodeUint(i uint64) { + if i <= math.MaxInt8 { + if e.h.NoFixedNum { + e.w.writen2(mpUint8, byte(i)) + } else { + e.w.writen1(byte(i)) + } + } else if i <= math.MaxUint8 { + e.w.writen2(mpUint8, byte(i)) + } else if i <= math.MaxUint16 { + e.w.writen1(mpUint16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) + } else if i <= math.MaxUint32 { + e.w.writen1(mpUint32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) + } else { + e.w.writen1(mpUint64) + bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) + } +} + +func (e *msgpackEncDriver) EncodeBool(b bool) { + if b { + e.w.writen1(mpTrue) + } else { + e.w.writen1(mpFalse) + } +} + +func (e *msgpackEncDriver) EncodeFloat32(f float32) { + e.w.writen1(mpFloat) + bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f)) +} + +func (e *msgpackEncDriver) EncodeFloat64(f float64) { + e.w.writen1(mpDouble) + bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f)) +} + +func (e *msgpackEncDriver) EncodeTime(t time.Time) { + // use the MarshalBinary format if requested + if e.h.TimeNotBuiltin { + bin, err := t.MarshalBinary() + if err != nil { + return + } + e.EncodeStringBytesRaw(bin) + return + } + if t.IsZero() { + e.EncodeNil() + return + } + t = t.UTC() + sec, nsec := 
t.Unix(), uint64(t.Nanosecond()) + var data64 uint64 + var l = 4 + if sec >= 0 && sec>>34 == 0 { + data64 = (nsec << 34) | uint64(sec) + if data64&0xffffffff00000000 != 0 { + l = 8 + } + } else { + l = 12 + } + if e.h.WriteExt { + e.encodeExtPreamble(mpTimeExtTagU, l) + } else { + e.writeContainerLen(msgpackContainerRawLegacy, l) + } + switch l { + case 4: + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(data64)) + case 8: + bigenHelper{e.x[:8], e.w}.writeUint64(data64) + case 12: + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(nsec)) + bigenHelper{e.x[:8], e.w}.writeUint64(uint64(sec)) + } +} + +func (e *msgpackEncDriver) EncodeExt(v interface{}, xtag uint64, ext Ext, _ *Encoder) { + bs := ext.WriteExt(v) + if bs == nil { + e.EncodeNil() + return + } + if e.h.WriteExt { + e.encodeExtPreamble(uint8(xtag), len(bs)) + e.w.writeb(bs) + } else { + e.EncodeStringBytesRaw(bs) + } +} + +func (e *msgpackEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { + e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) + e.w.writeb(re.Data) +} + +func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) { + if l == 1 { + e.w.writen2(mpFixExt1, xtag) + } else if l == 2 { + e.w.writen2(mpFixExt2, xtag) + } else if l == 4 { + e.w.writen2(mpFixExt4, xtag) + } else if l == 8 { + e.w.writen2(mpFixExt8, xtag) + } else if l == 16 { + e.w.writen2(mpFixExt16, xtag) + } else if l < 256 { + e.w.writen2(mpExt8, byte(l)) + e.w.writen1(xtag) + } else if l < 65536 { + e.w.writen1(mpExt16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l)) + e.w.writen1(xtag) + } else { + e.w.writen1(mpExt32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l)) + e.w.writen1(xtag) + } +} + +func (e *msgpackEncDriver) WriteArrayStart(length int) { + e.writeContainerLen(msgpackContainerList, length) +} + +func (e *msgpackEncDriver) WriteMapStart(length int) { + e.writeContainerLen(msgpackContainerMap, length) +} + +func (e *msgpackEncDriver) EncodeString(c charEncoding, s string) { + slen := len(s) + if c == cRAW && e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, slen) + } else { + e.writeContainerLen(msgpackContainerRawLegacy, slen) + } + if slen > 0 { + e.w.writestr(s) + } +} + +func (e *msgpackEncDriver) EncodeStringEnc(c charEncoding, s string) { + slen := len(s) + if e.h.WriteExt { + e.writeContainerLen(msgpackContainerStr, slen) + } else { + e.writeContainerLen(msgpackContainerRawLegacy, slen) + } + if slen > 0 { + e.w.writestr(s) + } +} + +func (e *msgpackEncDriver) EncodeStringBytes(c charEncoding, bs []byte) { + if bs == nil { + e.EncodeNil() + return + } + slen := len(bs) + if c == cRAW && e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, slen) + } else { + e.writeContainerLen(msgpackContainerRawLegacy, slen) + } + if slen > 0 { + e.w.writeb(bs) + } +} + +func (e *msgpackEncDriver) EncodeStringBytesRaw(bs []byte) { + if bs == nil { + e.EncodeNil() + return + } + slen := len(bs) + if e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, slen) + } else { + e.writeContainerLen(msgpackContainerRawLegacy, slen) + } + if slen > 0 { + e.w.writeb(bs) + } +} + +func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) { + if ct.fixCutoff > 0 && l < int(ct.fixCutoff) { + e.w.writen1(ct.bFixMin | byte(l)) + } else if ct.b8 > 0 && l < 256 { + e.w.writen2(ct.b8, uint8(l)) + } else if l < 65536 { + e.w.writen1(ct.b16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l)) + } else { + e.w.writen1(ct.b32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l)) + } +} + +//--------------------------------------------- + 
+type msgpackDecDriver struct { + d *Decoder + r *decReaderSwitch + h *MsgpackHandle + // b [scratchByteArrayLen]byte + bd byte + bdRead bool + br bool // bytes reader + noBuiltInTypes + // noStreamingCodec + // decNoSeparator + decDriverNoopContainerReader + // _ [3]uint64 // padding +} + +// Note: This returns either a primitive (int, bool, etc) for non-containers, +// or a containerType, or a specific type denoting nil or extension. +// It is called when a nil interface{} is passed, leaving it up to the DecDriver +// to introspect the stream and decide how best to decode. +// It deciphers the value by looking at the stream first. +func (d *msgpackDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + bd := d.bd + n := d.d.naked() + var decodeFurther bool + + switch bd { + case mpNil: + n.v = valueTypeNil + d.bdRead = false + case mpFalse: + n.v = valueTypeBool + n.b = false + case mpTrue: + n.v = valueTypeBool + n.b = true + + case mpFloat: + n.v = valueTypeFloat + n.f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + case mpDouble: + n.v = valueTypeFloat + n.f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + + case mpUint8: + n.v = valueTypeUint + n.u = uint64(d.r.readn1()) + case mpUint16: + n.v = valueTypeUint + n.u = uint64(bigen.Uint16(d.r.readx(2))) + case mpUint32: + n.v = valueTypeUint + n.u = uint64(bigen.Uint32(d.r.readx(4))) + case mpUint64: + n.v = valueTypeUint + n.u = uint64(bigen.Uint64(d.r.readx(8))) + + case mpInt8: + n.v = valueTypeInt + n.i = int64(int8(d.r.readn1())) + case mpInt16: + n.v = valueTypeInt + n.i = int64(int16(bigen.Uint16(d.r.readx(2)))) + case mpInt32: + n.v = valueTypeInt + n.i = int64(int32(bigen.Uint32(d.r.readx(4)))) + case mpInt64: + n.v = valueTypeInt + n.i = int64(int64(bigen.Uint64(d.r.readx(8)))) + + default: + switch { + case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: + // positive fixnum (always signed) + n.v = valueTypeInt + n.i = int64(int8(bd)) + case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: + // negative fixnum + n.v = valueTypeInt + n.i = int64(int8(bd)) + case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: + if d.h.WriteExt || d.h.RawToString { + n.v = valueTypeString + n.s = d.DecodeString() + } else { + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false) + } + case bd == mpBin8, bd == mpBin16, bd == mpBin32: + decNakedReadRawBytes(d, d.d, n, d.h.RawToString) + case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: + n.v = valueTypeArray + decodeFurther = true + case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: + n.v = valueTypeMap + decodeFurther = true + case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: + n.v = valueTypeExt + clen := d.readExtLen() + n.u = uint64(d.r.readn1()) + if n.u == uint64(mpTimeExtTagU) { + n.v = valueTypeTime + n.t = d.decodeTime(clen) + } else if d.br { + n.l = d.r.readx(uint(clen)) + } else { + n.l = decByteSlice(d.r, clen, d.d.h.MaxInitLen, d.d.b[:]) + } + default: + d.d.errorf("cannot infer value: %s: Ox%x/%d/%s", msgBadDesc, bd, bd, mpdesc(bd)) + } + } + if !decodeFurther { + d.bdRead = false + } + if n.v == valueTypeUint && d.h.SignedInteger { + n.v = valueTypeInt + n.i = int64(n.u) + } +} + +// int can be decoded from msgpack type: intXXX or uintXXX +func (d *msgpackDecDriver) DecodeInt64() (i int64) { + if !d.bdRead { + d.readNextBd() + } + switch d.bd { + case mpUint8: + i = int64(uint64(d.r.readn1())) + case mpUint16: + i = 
int64(uint64(bigen.Uint16(d.r.readx(2)))) + case mpUint32: + i = int64(uint64(bigen.Uint32(d.r.readx(4)))) + case mpUint64: + i = int64(bigen.Uint64(d.r.readx(8))) + case mpInt8: + i = int64(int8(d.r.readn1())) + case mpInt16: + i = int64(int16(bigen.Uint16(d.r.readx(2)))) + case mpInt32: + i = int64(int32(bigen.Uint32(d.r.readx(4)))) + case mpInt64: + i = int64(bigen.Uint64(d.r.readx(8))) + default: + switch { + case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: + i = int64(int8(d.bd)) + case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: + i = int64(int8(d.bd)) + default: + d.d.errorf("cannot decode signed integer: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd)) + return + } + } + d.bdRead = false + return +} + +// uint can be decoded from msgpack type: intXXX or uintXXX +func (d *msgpackDecDriver) DecodeUint64() (ui uint64) { + if !d.bdRead { + d.readNextBd() + } + switch d.bd { + case mpUint8: + ui = uint64(d.r.readn1()) + case mpUint16: + ui = uint64(bigen.Uint16(d.r.readx(2))) + case mpUint32: + ui = uint64(bigen.Uint32(d.r.readx(4))) + case mpUint64: + ui = bigen.Uint64(d.r.readx(8)) + case mpInt8: + if i := int64(int8(d.r.readn1())); i >= 0 { + ui = uint64(i) + } else { + d.d.errorf("assigning negative signed value: %v, to unsigned type", i) + return + } + case mpInt16: + if i := int64(int16(bigen.Uint16(d.r.readx(2)))); i >= 0 { + ui = uint64(i) + } else { + d.d.errorf("assigning negative signed value: %v, to unsigned type", i) + return + } + case mpInt32: + if i := int64(int32(bigen.Uint32(d.r.readx(4)))); i >= 0 { + ui = uint64(i) + } else { + d.d.errorf("assigning negative signed value: %v, to unsigned type", i) + return + } + case mpInt64: + if i := int64(bigen.Uint64(d.r.readx(8))); i >= 0 { + ui = uint64(i) + } else { + d.d.errorf("assigning negative signed value: %v, to unsigned type", i) + return + } + default: + switch { + case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: + ui = uint64(d.bd) + case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: + d.d.errorf("assigning negative signed value: %v, to unsigned type", int(d.bd)) + return + default: + d.d.errorf("cannot decode unsigned integer: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd)) + return + } + } + d.bdRead = false + return +} + +// float can either be decoded from msgpack type: float, double or intX +func (d *msgpackDecDriver) DecodeFloat64() (f float64) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == mpFloat { + f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + } else if d.bd == mpDouble { + f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + } else { + f = float64(d.DecodeInt64()) + } + d.bdRead = false + return +} + +// bool can be decoded from bool, fixnum 0 or 1. 
+func (d *msgpackDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == mpFalse || d.bd == 0 { + // b = false + } else if d.bd == mpTrue || d.bd == 1 { + b = true + } else { + d.d.errorf("cannot decode bool: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd)) + return + } + d.bdRead = false + return +} + +func (d *msgpackDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { + if !d.bdRead { + d.readNextBd() + } + + bd := d.bd + var clen int + if bd == mpNil { + d.bdRead = false + return + } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { + clen = d.readContainerLen(msgpackContainerBin) // binary + } else if bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax) { + clen = d.readContainerLen(msgpackContainerStr) // string/raw + } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) { + // check if an "array" of uint8's + if zerocopy && len(bs) == 0 { + bs = d.d.b[:] + } + bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d) + return + } else { + d.d.errorf("invalid byte descriptor for decoding bytes, got: 0x%x", d.bd) + return + } + + d.bdRead = false + if zerocopy { + if d.br { + return d.r.readx(uint(clen)) + } else if len(bs) == 0 { + bs = d.d.b[:] + } + } + return decByteSlice(d.r, clen, d.h.MaxInitLen, bs) +} + +func (d *msgpackDecDriver) DecodeString() (s string) { + return string(d.DecodeBytes(d.d.b[:], true)) +} + +func (d *msgpackDecDriver) DecodeStringAsBytes() (s []byte) { + return d.DecodeBytes(d.d.b[:], true) +} + +func (d *msgpackDecDriver) readNextBd() { + d.bd = d.r.readn1() + d.bdRead = true +} + +func (d *msgpackDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + +func (d *msgpackDecDriver) ContainerType() (vt valueType) { + if !d.bdRead { + d.readNextBd() + } + bd := d.bd + // if bd == mpNil { + // // nil + // } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { + // // binary + // } else if bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax) { + // // string/raw + // } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) { + // // array + // } else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) { + // // map + // } + if bd == mpNil { + return valueTypeNil + } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { + return valueTypeBytes + } else if bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax) { + if d.h.WriteExt || d.h.RawToString { // UTF-8 string (new spec) + return valueTypeString + } + return valueTypeBytes // raw (old spec) + } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) { + return valueTypeArray + } else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) { + return valueTypeMap + } + // else { + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + // } + return valueTypeUnset +} + +func (d *msgpackDecDriver) TryDecodeAsNil() (v bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == mpNil { + d.bdRead = false + return true + } + return +} + +func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) { + bd := d.bd + if bd == mpNil { + clen = -1 // to represent nil + } else if bd == ct.b8 { + clen = int(d.r.readn1()) + } else if bd == ct.b16 { + clen = int(bigen.Uint16(d.r.readx(2))) + } else if bd == ct.b32 { + clen = 
int(bigen.Uint32(d.r.readx(4))) + } else if (ct.bFixMin & bd) == ct.bFixMin { + clen = int(ct.bFixMin ^ bd) + } else { + d.d.errorf("cannot read container length: %s: hex: %x, decimal: %d", msgBadDesc, bd, bd) + return + } + d.bdRead = false + return +} + +func (d *msgpackDecDriver) ReadMapStart() int { + if !d.bdRead { + d.readNextBd() + } + return d.readContainerLen(msgpackContainerMap) +} + +func (d *msgpackDecDriver) ReadArrayStart() int { + if !d.bdRead { + d.readNextBd() + } + return d.readContainerLen(msgpackContainerList) +} + +func (d *msgpackDecDriver) readExtLen() (clen int) { + switch d.bd { + case mpNil: + clen = -1 // to represent nil + case mpFixExt1: + clen = 1 + case mpFixExt2: + clen = 2 + case mpFixExt4: + clen = 4 + case mpFixExt8: + clen = 8 + case mpFixExt16: + clen = 16 + case mpExt8: + clen = int(d.r.readn1()) + case mpExt16: + clen = int(bigen.Uint16(d.r.readx(2))) + case mpExt32: + clen = int(bigen.Uint32(d.r.readx(4))) + default: + d.d.errorf("decoding ext bytes: found unexpected byte: %x", d.bd) + return + } + return +} + +func (d *msgpackDecDriver) DecodeTime() (t time.Time) { + // decode time from string bytes or ext + if !d.bdRead { + d.readNextBd() + } + bd := d.bd + var clen int + if bd == mpNil { + d.bdRead = false + return + } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { + clen = d.readContainerLen(msgpackContainerBin) // binary + } else if bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax) { + clen = d.readContainerLen(msgpackContainerStr) // string/raw + } else { + // expect to see mpFixExt4,-1 OR mpFixExt8,-1 OR mpExt8,12,-1 + d.bdRead = false + b2 := d.r.readn1() + if d.bd == mpFixExt4 && b2 == mpTimeExtTagU { + clen = 4 + } else if d.bd == mpFixExt8 && b2 == mpTimeExtTagU { + clen = 8 + } else if d.bd == mpExt8 && b2 == 12 && d.r.readn1() == mpTimeExtTagU { + clen = 12 + } else { + d.d.errorf("invalid stream for decoding time as extension: got 0x%x, 0x%x", d.bd, b2) + return + } + } + return d.decodeTime(clen) +} + +func (d *msgpackDecDriver) decodeTime(clen int) (t time.Time) { + bs := d.r.readx(uint(clen)) + + // Decode as a binary marshalled string for compatibility with other versions of go-msgpack. 
+ // time.Time should always be encoded as 16 bytes or fewer in the binary marshalling format, + // so will always fit within the 32 byte max for fixed strings + if d.bd >= mpFixStrMin && d.bd <= mpFixStrMax { + err := t.UnmarshalBinary(bs) + if err == nil { + return + } + // fallthrough on failure + } + + d.bdRead = false + switch clen { + case 4: + t = time.Unix(int64(bigen.Uint32(bs)), 0).UTC() + case 8: + tv := bigen.Uint64(bs) + t = time.Unix(int64(tv&0x00000003ffffffff), int64(tv>>34)).UTC() + case 12: + nsec := bigen.Uint32(bs[:4]) + sec := bigen.Uint64(bs[4:]) + t = time.Unix(int64(sec), int64(nsec)).UTC() + default: + d.d.errorf("invalid bytes for decoding time - expecting string or 4, 8, or 12 bytes, got %d", clen) + } + return +} + +func (d *msgpackDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if xtag > 0xff { + d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag) + return + } + realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) + realxtag = uint64(realxtag1) + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) + } else { + ext.ReadExt(rv, xbs) + } + return +} + +func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + if !d.bdRead { + d.readNextBd() + } + xbd := d.bd + if xbd == mpBin8 || xbd == mpBin16 || xbd == mpBin32 { + xbs = d.DecodeBytes(nil, true) + } else if xbd == mpStr8 || xbd == mpStr16 || xbd == mpStr32 || + (xbd >= mpFixStrMin && xbd <= mpFixStrMax) { + xbs = d.DecodeStringAsBytes() + } else { + clen := d.readExtLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + d.d.errorf("wrong extension tag - got %b, expecting %v", xtag, tag) + return + } + if d.br { + xbs = d.r.readx(uint(clen)) + } else { + xbs = decByteSlice(d.r, clen, d.d.h.MaxInitLen, d.d.b[:]) + } + } + d.bdRead = false + return +} + +//-------------------------------------------------- + +// MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format. +type MsgpackHandle struct { + BasicHandle + + // NoFixedNum says to output all signed integers as 2-bytes, never as 1-byte fixednum. + NoFixedNum bool + + // WriteExt controls whether the new spec is honored. + // + // With WriteExt=true, we can encode configured extensions with extension tags + // and encode string/[]byte/extensions in a way compatible with the new spec + // but incompatible with the old spec. + // + // For compatibility with the old spec, set WriteExt=false. + // + // With WriteExt=false: + // configured extensions are serialized as raw bytes (not msgpack extensions). + // reserved byte descriptors like Str8 and those enabling the new msgpack Binary type + // are not encoded. + WriteExt bool + + // PositiveIntUnsigned says to encode positive integers as unsigned. 
+ PositiveIntUnsigned bool + + binaryEncodingType + noElemSeparators + + // _ [1]uint64 // padding +} + +// Name returns the name of the handle: msgpack +func (h *MsgpackHandle) Name() string { return "msgpack" } + +// SetBytesExt sets an extension +func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { + return h.SetExt(rt, tag, &extWrapper{ext, interfaceExtFailer{}}) +} + +func (h *MsgpackHandle) newEncDriver(e *Encoder) encDriver { + return &msgpackEncDriver{e: e, w: e.w, h: h} +} + +func (h *MsgpackHandle) newDecDriver(d *Decoder) decDriver { + return &msgpackDecDriver{d: d, h: h, r: d.r, br: d.bytes} +} + +func (e *msgpackEncDriver) reset() { + e.w = e.e.w +} + +func (d *msgpackDecDriver) reset() { + d.r, d.br = d.d.r, d.d.bytes + d.bd, d.bdRead = 0, false +} + +//-------------------------------------------------- + +type msgpackSpecRpcCodec struct { + rpcCodec +} + +// /////////////// Spec RPC Codec /////////////////// +func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { + // WriteRequest can write to both a Go service, and other services that do + // not abide by the 1 argument rule of a Go service. + // We discriminate based on if the body is a MsgpackSpecRpcMultiArgs + var bodyArr []interface{} + if m, ok := body.(MsgpackSpecRpcMultiArgs); ok { + bodyArr = ([]interface{})(m) + } else { + bodyArr = []interface{}{body} + } + r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr} + return c.write(r2, nil, false) +} + +func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { + var moe interface{} + if r.Error != "" { + moe = r.Error + } + if moe != nil && body != nil { + body = nil + } + r2 := []interface{}{1, uint32(r.Seq), moe, body} + return c.write(r2, nil, false) +} + +func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error { + return c.parseCustomHeader(1, &r.Seq, &r.Error) +} + +func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error { + return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod) +} + +func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error { + if body == nil { // read and discard + return c.read(nil) + } + bodyArr := []interface{}{body} + return c.read(&bodyArr) +} + +func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) { + if cls := c.cls.load(); cls.closed { + return io.EOF + } + + // We read the response header by hand + // so that the body can be decoded on its own from the stream at a later time. + + const fia byte = 0x94 //four item array descriptor value + // Not sure why the panic of EOF is swallowed above. + // if bs1 := c.dec.r.readn1(); bs1 != fia { + // err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. 
Received %v", fia, bs1) + // return + // } + var ba [1]byte + var n int + for { + n, err = c.r.Read(ba[:]) + if err != nil { + return + } + if n == 1 { + break + } + } + + var b = ba[0] + if b != fia { + err = fmt.Errorf("not array - %s %x/%s", msgBadDesc, b, mpdesc(b)) + } else { + err = c.read(&b) + if err == nil { + if b != expectTypeByte { + err = fmt.Errorf("%s - expecting %v but got %x/%s", + msgBadDesc, expectTypeByte, b, mpdesc(b)) + } else { + err = c.read(msgid) + if err == nil { + err = c.read(methodOrError) + } + } + } + } + return +} + +//-------------------------------------------------- + +// msgpackSpecRpc is the implementation of Rpc that uses custom communication protocol +// as defined in the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md +type msgpackSpecRpc struct{} + +// MsgpackSpecRpc implements Rpc using the communication protocol defined in +// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md . +// +// See GoRpc documentation, for information on buffering for better performance. +var MsgpackSpecRpc msgpackSpecRpc + +func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { + return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} +} + +func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { + return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} +} + +var _ decDriver = (*msgpackDecDriver)(nil) +var _ encDriver = (*msgpackEncDriver)(nil) diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/rpc.go b/vendor/github.com/hashicorp/go-msgpack/v2/codec/rpc.go new file mode 100644 index 0000000000000..3fa9f547aec06 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/rpc.go @@ -0,0 +1,227 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "bufio" + "errors" + "io" + "net/rpc" +) + +var errRpcJsonNeedsTermWhitespace = errors.New("rpc requires JsonHandle with TermWhitespace=true") + +// Rpc provides a rpc Server or Client Codec for rpc communication. +type Rpc interface { + ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec + ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec +} + +// RPCOptions holds options specific to rpc functionality +type RPCOptions struct { + // RPCNoBuffer configures whether we attempt to buffer reads and writes during RPC calls. + // + // Set RPCNoBuffer=true to turn buffering off. + // Buffering can still be done if buffered connections are passed in, or + // buffering is configured on the handle. + RPCNoBuffer bool +} + +// rpcCodec defines the struct members and common methods. +type rpcCodec struct { + c io.Closer + r io.Reader + w io.Writer + f ioFlusher + + dec *Decoder + enc *Encoder + // bw *bufio.Writer + // br *bufio.Reader + h Handle + + cls atomicClsErr +} + +func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec { + // return newRPCCodec2(bufio.NewReader(conn), bufio.NewWriter(conn), conn, h) + return newRPCCodec2(conn, conn, conn, h) +} + +func newRPCCodec2(r io.Reader, w io.Writer, c io.Closer, h Handle) rpcCodec { + // defensive: ensure that jsonH has TermWhitespace turned on. + if jsonH, ok := h.(*JsonHandle); ok && !jsonH.TermWhitespace { + panic(errRpcJsonNeedsTermWhitespace) + } + // always ensure that we use a flusher, and always flush what was written to the connection. + // we lose nothing by using a buffered writer internally. 
+ f, ok := w.(ioFlusher) + bh := basicHandle(h) + if !bh.RPCNoBuffer { + if bh.WriterBufferSize <= 0 { + if !ok { + bw := bufio.NewWriter(w) + f, w = bw, bw + } + } + if bh.ReaderBufferSize <= 0 { + if _, ok = w.(ioPeeker); !ok { + if _, ok = w.(ioBuffered); !ok { + br := bufio.NewReader(r) + r = br + } + } + } + } + return rpcCodec{ + c: c, + w: w, + r: r, + f: f, + h: h, + enc: NewEncoder(w, h), + dec: NewDecoder(r, h), + } +} + +func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2 bool) (err error) { + if c.c != nil { + cls := c.cls.load() + if cls.closed { + return cls.errClosed + } + } + err = c.enc.Encode(obj1) + if err == nil { + if writeObj2 { + err = c.enc.Encode(obj2) + } + // if err == nil && c.f != nil { + // err = c.f.Flush() + // } + } + if c.f != nil { + if err == nil { + err = c.f.Flush() + } else { + _ = c.f.Flush() // swallow flush error, so we maintain prior error on write + } + } + return +} + +func (c *rpcCodec) swallow(err *error) { + defer panicToErr(c.dec, err) + c.dec.swallow() +} + +func (c *rpcCodec) read(obj interface{}) (err error) { + if c.c != nil { + cls := c.cls.load() + if cls.closed { + return cls.errClosed + } + } + //If nil is passed in, we should read and discard + if obj == nil { + // var obj2 interface{} + // return c.dec.Decode(&obj2) + c.swallow(&err) + return + } + return c.dec.Decode(obj) +} + +func (c *rpcCodec) Close() error { + if c.c == nil { + return nil + } + cls := c.cls.load() + if cls.closed { + return cls.errClosed + } + cls.errClosed = c.c.Close() + cls.closed = true + c.cls.store(cls) + return cls.errClosed +} + +func (c *rpcCodec) ReadResponseBody(body interface{}) error { + return c.read(body) +} + +// ------------------------------------- + +type goRpcCodec struct { + rpcCodec +} + +func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { + return c.write(r, body, true) +} + +func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { + return c.write(r, body, true) +} + +func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error { + return c.read(r) +} + +func (c *goRpcCodec) ReadRequestHeader(r *rpc.Request) error { + return c.read(r) +} + +func (c *goRpcCodec) ReadRequestBody(body interface{}) error { + return c.read(body) +} + +// ------------------------------------- + +// goRpc is the implementation of Rpc that uses the communication protocol +// as defined in net/rpc package. +type goRpc struct{} + +// GoRpc implements Rpc using the communication protocol defined in net/rpc package. +// +// Note: network connection (from net.Dial, of type io.ReadWriteCloser) is not buffered. +// +// For performance, you should configure WriterBufferSize and ReaderBufferSize on the handle. +// This ensures we use an adequate buffer during reading and writing. +// If not configured, we will internally initialize and use a buffer during reads and writes. +// This can be turned off via the RPCNoBuffer option on the Handle. 
+// +// var handle codec.JsonHandle +// handle.RPCNoBuffer = true // turns off attempt by rpc module to initialize a buffer +// +// Example 1: one way of configuring buffering explicitly: +// +// var handle codec.JsonHandle // codec handle +// handle.ReaderBufferSize = 1024 +// handle.WriterBufferSize = 1024 +// var conn io.ReadWriteCloser // connection got from a socket +// var serverCodec = GoRpc.ServerCodec(conn, handle) +// var clientCodec = GoRpc.ClientCodec(conn, handle) +// +// Example 2: you can also explicitly create a buffered connection yourself, +// and not worry about configuring the buffer sizes in the Handle. +// +// var handle codec.Handle // codec handle +// var conn io.ReadWriteCloser // connection got from a socket +// var bufconn = struct { // bufconn here is a buffered io.ReadWriteCloser +// io.Closer +// *bufio.Reader +// *bufio.Writer +// }{conn, bufio.NewReader(conn), bufio.NewWriter(conn)} +// var serverCodec = GoRpc.ServerCodec(bufconn, handle) +// var clientCodec = GoRpc.ClientCodec(bufconn, handle) +var GoRpc goRpc + +func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { + return &goRpcCodec{newRPCCodec(conn, h)} +} + +func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { + return &goRpcCodec{newRPCCodec(conn, h)} +} diff --git a/vendor/github.com/hashicorp/go-msgpack/v2/codec/test.py b/vendor/github.com/hashicorp/go-msgpack/v2/codec/test.py new file mode 100644 index 0000000000000..8418fee4d1eb6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/v2/codec/test.py @@ -0,0 +1,122 @@ +#!/usr/bin/env python + +# This will create golden files in a directory passed to it. +# A Test calls this internally to create the golden files +# So it can process them (so we don't have to checkin the files). 
+ +# Ensure msgpack-python is installed first, using: +# sudo apt-get install python-dev +# sudo apt-get install python-pip +# pip install --user msgpack-python msgpack-rpc-python + +# Ensure all "string" keys are utf strings (else encoded as bytes) + +import msgpack, msgpackrpc, sys, os, threading + +def get_test_data_list(): + # get list with all primitive types, and a combo type + l0 = [ + -8, + -1616, + -32323232, + -6464646464646464, + 192, + 1616, + 32323232, + 6464646464646464, + 192, + -3232.0, + -6464646464.0, + 3232.0, + 6464.0, + 6464646464.0, + False, + True, + u"null", + None, + u"some&day>some 0 + if stopTimeSec > 0: + def myStopRpcServer(): + server.stop() + t = threading.Timer(stopTimeSec, myStopRpcServer) + t.start() + server.start() + +def doRpcClientToPythonSvc(port): + address = msgpackrpc.Address('127.0.0.1', port) + client = msgpackrpc.Client(address, unpack_encoding='utf-8') + print(client.call("Echo123", "A1", "B2", "C3")) + print(client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})) + +def doRpcClientToGoSvc(port): + # print ">>>> port: ", port, " <<<<<" + address = msgpackrpc.Address('127.0.0.1', port) + client = msgpackrpc.Client(address, unpack_encoding='utf-8') + print(client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])) + print(client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})) + +def doMain(args): + if len(args) == 2 and args[0] == "testdata": + build_test_data(args[1]) + elif len(args) == 3 and args[0] == "rpc-server": + doRpcServer(int(args[1]), int(args[2])) + elif len(args) == 2 and args[0] == "rpc-client-python-service": + doRpcClientToPythonSvc(int(args[1])) + elif len(args) == 2 and args[0] == "rpc-client-go-service": + doRpcClientToGoSvc(int(args[1])) + else: + print("Usage: test.py " + + "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...") + +if __name__ == "__main__": + doMain(sys.argv[1:]) + diff --git a/vendor/github.com/hashicorp/raft-wal/.copywrite.hcl b/vendor/github.com/hashicorp/raft-wal/.copywrite.hcl new file mode 100644 index 0000000000000..5e1977c2a0919 --- /dev/null +++ b/vendor/github.com/hashicorp/raft-wal/.copywrite.hcl @@ -0,0 +1,11 @@ +schema_version = 1 + +project { + license = "MPL-2.0" + copyright_year = 2022 + + # (OPTIONAL) A list of globs that should not have copyright/license headers. + # Supports doublestar glob patterns for more flexibility in defining which + # files or folders should be ignored (e.g., "vendors/**") + header_ignore = [] +} diff --git a/vendor/github.com/hashicorp/raft-wal/.gitignore b/vendor/github.com/hashicorp/raft-wal/.gitignore new file mode 100644 index 0000000000000..17b6e86df9dd7 --- /dev/null +++ b/vendor/github.com/hashicorp/raft-wal/.gitignore @@ -0,0 +1,3 @@ +/.idea/ +bench.test +profile.out \ No newline at end of file diff --git a/vendor/github.com/hashicorp/raft-wal/CODEOWNERS b/vendor/github.com/hashicorp/raft-wal/CODEOWNERS new file mode 100644 index 0000000000000..13a3e0d34e62a --- /dev/null +++ b/vendor/github.com/hashicorp/raft-wal/CODEOWNERS @@ -0,0 +1,3 @@ +# Copyright (c) HashiCorp, Inc. + +* @consul-core-reviewers @raft-force \ No newline at end of file diff --git a/vendor/github.com/hashicorp/raft-wal/LICENSE b/vendor/github.com/hashicorp/raft-wal/LICENSE new file mode 100644 index 0000000000000..9803e74705189 --- /dev/null +++ b/vendor/github.com/hashicorp/raft-wal/LICENSE @@ -0,0 +1,375 @@ +Copyright (c) 2022 HashiCorp, Inc. + +Mozilla Public License Version 2.0 +================================== + +1. 
Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. 
Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. 
However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. 
* +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. \ No newline at end of file diff --git a/vendor/github.com/hashicorp/raft-wal/NOTICE.txt b/vendor/github.com/hashicorp/raft-wal/NOTICE.txt new file mode 100644 index 0000000000000..65f9d0509caef --- /dev/null +++ b/vendor/github.com/hashicorp/raft-wal/NOTICE.txt @@ -0,0 +1,4 @@ +HashiCorp Raft WAL Library +https://www.hashicorp.com/ +License: Mozilla Public License Version 2.0 +Copyright 2022 HashiCorp, Inc. 
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/raft-wal/README.md b/vendor/github.com/hashicorp/raft-wal/README.md
new file mode 100644
index 0000000000000..f98d0e59ea33b
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft-wal/README.md
@@ -0,0 +1,752 @@
+# Raft WAL
+
+This library implements a Write-Ahead Log (WAL) suitable for use with
+[`hashicorp/raft`](https://github.com/hashicorp/raft).
+
+Specifically, the library provides an instance of raft's `LogStore` and
+`StableStore` interfaces for storing both raft logs and the other small items
+that require stable storage (like which term the node last voted in).
+
+**This library is still considered experimental!**
+
+It is complete and reasonably well tested so far but we plan to complete more
+rigorous end-to-end testing and performance analysis within our products and
+together with some of our users before we consider this safe for production.
+
+The advantages of this library over `hashicorp/raft-boltdb`, which has been
+used for many years in HashiCorp products, are:
+ 1. Efficient truncations that don't cause later appends to slow down due to
+    free space tracking issues in BoltDB's btree.
+ 2. More efficient appends due to only one fsync per append vs two in BoltDB.
+ 3. A more efficient and suitable on-disk structure for a log vs a
+    copy-on-write BTree.
+
+We aim to provide crash resiliency roughly equivalent to respected storage
+systems such as SQLite, LevelDB/RocksDB and etcd. BoltDB technically has a
+stronger property due to its page-aligned model (no partial sector
+overwrites). We initially [designed a WAL on the same principles](/01-WAL-pages.md),
+however we felt that the additional complexity it adds wasn't justified given
+the weaker assumptions that many of the other battle-tested systems above use.
+
+Our design goals for crash recovery are:
+
+ - Crashes at any point must not lose committed log entries or result in a
+   corrupt file, even if in-flight sector writes are not atomic.
+ - We _do_ assume [Powersafe Overwrites](#powersafe-overwrites-psow) where
+   partial sectors can be appended to without corrupting existing data even in
+   a power failure.
+ - Latent errors (i.e. silent corruption in the FS or disk) _may_ be detected
+   during a read, but ultimately we assume the file-system and disk are
+   responsible for detecting them (i.e. we don't validate checksums on every
+   record read). This is equivalent to SQLite, LMDB, BoltDB, etc.
+
+See the [system assumptions](#system-assumptions) and [crash
+safety](#crash-safety) sections for more details.
+
+## Limitations
+
+Here are some notable (but we think acceptable) limitations of this design.
+
+ * Segment files can't be larger than 4GiB. (Current default is 64MiB).
+ * Individual records can't be larger than 4GiB without changing the format.
+   (Current limit is 64MiB).
+ * Appended log entries must have monotonically increasing `Index` fields with
+   no gaps (though they may start at any index in an empty log); see the
+   sketch after this list.
+ * Only head or tail truncations are supported. `DeleteRange` will error if
+   the range is not a prefix or suffix of the log. `hashicorp/raft` never
+   needs that.
+ * No encryption or compression support.
+   * Though we do provide a pluggable entry codec and internally treat each
+     entry as opaque bytes, so it's possible to supply a custom codec that
+     transforms entries in any way desired.
+ * If the segment tail file is lost _after_ entries are committed to it, due
+   to manual intervention or a filesystem bug, the WAL can't distinguish that
+   from a crash during rotation that left the file missing, since we don't
+   update metadata on every append for performance reasons. In most other
+   cases, missing data would be detected on recovery and fail the recovery to
+   protect from silent data loss, but in this particular case that's not
+   possible without significantly impacting performance in the steady state by
+   updating the last committed entry in the meta DB on every append. We assume
+   this is reasonable since previous LogStore implementations would also
+   "silently" lose data if the database files were removed too.
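+
+The monotonic-index constraint above is easy to state in code. The following
+is a minimal, hypothetical sketch of the check an append path could perform;
+the helper name and shape are ours for illustration and are not part of this
+library's API:
+
+```go
+package main
+
+import (
+	"errors"
+	"fmt"
+)
+
+// checkAppendBatch verifies that a batch of entry indexes continues the log
+// contiguously from lastIndex. If the log is empty (lastIndex == 0), the
+// batch may start at any index, but must still be gap-free.
+func checkAppendBatch(lastIndex uint64, batch []uint64) error {
+	next := lastIndex + 1
+	for i, idx := range batch {
+		if lastIndex == 0 && i == 0 {
+			next = idx // an empty log may start anywhere
+		}
+		if idx != next {
+			return errors.New("appended entries must be monotonic with no gaps")
+		}
+		next++
+	}
+	return nil
+}
+
+func main() {
+	fmt.Println(checkAppendBatch(5, []uint64{6, 7, 8})) // <nil>
+	fmt.Println(checkAppendBatch(5, []uint64{6, 8}))    // error: gap at 7
+}
+```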
+
+## Storage Format Overview
+
+The WAL has two types of file: a meta store and one or more log segments.
+
+### Meta Store
+
+We need to provide a `StableStore` interface for small amounts of Raft data.
+We also need to store some metadata about log segments to simplify managing
+them in an atomic and crash-safe way.
+
+Since this data is _generally_ small we could invent our own storage format
+with some sort of double-buffering and limit ourselves to a single page of
+data, etc. But since performance is not critical for metadata operations and
+the size is extremely unlikely to get larger than a few KiB, we instead choose
+the pragmatic approach of using BoltDB for our `wal-meta.db`.
+
+The meta database contains two buckets: `stable` containing key/values
+persisted by Raft via the `StableStore` interface, and `wal-state` which
+contains the source-of-truth metadata about which segment files should be
+considered part of the current log.
+
+The `wal-state` bucket contains one record with all the state, since it's only
+loaded or persisted in one atomic batch and is small. The state is just a JSON
+encoded object described by the following structs. JSON encoding is used as
+this is not performance sensitive and it's simpler to work with and more human
+readable.
+
+```go
+type PersistentState struct {
+	NextSegmentID uint64
+	Segments      []SegmentInfo
+}
+type SegmentInfo struct {
+	ID         uint64
+	BaseIndex  uint64
+	MinIndex   uint64
+	MaxIndex   uint64
+	Codec      uint64
+	IndexStart uint64
+	CreateTime time.Time
+	SealTime   time.Time
+}
+```
+
+The last segment (the one with the highest `BaseIndex`) is the "tail" and must
+be the only one where `SealTime = 0` (i.e. it's unsealed). `IndexStart` and
+`MaxIndex` are also zero until the segment is sealed.
+
+Why use BoltDB when the main motivation for this library is that the existing
+BoltDB `LogStore` has performance issues?
+
+Well, the major performance issue in `raft-boltdb` occurs when a large amount
+of log data is written and then truncated: the overhead of tracking all the
+free space in the file makes further appends slower.
+
+Our use here is orders of magnitude lighter than storing all log data. As an
+example, let's assume we allow 100GiB of logs to be kept around, which is at
+least an order of magnitude larger than the largest currently known Consul
+user's worst-case log size, and two orders of magnitude more than the largest
+Consul deployment's steady state. Assuming fixed 64MiB segments, that would
+require about 1600 segments, which encode to about 125 bytes of JSON each.
+Even at this extreme, the meta DB only has to hold under 200KiB.
+
+Even if a truncation occurs that reduces that all the way back to a single
+segment, 200KiB is only a hundred or so pages (allowing for btree overhead) so
+the free list will never be larger than a single 4KB page.
+
+On top of that, we only pay the cost of a write to BoltDB for metadata
+transactions: rotating to a new segment, or truncating. The vast majority of
+appends only need to append to a log segment.
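+
+To make the trade-off concrete, here is a minimal sketch (with invented field
+values purely for illustration) of encoding that single `wal-state` record
+with `encoding/json`, using the structs above; each `SegmentInfo` serializes
+to roughly the ~125 bytes assumed in the estimate above:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+)
+
+// These mirror the structs above; in the real library this record lives in
+// the wal-state bucket of wal-meta.db.
+type SegmentInfo struct {
+	ID         uint64
+	BaseIndex  uint64
+	MinIndex   uint64
+	MaxIndex   uint64
+	Codec      uint64
+	IndexStart uint64
+	CreateTime time.Time
+	SealTime   time.Time
+}
+
+type PersistentState struct {
+	NextSegmentID uint64
+	Segments      []SegmentInfo
+}
+
+func main() {
+	state := PersistentState{
+		NextSegmentID: 2,
+		Segments: []SegmentInfo{{
+			ID:         1,
+			BaseIndex:  1,
+			CreateTime: time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC),
+			// SealTime left zero: this segment is the unsealed tail.
+		}},
+	}
+	buf, err := json.Marshal(state) // one small record, so JSON cost is fine
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(string(buf))
+}
+```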
+
+Even if a truncation occurs that reduces the log all the way back to a single
+segment, 200KiB is only a hundred or so pages (allowing for btree overhead) so
+the free list will never be larger than a single 4KB page.
+
+On top of that, we only pay the cost of a write to BoltDB for metadata
+transactions: rotating to a new segment, or truncating. The vast majority of
+appends only need to append to a log segment.
+
+### Segment Files
+
+Segment files are pre-allocated (if supported by the filesystem) on creation to
+a fixed size. By default we use 64MiB segment files. This section defines the
+encoding for those files. All integer types are encoded in little-endian order.
+
+The file starts with a fixed-size header that is written once before the
+first committed entries.
+
+```
+0      1      2      3      4      5      6      7      8
++------+------+------+------+------+------+------+------+
+| Magic                     | Reserved           | Vsn  |
++------+------+------+------+------+------+------+------+
+| BaseIndex                                             |
++------+------+------+------+------+------+------+------+
+| SegmentID                                             |
++------+------+------+------+------+------+------+------+
+| Codec                                                 |
++------+------+------+------+------+------+------+------+
+```
+
+| Field       | Type      | Description |
+| ----------- | --------- | ----------- |
+| `Magic`     | `uint32`  | The randomly chosen value `0x58eb6b0d`. |
+| `Reserved`  | `[3]byte` | Bytes reserved for future file flags. |
+| `Vsn`       | `uint8`   | The version of the file, currently `0x0`. |
+| `BaseIndex` | `uint64`  | The raft Index of the first entry that will be stored in this file. |
+| `SegmentID` | `uint64`  | A unique identifier for this segment file. |
+| `Codec`     | `uint64`  | The codec used to write the file. |
+
+Each segment file is named `<BaseIndex>-<SegmentID>.wal`. `BaseIndex` is
+formatted in decimal with leading zeros and a fixed width of 20 chars.
+`SegmentID` is formatted in lower-case hex with zero padding to 16 chars wide.
+This has the nice property of them sorting lexicographically in the directory,
+although we don't rely on that.
+
+### Frames
+
+Log entries are stored in consecutive frames after the header. As well as log
+entry frames there are a few metadata frame types too. Each frame starts with
+an 8-byte header.
+
+```
+0      1      2      3      4      5      6      7      8
++------+------+------+------+------+------+------+------+
+| Type | Reserved           | Length/CRC                |
++------+------+------+------+------+------+------+------+
+```
+
+| Field        | Type     | Description |
+| ------------ | -------- | ----------- |
+| `Type`       | `uint8`  | The frame type. See below. |
+| `Length/CRC` | `uint32` | Depends on Type. See below. |
+
+| Type | Value | Description |
+| ---- | ----- | ----------- |
+| `Invalid` | `0x0` | The frame is invalid. We make the zero value invalid so we can detect unwritten frames cleanly. |
+| `Entry`   | `0x1` | The frame contains an entire log entry. |
+| `Index`   | `0x2` | The frame contains an index array, not actual log entries. |
+| `Commit`  | `0x3` | The frame contains a CRC for all data written in a batch. |
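+
+To make the layout concrete, here is a small, illustrative sketch (not the
+library's actual code) that packs one of these 8-byte frame headers and
+computes the implicit alignment padding described in the Alignment section
+below:
+
+```go
+package main
+
+import (
+	"encoding/binary"
+	"fmt"
+)
+
+// Frame type values from the table above.
+const (
+	frameInvalid byte = 0x0
+	frameEntry   byte = 0x1
+	frameIndex   byte = 0x2
+	frameCommit  byte = 0x3
+)
+
+// encodeFrameHeader packs the header described above: one type byte, three
+// reserved (zero) bytes, then a little-endian uint32 Length (or CRC for
+// commit frames).
+func encodeFrameHeader(typ byte, lengthOrCRC uint32) [8]byte {
+	var h [8]byte
+	h[0] = typ
+	// h[1:4] are reserved and left zero.
+	binary.LittleEndian.PutUint32(h[4:], lengthOrCRC)
+	return h
+}
+
+// padding returns how many implicit null bytes follow a payload of the given
+// length so that the next frame header stays 8-byte aligned.
+func padding(length uint32) uint32 {
+	return (8 - length%8) % 8
+}
+
+func main() {
+	h := encodeFrameHeader(frameEntry, 21)
+	fmt.Printf("header: % x, padding: %d\n", h, padding(21)) // padding: 3
+}
+```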
+#### Index Frame
+
+An index frame payload is an array of `uint32` file offsets for the
+corresponding records. The first element of the array contains the file offset
+of the frame containing the first entry in the segment and so on.
+
+`Length` is used to indicate the length in bytes of the array (i.e. the number
+of entries in the segment is `Length/4`).
+
+Index frames are written only when the segment is sealed, and a commit frame
+follows to validate the final write.
+
+#### Commit Frame
+
+A Commit frame marks the last write before fsync is called. In order to detect
+incomplete or torn writes on recovery, the commit frame stores a CRC of all
+the bytes appended since the last fsync.
+
+`CRC` is used to specify a CRC32 (Castagnoli) over all bytes written since the
+last fsync. That is, since just after the last commit frame, or just after the
+file header.
+
+There may also be 4 bytes of padding to keep alignment. Later we could
+use these too.
+
+#### Alignment
+
+All frame headers are written with 8-byte alignment to ensure they remain in a
+single disk sector. We don't entirely depend on atomic sector writes for
+correctness, but it's a simple way to improve our chances of being able to
+read through the file on a recovery with some sectors missing.
+
+We add an implicit 0-7 null bytes after each frame to ensure the next frame
+header is aligned. This padding is _not_ represented in `Length` but it is
+always present and is deterministic by rounding up `Length` to the nearest
+multiple of 8. It is always accounted for when reading, and CRCs are
+calculated over the raw bytes written so they always include the padding
+(zero) bytes.
+
+Despite alignment we still don't blindly trust that the headers we read are
+valid. A CRC mismatch or invalid record format indicates torn writes in the
+last batch written, and we always safety-check the size of lengths read before
+allocating memory for them - entry lengths can't be bigger than the
+`MaxEntrySize` which we default to 64MiB.
+
+### Sealing
+
+Once a segment file has grown larger than the configured soft-limit (64MiB
+default), we "seal" it. This process involves:
+
+ 1. Write out the in-memory index of record offsets to an index frame.
+ 2. Write a commit frame to validate all bytes appended in this final append
+    (which probably included one or more records that took the segment file
+    over the limit).
+ 3. Return the final `IndexStart` to be stored in `wal-meta.db`.
+
+Sealed files can have their indexes read directly on open from the IndexStart
+in `wal-meta.db`, so records can be looked up in constant time.
+
+## Log Lookup by Index
+
+For an unsealed segment we first look up the offset in the in-memory index.
+
+For a sealed segment we can discover the index frame location from the
+metadata and then perform a read at the right location in the file to look up
+the record's offset. Implementations may choose to cache or memory-map the
+index array, but we will initially just read the specific entry we need each
+time and assume the OS page cache will make that fast for frequently accessed
+index areas or in-order traversals. We don't have to read the whole index,
+just the 4-byte entry we care about, since we can work out its offset from
+IndexStart, the BaseIndex of the segment, and the Index being searched for.
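+
+As a concrete illustration of that constant-time lookup, the sketch below
+(illustrative only, and simplified: it assumes `IndexStart` points directly at
+the first element of the index array, ignoring any frame header) computes the
+file offset of the 4-byte index entry for a given raft index:
+
+```go
+package main
+
+import "fmt"
+
+// indexEntryOffset returns the file offset of the uint32 index array entry
+// for raft index idx in a sealed segment, given the segment's BaseIndex and
+// the IndexStart stored in wal-meta.db.
+func indexEntryOffset(indexStart, baseIndex, idx uint64) uint64 {
+	return indexStart + (idx-baseIndex)*4
+}
+
+func main() {
+	// Hypothetical sealed segment starting at raft index 1,000,000 whose
+	// index array begins at file offset 67,000,000.
+	fmt.Println(indexEntryOffset(67000000, 1000000, 1000123)) // 67000492
+}
+```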
+
+# Crash Safety
+
+Crash safety must be maintained through three types of write operation:
+appending a batch of entries, truncating the oldest (head) entries, and
+truncating the newest (tail) entries.
+
+## Appending Entries
+
+We want to `fsync` only once for an append batch, however many entries were in
+it. We assume [Powersafe Overwrites](#powersafe-overwrites-psow) or PSOW, a
+weaker assumption than atomic sector writes in general. Thanks to PSOW, we
+assume we can start appending at the tail of the file right after previously
+committed entries, even if the new entries will be written to the same sector
+as the older entries, and that the system will never corrupt the already
+committed part of the sector even if it is not atomic and arbitrarily garbles
+the part of the sector we actually did write.
+
+At the end of the batch we write a `Commit` frame containing the CRC over the
+data written during the current batch.
+
+In a crash one of the following states occurs:
+ 1. All sectors modified across all frames make it to disk (crash _after_
+    fsync).
+ 2. A torn write: one or more sectors, anywhere in the modified tail of the
+    file, might not be persisted. We don't assume they are zero; they might be
+    arbitrarily garbled (crash _before_ fsync).
+
+We can check which one of these is true with the recovery procedure outlined
+below. If we find the last batch _was_ torn, it must not have been
+acknowledged to Raft yet (since `fsync` can't have returned) and so it is safe
+to assume that the previous commit frame is the tail of the log we've actually
+acknowledged.
+
+### Recovery
+
+We cover recovering the segments generally below since we have to account for
+truncations. All segments except the tail were fsynced during seal before the
+new tail was added to the meta DB, so we can assume they all made it to disk
+if a later tail was added.
+
+On startup we just need to recover the tail log as follows:
+
+ 1. If the file doesn't exist, create it from Meta DB information. DONE.
+ 2. Open the file and validate the header matches the filename. If not, delete
+    it and go to 1.
+ 3. Read all records in the file in sequence, keeping track of the last two
+    commit frames observed.
+    1. If the file ends with a corrupt frame or a non-commit frame, discard
+       anything after the last commit frame. We're DONE because we wouldn't
+       have written extra frames after a commit until fsync completed, so this
+       commit must have been acknowledged.
+    2. Else the file ends with a commit frame. Validate its checksum (see the
+       sketch after this list). If it is good, DONE.
+    3. If the CRC is not good then discard everything back to the previous
+       commit frame and DONE.
+ 4. If we read an index frame in that process and the commit frame preceding
+    it is the new tail, then mark the segment as sealed and return the seal
+    info (a crash occurred after seal but before updating `wal-meta.db`).
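+
+The core of that recovery check (step 3 above) is just a CRC32 (Castagnoli)
+comparison over the raw bytes of the last batch. A minimal sketch (not the
+library's actual recovery code) of that check:
+
+```go
+package main
+
+import (
+	"fmt"
+	"hash/crc32"
+)
+
+var castagnoli = crc32.MakeTable(crc32.Castagnoli)
+
+// batchIntact reports whether the CRC stored in a commit frame matches a
+// CRC32 (Castagnoli) over every raw byte appended since the previous commit
+// frame (or the file header), including alignment padding bytes.
+func batchIntact(batch []byte, storedCRC uint32) bool {
+	return crc32.Checksum(batch, castagnoli) == storedCRC
+}
+
+func main() {
+	batch := []byte("frames written in one append batch, padding included")
+	crc := crc32.Checksum(batch, castagnoli)
+	fmt.Println(batchIntact(batch, crc)) // true: batch fully persisted
+
+	batch[0] ^= 0xFF // simulate a torn sector garbling the batch
+	// false: discard back to the previous commit frame, which is safe since
+	// fsync never returned and the batch was never acknowledged to Raft.
+	fmt.Println(batchIntact(batch, crc))
+}
+```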
+
+## Head Truncations
+
+The most common form of truncation is a "head" truncation: removing the oldest
+prefix of entries after a periodic snapshot has been made, to reclaim space.
+
+To be crash safe we can't rely on atomically updating or deleting multiple
+segment files. The process looks like this.
+
+ 1. In one transaction on the Meta DB:
+    1. Update the `meta.min_index` to be the new min.
+    2. Delete any segments from the `segments` bucket that are sealed and
+       where their highest index is less than the new min index.
+    3. Commit the Txn. This is the commit point for crash recovery.
+ 2. Update in-memory segment state to match (if not done already with a lock
+    held).
+ 3. Delete any segment files we just removed from the meta DB.
+
+### Recovery
+
+The metadata update is crash safe thanks to BoltDB being the source of truth.
+
+ 1. Reload meta state from the Meta DB.
+ 2. Walk the files in the dir.
+ 3. For each one:
+    1. Check if that file is present in the Meta DB. If not, mark it for
+       deletion.
+    2. (optionally) Validate the file header, file size and final block
+       trailer to ensure the file appears to be well-formed and contains the
+       expected data.
+ 4. Delete the segments marked obsolete (this could be done in a background
+    thread).
+
+## Tail Truncations
+
+Raft occasionally needs to truncate entries from the tail of the log, i.e.
+remove the _most recent_ N entries. This can occur when a follower has
+replicated entries from an old leader that was partitioned with it, but later
+discovers they conflict with entries committed by the new leader in a later
+term. The bounds on how long a partitioned leader can continue to replicate to
+a follower are generally pretty small (30 seconds or so) so it's unlikely that
+the number of records to be truncated will ever be large compared to the size
+of a segment file, but we have to account for needing to delete one or more
+segment files from the tail, as well as truncate older entries out of the new
+tail.
+
+This follows roughly the same pattern as head truncation, although there is an
+added complication. A naive implementation that used only the baseIndex as a
+segment file name could in theory get into a tricky state where it's ambiguous
+whether the tail segment is an old one that was logically truncated away but
+that we crashed before actually unlinking, or a new replacement with committed
+data in it.
+
+It's possible to solve this with complex transactional semantics but we take
+the simpler approach of just assigning every segment a unique identifier
+separate from its baseIndex. So truncating the tail follows the same procedure
+as for the head above: segments we remove from the Meta DB can be
+unambiguously deleted on recovery because their IDs won't match, even if later
+segments end up with the same baseIndex.
+
+Since these truncations are generally rare and disk space is generally not a
+major bottleneck, we also choose not to try to actually re-use a segment file
+that was previously written and sealed (by truncating it etc.). Instead we
+just mark it as "sealed" in the Meta DB with a MaxIndex of the highest index
+left after the truncation (which we check on reads) and start a new segment at
+the next index.
+
+## System Assumptions
+
+There are no straight answers to any question about which guarantees can be
+relied on across operating systems, file systems, raid controllers and
+hardware devices. We state [our assumptions](#our-assumptions) followed by a
+summary of the assumptions made by some other respectable sources for
+comparison.
+
+### Our Assumptions
+
+We've tried to make the weakest assumptions we can while still keeping things
+relatively simple and performant.
+
+We assume:
+ 1. That while silent latent errors are possible, they are generally rare and
+    there's not a whole lot we can do other than return a `Corrupt` error on
+    read. In most cases the hardware or filesystem will detect and return an
+    error on read anyway for latent corruption. Not doing so is regarded as a
+    bug in the OS/filesystem/hardware. For this reason we don't go out of our
+    way to checksum everything to protect against "bitrot". This is roughly
+    equivalent to the assumptions in BoltDB, LMDB and SQLite.
+
+    While we respect the work in [Protocol Aware Recovery for Consensus-based
+    Storage](https://www.usenix.org/system/files/conference/fast18/fast18-alagappan.pdf)
+    we choose not to implement a WAL format that allows identifying the index
+    and term of "lost" records on read errors so they can be recovered from
+    peers. This is mostly for the pragmatic reason that the Raft library this
+    is designed to work with would need a major re-write to take advantage of
+    that anyway. The proposed format in that paper also seems to make stronger
+    assumptions about sector atomicity than we are comfortable with.
+ 2. That sector writes are _not_ atomic. (Equivalent to SQLite, weaker than
+    almost everything else.)
+ 3. That writing a partial sector does _not_ corrupt any already stored data
+    in that sector outside of the range being written
+    ([PSOW](#powersafe-overwrites-psow)). (Equivalent to SQLite's defaults,
+    RocksDB and Etcd.)
+ 4. That `fsync` as implemented in Go's standard library actually flushes all
+    written sectors of the file to persistent media.
+ 5. That `fsync` on a parent dir is sufficient to ensure newly created files
+    are not lost after a crash (assuming the file itself was written and
+    `fsync`ed first).
+ 6. That appending to files may not be atomic since the filesystem metadata
+    about the size of the file may not be updated atomically with the data.
+    Generally we pre-allocate files where possible without writing all zeros,
+    but we do potentially extend them if the last batch doesn't fit into the
+    allocated space or the filesystem doesn't support pre-allocation. Either
+    way we don't rely on the filesystem's reported size and validate the tail
+    is coherent on recovery.
+
+### Published Paper on Consensus Disk Recovery
+
+In the paper on [Protocol Aware Recovery for Consensus-based
+Storage](https://www.usenix.org/system/files/conference/fast18/fast18-alagappan.pdf)
+the authors assume that corruptions of the log can happen due to either torn
+writes (for multi-sector appends) or latent corruptions after commit. They
+explain the need to detect which it was, because torn writes only lose
+un-acknowledged records and so are safe to detect and truncate, while
+corruption of previously committed records impacts the correctness of the
+protocol more generally. Their whole paper seems to indicate that these
+post-commit corruptions are a major problem that needs to be correctly handled
+(which may well be true). On the flip side, their WAL format design writes a
+separate index and log, and explicitly assumes that because the index entries
+are smaller than a 512 byte sector size, those are safe from corruption during
+a write.
+
+The core assumptions here are:
+ 1. Latent, silent corruption of committed data needs to be detected at the
+    application layer with a checksum per record, checked on every read.
+ 2. Sector writes are atomic.
+ 3. Sector writes have [powersafe overwrites](#powersafe-overwrites-psow).
+
+### SQLite
+
+The SQLite authors have a [detailed explanation of their system
+assumptions](https://www.sqlite.org/atomiccommit.html) which impact the
+correctness of atomic database commits.
+
+> SQLite assumes that the detection and/or correction of bit errors caused by cosmic rays, thermal noise, quantum fluctuations, device driver bugs, or other mechanisms, is the responsibility of the underlying hardware and operating system. SQLite does not add any redundancy to the database file for the purpose of detecting corruption or I/O errors. SQLite assumes that the data it reads is exactly the same data that it previously wrote.
+
+This is very different from the paper authors above, whose main point is
+predicated on how to recover from silent corruptions of the file caused by
+hardware, firmware or filesystem errors on read.
+
+Note that this is a pragmatic position rather than a naive one: the authors
+are certainly aware that file-systems have bugs, that faulty raid controllers
+exist and even that hardware anomalies like high-flying or poorly tracking
+disk heads can happen, but choose _not_ to protect against that _at all_. See
+their [briefing for linux kernel
+developers](https://sqlite.org/lpc2019/doc/trunk/briefing.md) for more details
+on the uncertainty they understand exists around these areas.
+
+> SQLite has traditionally assumed that a sector write is not atomic.
+
+These statements are on a page with this disclaimer:
+
+> The information in this article applies only when SQLite is operating in "rollback mode", or in other words when SQLite is not using a write-ahead log.
+
+[WAL mode](https://sqlite.org/wal.html) docs are less explicit on assumptions
+and how crash recovery is achieved, but we can infer some things from the
+[file format](https://sqlite.org/fileformat2.html#walformat) and
+[code](https://github.com/sqlite/sqlite/blob/master/src/wal.c) though.
+
+> The WAL header is 32 bytes in size...
+
+> Immediately following the wal-header are zero or more frames. Each frame consists of a 24-byte frame-header followed by a page-size bytes of page data.
+
+So each dirty page is appended with a 24 byte header, making it _not_ sector
+aligned even though pages must be a multiple of sector size.
+
+Commit frames are also appended in the same way (and fsync called if enabled
+as an option). If fsync is enabled though (and POWERSAFE_OVERWRITE disabled),
+SQLite will "pad" to the next sector boundary (or beyond) by repeating the
+last frame until it's passed that boundary. For some reason, they take great
+care to write up to the sector boundary, sync, then write the rest. I assume
+this is just to avoid waiting to flush the redundant padding bytes past the
+end of the sector they care about. Padding prevents the next append from
+potentially overwriting the committed frame's sector.
+
+But...
+
+> By default, SQLite assumes that an operating system call to write a range of bytes will not damage or alter any bytes outside of that range even if a power loss or OS crash occurs during that write. We call this the "powersafe overwrite" property. Prior to version 3.7.9 (2011-11-01), SQLite did not assume powersafe overwrite. But with the standard sector size increasing from 512 to 4096 bytes on most disk drives, it has become necessary to assume powersafe overwrite in order to maintain historical performance levels and so powersafe overwrite is assumed by default in recent versions of SQLite.
+
+> [assuming no powersafe overwrite] In WAL mode, each transaction had to be padded out to the next 4096-byte boundary in the WAL file, rather than the next 512-byte boundary, resulting in thousands of extra bytes being written per transaction.
+
+> SQLite never assumes that database page writes are atomic, regardless of the PSOW setting.(1) And hence SQLite is always able to automatically recover from torn pages induced by a crash. Enabling PSOW does not decrease SQLite's ability to recover from a torn page.
+
+So they basically changed to make SSDs performant and now assume _by default_
+that appending to a partial sector won't damage other data. The authors are
+explicit that ["powersafe overwrite"](#powersafe-overwrites-psow) is a
+separate property from atomicity and they still don't rely on sector
+atomicity. But they do now assume powersafe overwrites by default.
+
+To summarize, the SQLite authors assume:
+ 1. Latent, silent corruptions of committed data should be caught by the file
+    system or hardware and so shouldn't need to be accounted for in
+    application code.
+ 2. Sector writes are _not_ atomic, but...
+ 3. Partial sector overwrites can't corrupt committed data in the same sector
+    (by default).
+
+### Etcd WAL
+
+The authors of etcd's WAL, similarly to the authors of the above paper,
+indicate the need to distinguish between torn writes and silent corruptions.
+
+They maintain a rolling checksum of all records which is used on recovery
+only, which would imply they only care about torn writes, since per-record
+checksums are not checked on subsequent reads from the file after recovery.
+But they have specific code to distinguish between torn writes and "other"
+corruption during recovery.
+
+They are careful to pad every record with 0 to 7 bytes such that the length
+prefix for the next record is always 8-byte aligned and so can't span more
+than one segment.
+
+But their method of detecting a torn-write (rather than latent corruption)
+relies on reading through every 512-byte aligned slice of the set of records
+whose checksum has failed to match and seeing if there are any entirely zero
+sectors.
+
+This seems problematic in a purely logical way regardless of disk behavior:
+if a legitimate record contains more than 1kb of zero bytes and happens to
+ever be corrupted after writing, that record will be falsely detected as a
+torn-write because at least one sector will be entirely zero bytes. In
+practice this doesn't matter much because corruptions caused by anything
+other than torn writes are likely very rare, but it does make me wonder why
+bother trying to tell the difference.
+
+The implied assumptions in their design are:
+ 1. Latent, silent corruption needs to be detected on recovery, but not on
+    every read.
+ 2. Sector writes are atomic.
+ 3. Partial sector writes don't corrupt existing data.
+ 4. Torn writes (caused by multi-segment appends) always leave sectors
+    all-zero.
+
+### LMDB
+
+Symas' Lightning Memory-mapped Database or LMDB is another well-used and
+respected DB file format (along with its Go-native port BoltDB, used by
+Consul, etcd and others).
+
+LMDB writes exclusively in whole 4kb pages. LMDB has a copy-on-write design
+which reuses free pages and commits transactions using a double-buffering
+technique: writing the root alternately to the first and second pages of the
+file. Individual pages do not have checksums and may be larger than the
+physical sector size. Dirty pages are written out to new or un-used pages and
+then `fsync`ed before the transaction commits, so there is no reliance on
+atomic sector writes for data pages (a crash might leave pages of a
+transaction partially written but they are not linked into the tree root yet
+so are ignored on recovery).
+
+The transaction commits only after the double-buffered meta page is written
+out. LMDB relies on the fact that the actual content of the meta page is small
+enough to fit in a single sector to avoid "torn writes" on the meta page.
+(See [the author's
+comments](https://ayende.com/blog/162856/reviewing-lightning-memory-mapped-database-library-transactions-commits)
+on this blog.) Although sector writes are assumed to be atomic, there is no
+reliance on partial sector writes due to the paged design.
+
+The implied assumptions in this design are:
+ 1. Latent, silent corruptions of committed data should be caught by the file
+    system or hardware and so shouldn't need to be accounted for in
+    application code.
+ 2. Sector writes _are_ atomic.
+ 3. No assumptions about powersafe overwrite since all IO is in whole pages.
+
+
+### BoltDB
+
+BoltDB is a Go port of LMDB so it inherits almost all of the same design
+assumptions. One notable difference is that the author added a checksum to the
+metadata page even though it still fits in a single sector. The author noted
+in private correspondence that this was probably just a defensive measure
+rather than a fix for a specific identified flaw in LMDB's design.
+
+Initially this was _not_ used to revert to the alternate page on failure
+because it was still assumed that meta fit in a single sector and that those
+writes were atomic. But [a report of Docker causing corruption on a
+crash](https://github.com/boltdb/bolt/issues/548) seemed to indicate that the
+atomic sector writes assumption _was not safe_ alone, and so the checksum was
+used to detect non-atomic writes even on the less-than-a-sector meta page.
+
+BoltDB is also an important base case for our WAL since it is the log store
+that has been in use for many years within Consul and other HashiCorp
+products.
+
+The implied assumptions in this design are:
+1. Latent, silent corruptions of committed data should be caught by the file
+   system or hardware and so shouldn't need to be accounted for in
+   application code.
+2. Sector writes are _not_ atomic.
+3. No assumptions about powersafe overwrite since all IO is in whole pages.
+
+
+### RocksDB WAL
+
+RocksDB is another well-respected storage library based on Google's LevelDB.
+RocksDB's [WAL
+Format](https://github.com/facebook/rocksdb/wiki/Write-Ahead-Log-File-Format)
+uses blocks to allow skipping through files and over corrupt records (which
+seems dangerous to me in general but perhaps they assume only torn-write
+corruptions are possible?).
+
+Records are packed into 32KiB blocks until they don't fit. Records that are
+larger use first/middle/last flags (which inspired this library) to consume
+multiple blocks.
+
+RocksDB WAL uses pre-allocated files but also re-uses old files in a circular
+buffer pattern since they have tight control of how much WAL is needed. This
+means they might be overwriting old records in place.
+
+Each record independently gets a header with a checksum to detect corruption
+or incomplete writes, but no attempt is made to avoid sector boundaries or
+partial block writes - the current block is just appended to for each write.
+
+Implied assumptions:
+ 1. No latent corruptions? This isn't totally clear from the code or docs, but
+    the docs indicate that a record with a mismatching checksum can simply be
+    skipped over, which would seem to violate basic durability properties for
+    a database if they were already committed. That would imply that checksums
+    only (correctly) detect torn writes, with latent corruption not accounted
+    for.
+ 2. Sector writes _are_ atomic.
+ 3. Partial sector writes don't corrupt existing data.
+
+### Are Sector Writes Atomic?
+
+Russ Cox asked this on twitter and tweeted a link to an [excellent Stack
+Overflow
+answer](https://stackoverflow.com/questions/2009063/are-disk-sector-writes-atomic)
+about this by one of the authors of the NVME spec.
+
+> TLDR; if you are in tight control of your whole stack from application all the way down to the physical disks (so you can control and qualify the whole lot) you can arrange to have what you need to make use of disk atomicity. If you're not in that situation or you're talking about the general case, you should not depend on sector writes being atomic.
+
+Despite this, _most_ current best-of-breed database libraries (notably except
+SQLite and potentially BoltDB), [many linux file
+systems](https://lkml.org/lkml/2009/8/24/156), and all academic papers on disk
+failure modes I've found so far _do_ assume that sector writes are atomic.
+
+I assume that the authors of these file systems, databases and papers are not
+unaware of the complexities described in the above link or the possibility of
+non-atomic sector writes, but rather have chosen to put those outside of the
+reasonable recoverable behavior of their systems. The actual chances of
+encountering a non-atomic sector write in a typical, modern system appear to
+be small enough that these authors consider that a reasonable assumption even
+when it's not a guarantee that can be 100% relied upon. (Although the Docker
+bug linked above for [BoltDB](#boltdb) seems to indicate a real-world case of
+this happening in a modern environment.)
+
+### Powersafe Overwrites (PSOW)
+
+A more subtle property, and a weaker assumption than full sector atomicity, is
+what the [SQLite authors term "Powersafe
+Overwrites"](https://www.sqlite.org/psow.html), abbreviated PSOW.
+
+> By default, SQLite assumes that an operating system call to write a range of bytes will not damage or alter any bytes outside of that range even if a power loss or OS crash occurs during that write. We call this the "powersafe overwrite" property. Prior to version 3.7.9 (2011-11-01), SQLite did not assume powersafe overwrite. But with the standard sector size increasing from 512 to 4096 bytes on most disk drives, it has become necessary to assume powersafe overwrite in order to maintain historical performance levels and so powersafe overwrite is assumed by default in recent versions of SQLite.
+
+Those who assume atomic sector writes _also_ assume this property, but the
+reverse need not be true. SQLite's authors in the page above still assume
+nothing about the atomicity of the actual data written to any sector even
+when POWERSAFE_OVERWRITE is enabled (which is now the default). They simply
+assume that no _other_ data is harmed while performing a write that overlaps
+other sectors, even if power fails.
+
+It's our view that while there certainly can be cases where this assumption
+doesn't hold, it's already weaker than the atomic sector write assumption
+that most reliable storage software assumes today, and so is safe to assume
+for this case.
+
+### Are fsyncs reliable?
+
+Even when you explicitly `fsync` a file after writing to it, some devices or
+even whole operating systems (e.g. macOS) _don't actually flush to disk_ to
+improve performance.
+
+In our case, we assume that Go's `os.File.Sync()` method makes the best
+effort it can on all modern OSes. It does now at least behave correctly on
+macOS (since Go 1.12). But we can't do anything about a lying hardware device.
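+
+The durable-create pattern implied by assumptions 4 and 5 above (fsync the
+file, then fsync its parent directory so the new directory entry survives a
+crash) is what the library's `fs` package, vendored later in this diff,
+implements. The following standalone sketch (illustrative only, not the
+library's code) shows the basic shape:
+
+```go
+package main
+
+import (
+	"os"
+	"path/filepath"
+)
+
+// createDurably writes data to a brand new file and makes a best effort to
+// ensure both the contents and the directory entry survive a crash.
+func createDurably(dir, name string, data []byte) error {
+	f, err := os.OpenFile(filepath.Join(dir, name), os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0644)
+	if err != nil {
+		return err
+	}
+	if _, err := f.Write(data); err != nil {
+		f.Close()
+		return err
+	}
+	// Flush the file contents. Since Go 1.12 this behaves correctly on macOS
+	// too, as noted above.
+	if err := f.Sync(); err != nil {
+		f.Close()
+		return err
+	}
+	if err := f.Close(); err != nil {
+		return err
+	}
+	// Fsync the parent dir so the directory entry for the new file is
+	// persisted as well; without this a crash could "lose" the file.
+	d, err := os.Open(dir)
+	if err != nil {
+		return err
+	}
+	defer d.Close()
+	return d.Sync()
+}
+
+func main() {
+	if err := createDurably(os.TempDir(), "raftwal-example.tmp", []byte("hello")); err != nil {
+		panic(err)
+	}
+}
+```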
+
+# Future Extensions
+
+ * **Auto-tuning segment size.** This format allows for segments to be
+   different sizes. We could start with a smaller segment size of say a single
+   1MiB block and then measure how long it takes to fill each segment. If
+   segments fill quicker than some target rate we could double the allocated
+   size of the next segment. This could mean a burst of writes makes the
+   segments grow, and then when the writes slow down the log would take a
+   long time to free disk space because the segments take so long to fill.
+   Arguably not a terrible problem, but we could also have it auto-tune the
+   segment size down when the write rate drops too. The only major benefit
+   here would be to allow trivial usages like tests to not need a whole 64MiB
+   of disk space just to record a handful of log entries. But those could
+   also just manually configure a smaller segment size.
+
+# References
+
+In no particular order.
+
+**Files and Crash Recovery**
+* [Files are hard](https://danluu.com/file-consistency/)
+* [Files are fraught with peril](https://danluu.com/deconstruct-files/)
+* [Ensuring data reaches disk](https://lwn.net/Articles/457667/)
+* [Write Atomicity and NVME Device Design](https://www.bswd.com/FMS12/FMS12-Rudoff.pdf)
+* [Durability: NVME Disks](https://www.evanjones.ca/durability-nvme.html)
+* [Intel SSD Durability](https://www.evanjones.ca/intel-ssd-durability.html)
+* [Are Disk Sector Writes Atomic?](https://stackoverflow.com/questions/2009063/are-disk-sector-writes-atomic/61832882#61832882)
+* [Protocol Aware Recovery for Consensus-based Storage](https://www.usenix.org/system/files/conference/fast18/fast18-alagappan.pdf)
+* [Atomic Commit in SQLite](https://www.sqlite.org/atomiccommit.html)
+* ["Powersafe Overwrites" in SQLite](https://www.sqlite.org/psow.html)
+* [An Analysis of Data Corruption in the Storage Stack](https://www.cs.toronto.edu/~bianca/papers/fast08.pdf)
+
+**DB Design and Storage File layout**
+* [BoltDB Implementation](https://github.com/boltdb/bolt)
+* LMDB Design: [slides](https://www.snia.org/sites/default/files/SDC15_presentations/database/HowardChu_The_Lighting_Memory_Database.pdf), [talk](https://www.youtube.com/watch?v=tEa5sAh-kVk)
+* [SQLite file layout](https://www.sqlite.org/fileformat.html)
+
+**WAL implementations**
+* [SQLite WAL Mode](https://sqlite.org/wal.html)
+* [RocksDB WAL Format](https://github.com/facebook/rocksdb/wiki/Write-Ahead-Log-File-Format)
+* [etcd implementation](https://github.com/etcd-io/etcd/tree/master/wal)
diff --git a/vendor/github.com/hashicorp/raft-wal/codec.go b/vendor/github.com/hashicorp/raft-wal/codec.go
new file mode 100644
index 0000000000000..801d7c0186e88
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft-wal/codec.go
@@ -0,0 +1,167 @@
+// Copyright (c) HashiCorp, Inc
+// SPDX-License-Identifier: MPL-2.0
+
+package wal
+
+import (
+	"encoding/binary"
+	"io"
+	"time"
+
+	"github.com/hashicorp/raft"
+)
+
+const (
+	// FirstExternalCodecID is the lowest value an external codec may use to
+	// identify their codec. Values lower than this are reserved for future
+	// internal use.
+	FirstExternalCodecID = 1 << 16
+
+	// Codec* constants identify internally-defined codec identifiers.
+	CodecBinaryV1 uint64 = iota
+)
+
+// Codec is the interface required for encoding/decoding log entries. Callers
+// can pass a custom one to manage their own serialization, or to add
+// additional layers like encryption or compression of records. Each codec
+// must have a globally unique ID (see the ID method below).
Each codec +type Codec interface { + // ID returns the globally unique identifier for this codec version. This is + // encoded into segment file headers and must remain consistent over the life + // of the log. Values up to FirstExternalCodecID are reserved and will error + // if specified externally. + ID() uint64 + + // Encode the log into the io.Writer. We pass a writer to allow the caller to + // manage buffer allocation and re-use. + Encode(l *raft.Log, w io.Writer) error + + // Decode a log from the passed byte slice into the log entry pointed to. This + // allows the caller to manage allocation and re-use of the bytes and log + // entry. The resulting raft.Log MUST NOT reference data in the input byte + // slice since the input byte slice may be returned to a pool and re-used. + Decode([]byte, *raft.Log) error +} + +// BinaryCodec is a Codec that encodes raft.Log with a simple binary format. We +// test that all fields are captured using reflection. +// +// For now we assume raft.Log is not likely to change too much. If it does we'll +// use a new Codec ID for the later version and have to support decoding either. +type BinaryCodec struct{} + +// ID returns the globally unique identifier for this codec version. This is +// encoded into segment file headers and must remain consistent over the life +// of the log. Values up to FirstExternalCodecID are reserved and will error +// if specified externally. +func (c *BinaryCodec) ID() uint64 { + return CodecBinaryV1 +} + +// Encode the log into the io.Writer. We pass a writer to allow the caller to +// manage buffer allocation and re-use. +func (c *BinaryCodec) Encode(l *raft.Log, w io.Writer) error { + enc := encoder{w: w} + enc.varint(l.Index) + enc.varint(l.Term) + enc.varint(uint64(l.Type)) + enc.bytes(l.Data) + enc.bytes(l.Extensions) + enc.time(l.AppendedAt) + return enc.err +} + +// Decode a log from the passed byte slice into the log entry pointed to. This +// allows the caller to manage allocation and re-use of the bytes and log +// entry. 
+func (c *BinaryCodec) Decode(bs []byte, l *raft.Log) error {
+	dec := decoder{buf: bs}
+	l.Index = dec.varint()
+	l.Term = dec.varint()
+	l.Type = raft.LogType(dec.varint())
+	l.Data = dec.bytes()
+	l.Extensions = dec.bytes()
+	l.AppendedAt = dec.time()
+	return dec.err
+}
+
+type encoder struct {
+	w       io.Writer
+	err     error
+	scratch [10]byte
+}
+
+func (e *encoder) varint(v uint64) {
+	if e.err != nil {
+		return
+	}
+
+	// Varint encoding might use up to 10 bytes for a uint64
+	n := binary.PutUvarint(e.scratch[:], v)
+	_, e.err = e.w.Write(e.scratch[:n])
+}
+
+func (e *encoder) bytes(bs []byte) {
+	// Put a length prefix
+	e.varint(uint64(len(bs)))
+	if e.err != nil {
+		return
+	}
+	// Copy the bytes to the writer
+	_, e.err = e.w.Write(bs)
+}
+
+func (e *encoder) time(t time.Time) {
+	if e.err != nil {
+		return
+	}
+	bs, err := t.MarshalBinary()
+	if err != nil {
+		e.err = err
+		return
+	}
+	_, e.err = e.w.Write(bs)
+}
+
+type decoder struct {
+	buf []byte
+	err error
+}
+
+func (d *decoder) varint() uint64 {
+	if d.err != nil {
+		return 0
+	}
+	v, n := binary.Uvarint(d.buf)
+	d.buf = d.buf[n:]
+	return v
+}
+
+func (d *decoder) bytes() []byte {
+	// Get length prefix
+	n := d.varint()
+	if d.err != nil {
+		return nil
+	}
+	if n == 0 {
+		return nil
+	}
+	if n > uint64(len(d.buf)) {
+		d.err = io.ErrShortBuffer
+		return nil
+	}
+	bs := make([]byte, n)
+	copy(bs, d.buf[:n])
+	d.buf = d.buf[n:]
+	return bs
+}
+
+func (d *decoder) time() time.Time {
+	var t time.Time
+	if d.err != nil {
+		return t
+	}
+	// Note that UnmarshalBinary requires the buffer to contain exactly the
+	// encoded time; this works because AppendedAt is the last field we decode
+	// so d.buf holds only the time bytes at this point.
+	d.err = t.UnmarshalBinary(d.buf)
+	return t
+}
diff --git a/vendor/github.com/hashicorp/raft-wal/fs/file.go b/vendor/github.com/hashicorp/raft-wal/fs/file.go
new file mode 100644
index 0000000000000..98219237d0d4c
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft-wal/fs/file.go
@@ -0,0 +1,40 @@
+// Copyright (c) HashiCorp, Inc
+// SPDX-License-Identifier: MPL-2.0
+
+package fs
+
+import (
+	"os"
+	"sync/atomic"
+
+	"github.com/hashicorp/raft-wal/types"
+)
+
+var _ types.WritableFile = &File{}
+
+// File wraps an os.File and implements types.WritableFile. It ensures that the
+// first time Sync is called on the file, the parent directory is also fsynced,
+// to ensure a crash won't cause the FS to forget the file is there.
+//
+// Postponing this allows us to ensure that we do the minimum necessary fsyncs
+// but still ensure all required fsyncs are done by the time we acknowledge
+// committed data in the new file.
+type File struct {
+	new uint32 // atomically accessed, keep it aligned!
+	dir string
+	os.File
+}
+
+// Sync calls fsync on the underlying file. If this is the first call to Sync
+// since creation it also fsyncs the parent dir.
+func (f *File) Sync() error {
+	// Sync the underlying file
+	if err := f.File.Sync(); err != nil {
+		return err
+	}
+	new := atomic.SwapUint32(&f.new, 1)
+	if new == 0 {
+		return syncDir(f.dir)
+	}
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/raft-wal/fs/fs.go b/vendor/github.com/hashicorp/raft-wal/fs/fs.go
new file mode 100644
index 0000000000000..2de6ff044b886
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft-wal/fs/fs.go
@@ -0,0 +1,128 @@
+// Copyright (c) HashiCorp, Inc
+// SPDX-License-Identifier: MPL-2.0
+
+package fs
+
+import (
+	"fmt"
+	"io/ioutil"
+	"math"
+	"os"
+	"path/filepath"
+
+	"github.com/coreos/etcd/pkg/fileutil"
+	"github.com/hashicorp/raft-wal/types"
+)
+
+// FS implements the wal.VFS interface using Go's built-in OS filesystem (and a
+// few helpers).
+//
+// TODO if we changed the interface to be Dir centric we could cache the open
+// dir handle and save some time opening it on each Create in order to fsync.
+type FS struct {
+}
+
+func New() *FS {
+	return &FS{}
+}
+
+// ListDir returns a list of all files in the specified dir in lexicographical
+// order. If the dir doesn't exist, it must return an error. An empty array
+// with a nil error is assumed to mean that the directory exists and was
+// readable, but contains no files.
+func (fs *FS) ListDir(dir string) ([]string, error) {
+	files, err := ioutil.ReadDir(dir)
+	if err != nil {
+		return nil, err
+	}
+	names := make([]string, len(files))
+	for i, f := range files {
+		if f.IsDir() {
+			continue
+		}
+		names[i] = f.Name()
+	}
+	return names, nil
+}
+
+// Create creates a new file with the given name. If a file with the same name
+// already exists an error is returned. If a non-zero size is given,
+// implementations should make a best effort to pre-allocate the file to be
+// that size. The dir must already exist and be writable to the current
+// process.
+func (fs *FS) Create(dir string, name string, size uint64) (types.WritableFile, error) {
+	f, err := os.OpenFile(filepath.Join(dir, name), os.O_CREATE|os.O_EXCL|os.O_RDWR, os.FileMode(0644))
+	if err != nil {
+		return nil, err
+	}
+	// We just created the file. Preallocate its size.
+	if size > 0 {
+		if size > math.MaxInt32 {
+			return nil, fmt.Errorf("maximum file size is %d bytes", math.MaxInt32)
+		}
+		if err := fileutil.Preallocate(f, int64(size), true); err != nil {
+			f.Close()
+			return nil, err
+		}
+	}
+	// We don't fsync here for performance reasons. Technically we need to fsync
+	// the file itself to make sure it is really persisted to disk, and you always
+	// need to fsync its parent dir after a creation because fsync doesn't ensure
+	// the directory entry is persisted - a crash could make the file appear to be
+	// missing as there is no directory entry.
+	//
+	// BUT, it doesn't actually matter if this file is crash safe, right up to the
+	// point where we actually commit log data. Since we always fsync the file
+	// when we commit logs, we don't need to again here. That does however leave
+	// the parent dir fsync which must be done after the first fsync to a newly
+	// created file to ensure it survives a crash.
+	//
+	// To handle that, we return a wrapped os.File that will fsync the parent dir
+	// as well the first time Sync is called (and only the first time).
+	fi := &File{
+		new:  0,
+		dir:  dir,
+		File: *f,
+	}
+	return fi, nil
+}
+
+// Delete indicates the file is no longer required. Typically it should be
+// deleted from the underlying system to free disk space.
+func (fs *FS) Delete(dir string, name string) error {
+	if err := os.Remove(filepath.Join(dir, name)); err != nil {
+		return err
+	}
+	// Make sure parent directory metadata is fsynced too before we call this
+	// "done".
+	return syncDir(dir)
+}
+
+// OpenReader opens an existing file in read-only mode. If the file doesn't
+// exist or permission is denied, an error is returned; otherwise no checks
+// are made about the well-formedness of the file, it may be empty, the wrong
+// size or corrupt in arbitrary ways.
+func (fs *FS) OpenReader(dir string, name string) (types.ReadableFile, error) {
+	return os.OpenFile(filepath.Join(dir, name), os.O_RDONLY, os.FileMode(0644))
+}
+
+// OpenWriter opens a file in read-write mode. If the file doesn't exist or
+// permission is denied, an error is returned; otherwise no checks are made
+// about the well-formedness of the file, it may be empty, the wrong size or
+// corrupt in arbitrary ways.
+func (fs *FS) OpenWriter(dir string, name string) (types.WritableFile, error) {
+	return os.OpenFile(filepath.Join(dir, name), os.O_RDWR, os.FileMode(0644))
+}
+
+func syncDir(dir string) error {
+	f, err := os.Open(dir)
+	if err != nil {
+		return err
+	}
+	err = f.Sync()
+	closeErr := f.Close()
+	if err != nil {
+		return err
+	}
+	return closeErr
+}
diff --git a/vendor/github.com/hashicorp/raft-wal/metadb/metadb.go b/vendor/github.com/hashicorp/raft-wal/metadb/metadb.go
new file mode 100644
index 0000000000000..85a054ab932f1
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft-wal/metadb/metadb.go
@@ -0,0 +1,269 @@
+// Copyright (c) HashiCorp, Inc
+// SPDX-License-Identifier: MPL-2.0
+
+package metadb
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/hashicorp/raft-wal/types"
+	"go.etcd.io/bbolt"
+)
+
+const (
+	// FileName is the default file name for the bolt db file.
+	FileName = "wal-meta.db"
+
+	// *Bucket are the names used for internal bolt buckets
+	MetaBucket   = "wal-meta"
+	StableBucket = "stable"
+
+	// We just need one key for now so use the byte 'm' for meta arbitrarily.
+	MetaKey = "m"
+)
+
+var (
+	// ErrUnintialized is returned when any call is made before Load has opened
+	// the DB file.
+	ErrUnintialized = errors.New("uninitialized")
+)
+
+// BoltMetaDB implements types.MetaStore using BoltDB as a reliable persistent
+// store. See the repo README for the reasons for this design choice and its
+// performance implications.
+type BoltMetaDB struct {
+	dir string
+	db  *bbolt.DB
+}
+
+func (db *BoltMetaDB) ensureOpen(dir string) error {
+	if db.dir != "" && db.dir != dir {
+		return fmt.Errorf("can't load dir %s, already open in dir %s", dir, db.dir)
+	}
+	if db.db != nil {
+		return nil
+	}
+
+	fileName := filepath.Join(dir, FileName)
+
+	open := func() error {
+		bb, err := bbolt.Open(fileName, 0644, nil)
+		if err != nil {
+			return fmt.Errorf("failed to open %s: %w", FileName, err)
+		}
+		db.db = bb
+		db.dir = dir
+		return nil
+	}
+
+	// BoltDB can get stuck in invalid states if we crash while it's initializing.
+	// We can't distinguish those as safe to just wipe and start again because
+	// we don't know for sure if it's failing due to bad init or later corruption
+	// (which would lose data if we just wiped and started over). So to ensure
+	// initial creation of the WAL is as crash-safe as possible we manually
+	// implement an atomic init procedure:
+	// 1. Check if the file exists already. If yes, skip init and just open it.
+	// 2. Delete any existing DB file with the tmp name.
+	// 3.
+	//    Create a new BoltDB that is empty and has the buckets, with a temp name.
+	// 4. Once that's committed, rename to the final name and fsync the parent dir.
+	_, err := os.Stat(fileName)
+	if err == nil {
+		// File exists, just open it
+		return open()
+	}
+	if !errors.Is(err, os.ErrNotExist) {
+		// Unknown err, just return that
+		return fmt.Errorf("failed to stat %s: %w", FileName, err)
+	}
+
+	// File doesn't exist, initialize a new DB in a crash-safe way
+	if err := safeInitBoltDB(dir); err != nil {
+		return fmt.Errorf("failed initializing meta DB: %w", err)
+	}
+
+	// All good, now open it!
+	return open()
+}
+
+func safeInitBoltDB(dir string) error {
+	tmpFileName := filepath.Join(dir, FileName+".tmp")
+
+	// Delete any old attempts to init that were unsuccessful
+	if err := os.RemoveAll(tmpFileName); err != nil {
+		return err
+	}
+
+	// Open bolt DB at tmp file name
+	bb, err := bbolt.Open(tmpFileName, 0644, nil)
+	if err != nil {
+		return err
+	}
+
+	// Check the error before deferring the rollback: tx is nil when Begin
+	// fails and calling Rollback on it would panic.
+	tx, err := bb.Begin(true)
+	if err != nil {
+		return err
+	}
+	defer tx.Rollback()
+
+	_, err = tx.CreateBucket([]byte(MetaBucket))
+	if err != nil {
+		return err
+	}
+	_, err = tx.CreateBucket([]byte(StableBucket))
+	if err != nil {
+		return err
+	}
+	if err := tx.Commit(); err != nil {
+		return err
+	}
+	// Close the file ready to rename into place and re-open. This probably isn't
+	// necessary but it makes it easier to reason about this code path being
+	// totally separate from the common case.
+	if err := bb.Close(); err != nil {
+		return err
+	}
+
+	// We created the DB OK. Now rename it to the final name.
+	if err := os.Rename(tmpFileName, filepath.Join(dir, FileName)); err != nil {
+		return err
+	}
+
+	// And fsync that parent dir to make sure the new file with its new name
+	// is persisted!
+	dirF, err := os.Open(dir)
+	if err != nil {
+		return err
+	}
+	err = dirF.Sync()
+	closeErr := dirF.Close()
+	if err != nil {
+		return err
+	}
+	return closeErr
+}
+
+// Load loads the existing persisted state. If there is no existing state,
+// implementations are expected to initialize new storage and return an empty
+// state.
+func (db *BoltMetaDB) Load(dir string) (types.PersistentState, error) {
+	var state types.PersistentState
+
+	if err := db.ensureOpen(dir); err != nil {
+		return state, err
+	}
+
+	tx, err := db.db.Begin(false)
+	if err != nil {
+		return state, err
+	}
+	defer tx.Rollback()
+	meta := tx.Bucket([]byte(MetaBucket))
+
+	// We just need one key for now so use the byte 'm' for meta arbitrarily.
+	raw := meta.Get([]byte(MetaKey))
+	if raw == nil {
+		// This is valid: it's an "empty" log that will be initialized by the WAL.
+		return state, nil
+	}
+
+	if err := json.Unmarshal(raw, &state); err != nil {
+		return state, fmt.Errorf("%w: failed to parse persisted state: %s", types.ErrCorrupt, err)
+	}
+	return state, nil
+}
+
+// CommitState must atomically replace all persisted metadata in the current
+// store with the set provided. It must not return until the data is persisted
+// durably and in a crash-safe way otherwise the guarantees of the WAL will be
+// compromised. The WAL will only ever call this from a single thread at a
+// time, and it will never be called concurrently with Load; however, it may
+// be called concurrently with Get/SetStable operations.
+func (db *BoltMetaDB) CommitState(state types.PersistentState) error {
+	if db.db == nil {
+		return ErrUnintialized
+	}
+
+	encoded, err := json.Marshal(state)
+	if err != nil {
+		return fmt.Errorf("failed to encode persisted state: %w", err)
+	}
+
+	tx, err := db.db.Begin(true)
+	if err != nil {
+		return err
+	}
+	defer tx.Rollback()
+	meta := tx.Bucket([]byte(MetaBucket))
+
+	if err := meta.Put([]byte(MetaKey), encoded); err != nil {
+		return err
+	}
+
+	return tx.Commit()
+}
+
+// GetStable returns a value from the stable store or nil if it doesn't exist.
+// May be called concurrently by multiple threads.
+func (db *BoltMetaDB) GetStable(key []byte) ([]byte, error) {
+	if db.db == nil {
+		return nil, ErrUnintialized
+	}
+
+	tx, err := db.db.Begin(false)
+	if err != nil {
+		return nil, err
+	}
+	defer tx.Rollback()
+	stable := tx.Bucket([]byte(StableBucket))
+
+	val := stable.Get(key)
+	if val == nil {
+		return nil, nil
+	}
+
+	// Need to copy the value since bolt only guarantees the slice is valid
+	// until the end of the txn.
+	ret := make([]byte, len(val))
+	copy(ret, val)
+	return ret, nil
+}
+
+// SetStable stores a value in the stable store. May be called concurrently
+// with GetStable.
+func (db *BoltMetaDB) SetStable(key []byte, value []byte) error {
+	if db.db == nil {
+		return ErrUnintialized
+	}
+
+	tx, err := db.db.Begin(true)
+	if err != nil {
+		return err
+	}
+	defer tx.Rollback()
+	stable := tx.Bucket([]byte(StableBucket))
+
+	if value == nil {
+		err = stable.Delete(key)
+	} else {
+		err = stable.Put(key, value)
+	}
+	if err != nil {
+		return err
+	}
+
+	return tx.Commit()
+}
+
+// Close implements io.Closer
+func (db *BoltMetaDB) Close() error {
+	if db.db == nil {
+		return nil
+	}
+	err := db.db.Close()
+	db.db = nil
+	return err
+}
diff --git a/vendor/github.com/hashicorp/raft-wal/metrics.go b/vendor/github.com/hashicorp/raft-wal/metrics.go
new file mode 100644
index 0000000000000..a037b127caf07
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft-wal/metrics.go
@@ -0,0 +1,78 @@
+// Copyright (c) HashiCorp, Inc
+// SPDX-License-Identifier: MPL-2.0
+
+package wal
+
+import (
+	"github.com/hashicorp/raft-wal/metrics"
+)
+
+var (
+	// MetricDefinitions describe the metrics emitted by this library via the
+	// provided metrics.Collector implementation. It's public so that these can be
+	// registered during init with metrics clients that support pre-defining
+	// metrics.
+	MetricDefinitions = metrics.Definitions{
+		Counters: []metrics.Descriptor{
+			{
+				Name: "log_entry_bytes_written",
+				Desc: "log_entry_bytes_written counts the bytes of log entries after encoding" +
+					" with Codec. Actual bytes written to disk might be slightly higher as it" +
+					" includes headers and index entries.",
+			},
+			{
+				Name: "log_entries_written",
+				Desc: "log_entries_written counts the number of entries written.",
+			},
+			{
+				Name: "log_appends",
+				Desc: "log_appends counts the number of calls to StoreLog(s) i.e." +
+					" the number of batches of entries appended.",
+			},
+			{
+				Name: "log_entry_bytes_read",
+				Desc: "log_entry_bytes_read counts the bytes of log entries read from" +
+					" segments before decoding. Actual bytes read from disk might be higher" +
+					" as it includes headers and index entries and possible secondary reads" +
+					" for large entries that don't fit in buffers.",
+			},
+			{
+				Name: "log_entries_read",
+				Desc: "log_entries_read counts the number of calls to get_log.",
+			},
+			{
+				Name: "segment_rotations",
+				Desc: "segment_rotations counts how many times we move to a new segment file.",
+			},
+			{
+				Name: "head_truncations",
+				Desc: "head_truncations counts how many log entries have been truncated" +
+					" from the head - i.e. the oldest entries. By graphing the rate of" +
+					" change over time you can see individual truncate calls as spikes.",
+			},
+			{
+				Name: "tail_truncations",
+				Desc: "tail_truncations counts how many log entries have been truncated" +
+					" from the tail - i.e. the newest entries. By graphing the rate of" +
+					" change over time you can see individual truncate calls as spikes.",
+			},
+			{
+				Name: "stable_gets",
+				Desc: "stable_gets counts how many calls are made to StableStore.Get or GetUint64.",
+			},
+			{
+				Name: "stable_sets",
+				Desc: "stable_sets counts how many calls are made to StableStore.Set or SetUint64.",
+			},
+		},
+		Gauges: []metrics.Descriptor{
+			{
+				Name: "last_segment_age_seconds",
+				Desc: "last_segment_age_seconds is a gauge that is set each time we" +
+					" rotate a segment and describes the number of seconds between when" +
+					" that segment file was first created and when it was sealed. This" +
+					" gives a rough estimate of how quickly writes are filling the disk.",
+			},
+		},
+	}
+)
diff --git a/vendor/github.com/hashicorp/raft-wal/metrics/atomic_collector.go b/vendor/github.com/hashicorp/raft-wal/metrics/atomic_collector.go
new file mode 100644
index 0000000000000..a465e7d8b8e7b
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft-wal/metrics/atomic_collector.go
@@ -0,0 +1,89 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package metrics
+
+import "sync/atomic"
+
+var (
+	_ Collector = &AtomicCollector{}
+)
+
+// AtomicCollector is a simple Collector that atomically stores
+// counters and gauges in memory.
+type AtomicCollector struct {
+	counters []uint64
+	gauges   []uint64
+
+	counterIndex, gaugeIndex map[string]int
+}
+
+// NewAtomicCollector creates a collector for the given set of Definitions.
+func NewAtomicCollector(defs Definitions) *AtomicCollector {
+	c := &AtomicCollector{
+		counters:     make([]uint64, len(defs.Counters)),
+		gauges:       make([]uint64, len(defs.Gauges)),
+		counterIndex: make(map[string]int),
+		gaugeIndex:   make(map[string]int),
+	}
+	for i, d := range defs.Counters {
+		if _, ok := c.counterIndex[d.Name]; ok {
+			panic("duplicate metrics named " + d.Name)
+		}
+		c.counterIndex[d.Name] = i
+	}
+	for i, d := range defs.Gauges {
+		if _, ok := c.counterIndex[d.Name]; ok {
+			panic("duplicate metrics named " + d.Name)
+		}
+		if _, ok := c.gaugeIndex[d.Name]; ok {
+			panic("duplicate metrics named " + d.Name)
+		}
+		c.gaugeIndex[d.Name] = i
+	}
+	return c
+}
+
+// IncrementCounter records val occurrences of the named event. Names will
+// follow prometheus conventions with lower_case_and_underscores. We don't
+// need any additional labels currently.
+func (c *AtomicCollector) IncrementCounter(name string, delta uint64) {
+	id, ok := c.counterIndex[name]
+	if !ok {
+		panic("invalid metric name: " + name)
+	}
+	atomic.AddUint64(&c.counters[id], delta)
+}
+
+// SetGauge sets the value of the named gauge, overriding any previous value.
+func (c *AtomicCollector) SetGauge(name string, val uint64) {
+	id, ok := c.gaugeIndex[name]
+	if !ok {
+		panic("invalid metric name: " + name)
+	}
+	atomic.StoreUint64(&c.gauges[id], val)
+}
+
+// Summary returns a summary of the metrics since startup. Each value is
+// atomically loaded but the set is not atomic overall and may represent an
+// inconsistent snapshot e.g. with some metrics reflecting the most recent
+// operation while others don't.
+func (c *AtomicCollector) Summary() Summary {
+	s := Summary{
+		Counters: make(map[string]uint64, len(c.counters)),
+		Gauges:   make(map[string]uint64, len(c.gauges)),
+	}
+	for name, id := range c.counterIndex {
+		s.Counters[name] = atomic.LoadUint64(&c.counters[id])
+	}
+	for name, id := range c.gaugeIndex {
+		s.Gauges[name] = atomic.LoadUint64(&c.gauges[id])
+	}
+	return s
+}
+
+// Summary is a copy of the values recorded so far for each metric.
+type Summary struct {
+	Counters map[string]uint64
+	Gauges   map[string]uint64
+}
diff --git a/vendor/github.com/hashicorp/raft-wal/metrics/gometrics_collector.go b/vendor/github.com/hashicorp/raft-wal/metrics/gometrics_collector.go
new file mode 100644
index 0000000000000..006dee8d1257d
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft-wal/metrics/gometrics_collector.go
@@ -0,0 +1,51 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package metrics
+
+import gometrics "github.com/armon/go-metrics"
+
+// GoMetricsCollector implements a Collector that passes through observations
+// to a go-metrics instance. The zero value works, writing metrics to the
+// default global instance; however, to set a prefix or a static set of labels
+// to add to each metric observed, or to use a non-global metrics instance,
+// use NewGoMetricsCollector.
+type GoMetricsCollector struct {
+	gm     *gometrics.Metrics
+	prefix []string
+	labels []gometrics.Label
+}
+
+// NewGoMetricsCollector returns a GoMetricsCollector that will attach the
+// specified name prefix and/or labels to each observation. If gm is nil the
+// global metrics instance is used.
+func NewGoMetricsCollector(prefix []string, labels []gometrics.Label, gm *gometrics.Metrics) *GoMetricsCollector {
+	if gm == nil {
+		gm = gometrics.Default()
+	}
+	return &GoMetricsCollector{
+		gm:     gm,
+		prefix: prefix,
+		labels: labels,
+	}
+}
+
+// IncrementCounter records val occurrences of the named event. Names will
+// follow prometheus conventions with lower_case_and_underscores. We don't
+// need any additional labels currently.
+func (c *GoMetricsCollector) IncrementCounter(name string, delta uint64) {
+	c.gm.IncrCounterWithLabels(c.name(name), float32(delta), c.labels)
+}
+
+// SetGauge sets the value of the named gauge, overriding any previous value.
+func (c *GoMetricsCollector) SetGauge(name string, val uint64) {
+	c.gm.SetGaugeWithLabels(c.name(name), float32(val), c.labels)
+}
+
+// name returns the metric name as a slice. We don't want to risk modifying
+// the prefix slice's backing array since this might be called concurrently,
+// so we always allocate a new slice.
+func (c *GoMetricsCollector) name(name string) []string {
+	var ss []string
+	return append(append(ss, c.prefix...), name)
+}
diff --git a/vendor/github.com/hashicorp/raft-wal/metrics/metrics.go b/vendor/github.com/hashicorp/raft-wal/metrics/metrics.go
new file mode 100644
index 0000000000000..df7fb1e0f5e36
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft-wal/metrics/metrics.go
@@ -0,0 +1,42 @@
+// Copyright (c) HashiCorp, Inc.
diff --git a/vendor/github.com/hashicorp/raft-wal/metrics/metrics.go b/vendor/github.com/hashicorp/raft-wal/metrics/metrics.go
new file mode 100644
index 0000000000000..df7fb1e0f5e36
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft-wal/metrics/metrics.go
@@ -0,0 +1,42 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package metrics
+
+// Collector provides a simple abstraction for counter type metrics that
+// the WAL and log verifier can use without depending on a specific metrics
+// collector implementation.
+type Collector interface {
+	// IncrementCounter records delta occurrences of the named event. Names will
+	// follow Prometheus conventions with lower_case_and_underscores. We don't
+	// need any additional labels currently.
+	IncrementCounter(name string, delta uint64)
+
+	// SetGauge sets the value of the named gauge, overriding any previous value.
+	SetGauge(name string, val uint64)
+}
+
+// Definitions provides a simple description of a set of scalar metrics.
+type Definitions struct {
+	Counters []Descriptor
+	Gauges   []Descriptor
+}
+
+// Descriptor describes a specific metric.
+type Descriptor struct {
+	Name string
+	Desc string
+}
+
+var _ Collector = &NoOpCollector{}
+
+// NoOpCollector is a Collector that does nothing.
+type NoOpCollector struct{}
+
+// IncrementCounter records delta occurrences of the named event. Names will
+// follow Prometheus conventions with lower_case_and_underscores. We don't
+// need any additional labels currently.
+func (c *NoOpCollector) IncrementCounter(name string, delta uint64) {}
+
+// SetGauge sets the value of the named gauge, overriding any previous value.
+func (c *NoOpCollector) SetGauge(name string, val uint64) {}
diff --git a/vendor/github.com/hashicorp/raft-wal/options.go b/vendor/github.com/hashicorp/raft-wal/options.go
new file mode 100644
index 0000000000000..743e1b8f27595
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft-wal/options.go
@@ -0,0 +1,92 @@
+// Copyright (c) HashiCorp, Inc
+// SPDX-License-Identifier: MPL-2.0
+
+package wal
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/raft-wal/fs"
+	"github.com/hashicorp/raft-wal/metadb"
+	"github.com/hashicorp/raft-wal/metrics"
+	"github.com/hashicorp/raft-wal/segment"
+	"github.com/hashicorp/raft-wal/types"
+)
+
+// WithCodec is an option that allows a custom Codec to be provided to the WAL.
+// If not used the default Codec is used.
+func WithCodec(c Codec) walOpt {
+	return func(w *WAL) {
+		w.codec = c
+	}
+}
+
+// WithMetaStore is an option that allows a custom MetaStore to be provided to
+// the WAL. If not used the default MetaStore is used.
+func WithMetaStore(db types.MetaStore) walOpt {
+	return func(w *WAL) {
+		w.metaDB = db
+	}
+}
+
+// WithSegmentFiler is an option that allows a custom SegmentFiler (and hence
+// Segment Reader/Writer implementation) to be provided to the WAL. If not used
+// the default SegmentFiler is used.
+func WithSegmentFiler(sf types.SegmentFiler) walOpt {
+	return func(w *WAL) {
+		w.sf = sf
+	}
+}
+
+// WithLogger is an option that allows a custom logger to be used.
+func WithLogger(logger hclog.Logger) walOpt {
+	return func(w *WAL) {
+		w.log = logger
+	}
+}
+
+// WithSegmentSize is an option that allows a custom segmentSize to be set.
+func WithSegmentSize(size int) walOpt {
+	return func(w *WAL) {
+		w.segmentSize = size
+	}
+}
+
+// WithMetricsCollector is an option that allows a custom metrics Collector to
+// be provided to the WAL. If not used the NoOpCollector is used.
+func WithMetricsCollector(c metrics.Collector) walOpt {
+	return func(w *WAL) {
+		w.metrics = c
+	}
+}
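// Editor's note: a sketch of constructing a WAL with the options above,
// assuming the package's Open constructor applies functional options (the
// path and segment size here are arbitrary):
//
//	w, err := wal.Open("/var/lib/raft/wal",
//		wal.WithSegmentSize(64*1024*1024),
//		wal.WithMetricsCollector(&metrics.NoOpCollector{}),
//	)
//	if err != nil {
//		return err
//	}
//	defer w.Close()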
+func (w *WAL) applyDefaultsAndValidate() error {
+	// If an external codec has been provided, check that it's not using a
+	// reserved ID.
+	if w.codec != nil && w.codec.ID() < FirstExternalCodecID {
+		return fmt.Errorf("codec is using a reserved ID (below %d)", FirstExternalCodecID)
+	}
+
+	// Defaults
+	if w.log == nil {
+		w.log = hclog.Default().Named("wal")
+	}
+	if w.codec == nil {
+		w.codec = &BinaryCodec{}
+	}
+	if w.sf == nil {
+		// These are not actually swappable via options right now but we override
+		// them in tests. Only load the default implementations if they are not set.
+		vfs := fs.New()
+		w.sf = segment.NewFiler(w.dir, vfs)
+	}
+	if w.metrics == nil {
+		w.metrics = &metrics.NoOpCollector{}
+	}
+	if w.metaDB == nil {
+		w.metaDB = &metadb.BoltMetaDB{}
+	}
+	if w.segmentSize == 0 {
+		w.segmentSize = DefaultSegmentSize
+	}
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/raft-wal/segment/crc.go b/vendor/github.com/hashicorp/raft-wal/segment/crc.go
new file mode 100644
index 0000000000000..94af2d93842a4
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft-wal/segment/crc.go
@@ -0,0 +1,14 @@
+// Copyright (c) HashiCorp, Inc
+// SPDX-License-Identifier: MPL-2.0
+
+package segment
+
+import (
+	"hash/crc32"
+)
+
+var castagnoliTable *crc32.Table
+
+func init() {
+	castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
+}
diff --git a/vendor/github.com/hashicorp/raft-wal/segment/filer.go b/vendor/github.com/hashicorp/raft-wal/segment/filer.go
new file mode 100644
index 0000000000000..ae7b6b3068cf4
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft-wal/segment/filer.go
@@ -0,0 +1,295 @@
+// Copyright (c) HashiCorp, Inc
+// SPDX-License-Identifier: MPL-2.0
+
+package segment
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+	"sync"
+
+	"github.com/hashicorp/raft-wal/types"
+)
+
+const (
+	segmentFileSuffix      = ".wal"
+	segmentFileNamePattern = "%020d-%016x" + segmentFileSuffix
+)
+
+// Filer implements the abstraction for managing a set of segment files in a
+// directory. It uses a VFS to abstract actual file system operations for
+// easier testing.
+type Filer struct {
+	dir     string
+	vfs     types.VFS
+	bufPool sync.Pool
+}
+
+// NewFiler creates a Filer ready for use.
+func NewFiler(dir string, vfs types.VFS) *Filer {
+	f := &Filer{
+		dir: dir,
+		vfs: vfs,
+	}
+	f.bufPool.New = func() interface{} {
+		return make([]byte, minBufSize)
+	}
+	return f
+}
+
+// FileName returns the formatted file name expected for this segment.
+// SegmentFiler implementations could choose to ignore this.
+func FileName(i types.SegmentInfo) string {
+	return fmt.Sprintf(segmentFileNamePattern, i.BaseIndex, i.ID)
+}
+
+// Create adds a new segment with the given info and returns a writer or an
+// error.
+func (f *Filer) Create(info types.SegmentInfo) (types.SegmentWriter, error) {
+	if info.BaseIndex == 0 {
+		return nil, fmt.Errorf("BaseIndex must be greater than zero")
+	}
+	fname := FileName(info)
+
+	wf, err := f.vfs.Create(f.dir, fname, uint64(info.SizeLimit))
+	if err != nil {
+		return nil, err
+	}
+
+	return createFile(info, wf, &f.bufPool)
+}
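// Editor's note: the naming pattern above zero-pads BaseIndex (decimal) and
// ID (hex) so that lexical directory order matches log order. For example:
//
//	info := types.SegmentInfo{BaseIndex: 1, ID: 0x2b}
//	segment.FileName(info) // "00000000000000000001-000000000000002b.wal"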
+// RecoverTail is called on an unsealed segment when re-opening the WAL. It
+// will attempt to recover from a possible crash. It will either return an
+// error, or return a valid segmentWriter that is ready for further appends.
+// If the expected tail segment doesn't exist it must return an error wrapping
+// os.ErrNotExist.
+func (f *Filer) RecoverTail(info types.SegmentInfo) (types.SegmentWriter, error) {
+	fname := FileName(info)
+
+	wf, err := f.vfs.OpenWriter(f.dir, fname)
+	if err != nil {
+		return nil, err
+	}
+
+	return recoverFile(info, wf, &f.bufPool)
+}
+
+// Open an already sealed segment for reading. Open may validate the file's
+// header and return an error if it doesn't match the expected info.
+func (f *Filer) Open(info types.SegmentInfo) (types.SegmentReader, error) {
+	fname := FileName(info)
+
+	rf, err := f.vfs.OpenReader(f.dir, fname)
+	if err != nil {
+		return nil, err
+	}
+
+	// Validate the header here, since openReader is re-used by the writer,
+	// where it's valid for the file header not to be committed yet after a
+	// crash, so we can't check it there.
+	var hdr [fileHeaderLen]byte
+
+	if _, err := rf.ReadAt(hdr[:], 0); err != nil {
+		if errors.Is(err, io.EOF) {
+			// Treat failure to read a header as corruption since a sealed file should
+			// never not have a valid header. (I.e. even if crashes happen it should
+			// be impossible to seal a segment with no header written, so this
+			// indicates that something truncated the file after the fact.)
+			return nil, fmt.Errorf("%w: failed to read header: %s", types.ErrCorrupt, err)
+		}
+		return nil, err
+	}
+
+	gotInfo, err := readFileHeader(hdr[:])
+	if err != nil {
+		return nil, err
+	}
+
+	if err := validateFileHeader(*gotInfo, info); err != nil {
+		return nil, err
+	}
+
+	return openReader(info, rf, &f.bufPool)
+}
+
+// List returns the set of segment IDs currently stored. It's used by the WAL
+// on recovery to find any segment files that need to be deleted following an
+// unclean shutdown. The returned map is a map of ID -> BaseIndex. BaseIndex
+// is returned to allow subsequent Delete calls to be made.
+func (f *Filer) List() (map[uint64]uint64, error) {
+	segs, _, err := f.listInternal()
+	return segs, err
+}
+
+func (f *Filer) listInternal() (map[uint64]uint64, []uint64, error) {
+	files, err := f.vfs.ListDir(f.dir)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	segs := make(map[uint64]uint64)
+	sorted := make([]uint64, 0)
+	for _, file := range files {
+		if !strings.HasSuffix(file, segmentFileSuffix) {
+			continue
+		}
+		// Parse BaseIndex and ID from the file name
+		var bIdx, id uint64
+		n, err := fmt.Sscanf(file, segmentFileNamePattern, &bIdx, &id)
+		if err != nil {
+			return nil, nil, types.ErrCorrupt
+		}
+		if n != 2 {
+			// Misnamed segment files with the right suffix indicate a bug or
+			// tampering; we can't be sure what's happened to the data.
+			return nil, nil, types.ErrCorrupt
+		}
+		segs[id] = bIdx
+		sorted = append(sorted, id)
+	}
+
+	return segs, sorted, nil
+}
+
+// Delete removes the segment with the given baseIndex and id if it exists.
+// Note that baseIndex is technically redundant since ID is unique on its own.
+// But in practice we name files (or keys) with both so that they sort
+// correctly. This interface allows a simpler implementation where we can just
+// delete the file if it exists without having to scan the underlying storage.
+func (f *Filer) Delete(baseIndex uint64, ID uint64) error {
+	fname := fmt.Sprintf(segmentFileNamePattern, baseIndex, ID)
+	return f.vfs.Delete(f.dir, fname)
+}
+
+// DumpSegment attempts to read the segment file specified by the baseIndex
+// and ID. Its intended purpose is for debugging the contents of segment files
+// and, unlike the SegmentFiler interface, it doesn't assume the caller has
+// access to the correct metadata. This allows dumping log segments in a WAL
+// that is still being written to by another process. Without metadata we
+// don't know if the file is sealed, so we always recover by reading through
+// the whole file. If after or before are non-zero, they specify an exclusive
+// lower or upper bound on which log entries should be emitted. No error
+// checking is done on the read data. fn is called for each entry, passing the
+// raft info read from the file header (so that the caller knows which codec
+// to use, for example), the raft index of the entry, and the raw bytes of the
+// entry itself. The callback must return true to continue reading. The data
+// slice is only valid for the lifetime of the call.
+func (f *Filer) DumpSegment(baseIndex uint64, ID uint64, after, before uint64, fn func(info types.SegmentInfo, e types.LogEntry) (bool, error)) error {
+	fname := fmt.Sprintf(segmentFileNamePattern, baseIndex, ID)
+
+	rf, err := f.vfs.OpenReader(f.dir, fname)
+	if err != nil {
+		return err
+	}
+
+	buf := make([]byte, 64*1024)
+	idx := baseIndex
+
+	type frameInfo struct {
+		Index  uint64
+		Offset int64
+		Len    uint32
+	}
+	var batch []frameInfo
+
+	_, err = readThroughSegment(rf, func(info types.SegmentInfo, fh frameHeader, offset int64) (bool, error) {
+		if fh.typ == FrameCommit {
+			// All the previous entries have been committed. Read them and send up to
+			// the caller.
+			for _, frame := range batch {
+				// Check the header is reasonable
+				if frame.Len > MaxEntrySize {
+					return false, fmt.Errorf("%w: failed to read entry idx=%d, frame header length (%d) is larger than MaxEntrySize",
+						types.ErrCorrupt, frame.Index, frame.Len)
+				}
+
+				if frame.Len > uint32(len(buf)) {
+					buf = make([]byte, frame.Len)
+				}
+
+				n, err := rf.ReadAt(buf[:frame.Len], frame.Offset+frameHeaderLen)
+				if err != nil {
+					return false, err
+				}
+				if uint32(n) < frame.Len {
+					return false, io.ErrUnexpectedEOF
+				}
+
+				ok, err := fn(info, types.LogEntry{Index: frame.Index, Data: buf[:n]})
+				if !ok || err != nil {
+					return ok, err
+				}
+			}
+			// Reset batch
+			batch = batch[:0]
+			return true, nil
+		}
+
+		if fh.typ != FrameEntry {
+			return true, nil
+		}
+
+		if idx <= after {
+			// Not in the range we care about, skip reading the entry.
+			idx++
+			return true, nil
+		}
+		if before > 0 && idx >= before {
+			// We're done
+			return false, nil
+		}
+
+		batch = append(batch, frameInfo{idx, offset, fh.len})
+		idx++
+		return true, nil
+	})
+
+	return err
+}
+
+// DumpLogs attempts to read all log entries from segment files in the
+// directory for debugging purposes. It does _not_ use the metadata and so may
+// output log entries that are uncommitted or already truncated as far as the
+// writing process is concerned. As such it should not be used for replication
+// of data. It is useful though to debug the contents of the log even while
+// the writing application is still running. After and before, if non-zero,
+// specify exclusive bounds on the logs that should be returned, which may
+// allow the implementation to skip reading entire segment files that are not
+// in the range.
+func (f *Filer) DumpLogs(after, before uint64, fn func(info types.SegmentInfo, e types.LogEntry) (bool, error)) error {
+	baseIndexes, segIDsSorted, err := f.listInternal()
+	if err != nil {
+		return err
+	}
+
+	for i, id := range segIDsSorted {
+		baseIndex := baseIndexes[id]
+		nextBaseIndex := uint64(0)
+		if i+1 < len(segIDsSorted) {
+			// This is not the last segment; peek at the base index of that one and
+			// assume that this segment won't contain indexes that high.
+			nextBaseIndex = baseIndexes[segIDsSorted[i+1]]
+		}
+		// See if this file contains any indexes in the range
+		if after > 0 && nextBaseIndex > 0 && after >= nextBaseIndex {
+			// This segment is all indexes before the lower bound we care about
+			continue
+		}
+		if before > 0 && before <= baseIndex {
+			// This segment is all indexes higher than the upper bound. We've output
+			// every log in the range at this point (barring edge cases where we race
+			// with a truncation which leaves multiple generations of segment files on
+			// disk, which we are going to ignore for now).
+			return nil
+		}
+
+		// We probably care about at least some of the entries in this segment
+		err := f.DumpSegment(baseIndex, id, after, before, fn)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/raft-wal/segment/format.go b/vendor/github.com/hashicorp/raft-wal/segment/format.go
new file mode 100644
index 0000000000000..080c2a53c3549
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft-wal/segment/format.go
@@ -0,0 +1,252 @@
+// Copyright (c) HashiCorp, Inc
+// SPDX-License-Identifier: MPL-2.0
+
+package segment
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/hashicorp/raft-wal/types"
+)
+
+const (
+	// MaxEntrySize is the largest we allow any single raft log entry to be.
+	// This is larger than our raft implementation ever allows, so it seems
+	// safe to encode statically for now. We could make this configurable. Its
+	// main purpose is to limit allocation when reading entries back if their
+	// lengths are corrupted.
+	MaxEntrySize = 64 * 1024 * 1024 // 64 MiB
+
+	// minBufSize is the size at which we allocate read and write buffers.
+	// Setting it larger wastes more memory but increases the chances that
+	// we'll read the whole frame in a single shot and not need a second
+	// allocation and trip to the disk.
+	minBufSize = 64 * 1024
+
+	fileHeaderLen = 32
+	version       = 0
+	magic         = 0x58eb6b0d
+
+	// Note that this must remain a power of 2 to ensure that aligning to this
+	// also aligns to sector boundaries.
+	frameHeaderLen = 8
+)
+
+const (
+	// Start iota from 0
+	FrameInvalid uint8 = iota
+	FrameEntry
+	FrameIndex
+	FrameCommit
+)
+
+var (
+	// ErrTooBig indicates that the caller tried to write a logEntry with a
+	// payload that's larger than we are prepared to support.
+	ErrTooBig = errors.New("entries larger than 64MiB are not supported")
+)
+
+/*
+
+  File Header functions
+
+  0      1      2      3      4      5      6      7      8
+  +------+------+------+------+------+------+------+------+
+  | Magic                     | Reserved           | Vsn  |
+  +------+------+------+------+------+------+------+------+
+  | BaseIndex                                             |
+  +------+------+------+------+------+------+------+------+
+  | SegmentID                                             |
+  +------+------+------+------+------+------+------+------+
+  | Codec                                                 |
+  +------+------+------+------+------+------+------+------+
+
+*/
+
+// writeFileHeader writes a file header into buf for the given file metadata.
+func writeFileHeader(buf []byte, info types.SegmentInfo) error {
+	if len(buf) < fileHeaderLen {
+		return io.ErrShortBuffer
+	}
+
+	binary.LittleEndian.PutUint32(buf[0:4], magic)
+	// Explicitly zero Reserved bytes just in case
+	buf[4] = 0
+	buf[5] = 0
+	buf[6] = 0
+	buf[7] = version
+	binary.LittleEndian.PutUint64(buf[8:16], info.BaseIndex)
+	binary.LittleEndian.PutUint64(buf[16:24], info.ID)
+	binary.LittleEndian.PutUint64(buf[24:32], info.Codec)
+	return nil
+}
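// Editor's note: an illustrative round-trip through the header helpers above,
// given some SegmentInfo value info (sketch only):
//
//	var hdr [fileHeaderLen]byte
//	_ = writeFileHeader(hdr[:], info)
//	got, err := readFileHeader(hdr[:]) // got.BaseIndex, got.ID and got.Codec match info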
+// readFileHeader reads a file header from buf.
+func readFileHeader(buf []byte) (*types.SegmentInfo, error) {
+	if len(buf) < fileHeaderLen {
+		return nil, io.ErrShortBuffer
+	}
+
+	var i types.SegmentInfo
+	m := binary.LittleEndian.Uint64(buf[0:8])
+	if m != magic {
+		return nil, types.ErrCorrupt
+	}
+	if buf[7] != version {
+		return nil, types.ErrCorrupt
+	}
+	i.BaseIndex = binary.LittleEndian.Uint64(buf[8:16])
+	i.ID = binary.LittleEndian.Uint64(buf[16:24])
+	i.Codec = binary.LittleEndian.Uint64(buf[24:32])
+	return &i, nil
+}
+
+func validateFileHeader(got, expect types.SegmentInfo) error {
+	if expect.ID != got.ID {
+		return fmt.Errorf("%w: segment header ID %x doesn't match metadata %x",
+			types.ErrCorrupt, got.ID, expect.ID)
+	}
+	if expect.BaseIndex != got.BaseIndex {
+		return fmt.Errorf("%w: segment header BaseIndex %d doesn't match metadata %d",
+			types.ErrCorrupt, got.BaseIndex, expect.BaseIndex)
+	}
+	if expect.Codec != got.Codec {
+		return fmt.Errorf("%w: segment header Codec %d doesn't match metadata %d",
+			types.ErrCorrupt, got.Codec, expect.Codec)
+	}
+
+	return nil
+}
+
+/*
+  Frame Functions
+
+  0      1      2      3      4      5      6      7      8
+  +------+------+------+------+------+------+------+------+
+  | Type | Reserved           | Length/CRC                |
+  +------+------+------+------+------+------+------+------+
+*/
+
+type frameHeader struct {
+	typ uint8
+	len uint32
+	crc uint32
+}
+
+func writeFrame(buf []byte, h frameHeader, payload []byte) error {
+	if len(buf) < encodedFrameSize(int(h.len)) {
+		return io.ErrShortBuffer
+	}
+	if err := writeFrameHeader(buf, h); err != nil {
+		return err
+	}
+	copy(buf[frameHeaderLen:], payload[:h.len])
+	// Explicitly write null bytes for padding
+	padBytes := padLen(int(h.len))
+	for i := 0; i < padBytes; i++ {
+		buf[frameHeaderLen+int(h.len)+i] = 0x0
+	}
+	return nil
+}
+
+func writeFrameHeader(buf []byte, h frameHeader) error {
+	if len(buf) < frameHeaderLen {
+		return io.ErrShortBuffer
+	}
+	buf[0] = h.typ
+	buf[1] = 0
+	buf[2] = 0
+	buf[3] = 0
+	lOrCRC := h.len
+	if h.typ == FrameCommit {
+		lOrCRC = h.crc
+	}
+	binary.LittleEndian.PutUint32(buf[4:8], lOrCRC)
+	return nil
+}
+
+var zeroHeader [frameHeaderLen]byte
+
+func readFrameHeader(buf []byte) (frameHeader, error) {
+	var h frameHeader
+	if len(buf) < frameHeaderLen {
+		return h, io.ErrShortBuffer
+	}
+
+	switch buf[0] {
+	default:
+		return h, fmt.Errorf("%w: corrupt frame header with unknown type %d", types.ErrCorrupt, buf[0])
+
+	case FrameInvalid:
+		// Check if the whole header is zero and return a zero frame as this could
+		// just indicate we've read right off the end of the written data during
+		// recovery.
+		if bytes.Equal(buf[:frameHeaderLen], zeroHeader[:]) {
+			return h, nil
+		}
+		return h, fmt.Errorf("%w: corrupt frame header with type 0 but non-zero other fields", types.ErrCorrupt)
+
+	case FrameEntry, FrameIndex:
+		h.typ = buf[0]
+		h.len = binary.LittleEndian.Uint32(buf[4:8])
+
+	case FrameCommit:
+		h.typ = buf[0]
+		h.crc = binary.LittleEndian.Uint32(buf[4:8])
+	}
+	return h, nil
+}
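// Editor's note: an illustrative round-trip through the frame helpers above
// (sketch only):
//
//	var b [frameHeaderLen]byte
//	_ = writeFrameHeader(b[:], frameHeader{typ: FrameEntry, len: 123})
//	fh, _ := readFrameHeader(b[:]) // fh.typ == FrameEntry, fh.len == 123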
+// padLen returns how many bytes of padding should be added to a frame of
+// length n to ensure it is a multiple of frameHeaderLen. We ensure
+// frameHeaderLen is a power of two so that a typical sector size (e.g. 512
+// bytes) is always a multiple of it, to reduce the risk that headers are torn
+// by being written across sector boundaries. It will return an int in the
+// range [0, 7].
+func padLen(n int) int {
+	// This looks a bit awful but it's just doing (n % 8) and subtracting that
+	// from 8 to get the number of extra bytes needed to get up to the next
+	// 8-byte boundary. The extra & 7 is to handle the case where n is a
+	// multiple of 8 already, and so n%8 is 0 and 8-0 is 8. By &-ing 8 (0b1000)
+	// with 7 (0b111) we effectively wrap it back around to 0. This only works
+	// as long as frameHeaderLen is a power of 2, but that's necessary per the
+	// comment above.
+	return (frameHeaderLen - (n % frameHeaderLen)) & (frameHeaderLen - 1)
+}
+
+func encodedFrameSize(payloadLen int) int {
+	return frameHeaderLen + payloadLen + padLen(payloadLen)
+}
+
+func indexFrameSize(numEntries int) int {
+	// Index frames are completely unnecessary if the whole block is a
+	// continuation with no new entries.
+	if numEntries == 0 {
+		return 0
+	}
+	return encodedFrameSize(numEntries * 4)
+}
+
+func writeIndexFrame(buf []byte, offsets []uint32) error {
+	if len(buf) < indexFrameSize(len(offsets)) {
+		return io.ErrShortBuffer
+	}
+	fh := frameHeader{
+		typ: FrameIndex,
+		len: uint32(len(offsets) * 4),
+	}
+	if err := writeFrameHeader(buf, fh); err != nil {
+		return err
+	}
+	cursor := frameHeaderLen
+	for _, o := range offsets {
+		binary.LittleEndian.PutUint32(buf[cursor:], o)
+		cursor += 4
+	}
+	if (len(offsets) % 2) == 1 {
+		// Odd number of entries, zero pad to keep it 8-byte aligned
+		binary.LittleEndian.PutUint32(buf[cursor:], 0)
+	}
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/raft-wal/segment/reader.go b/vendor/github.com/hashicorp/raft-wal/segment/reader.go
new file mode 100644
index 0000000000000..2be44162a5460
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft-wal/segment/reader.go
@@ -0,0 +1,160 @@
+// Copyright (c) HashiCorp, Inc
+// SPDX-License-Identifier: MPL-2.0
+
+package segment
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"sync"
+
+	"github.com/hashicorp/raft-wal/types"
+)
+
+// Reader allows reading logs from a segment file.
+type Reader struct {
+	info types.SegmentInfo
+	rf   types.ReadableFile
+
+	bufPool *sync.Pool
+
+	// tail optionally provides an interface to the writer state when this is an
+	// unsealed segment so we can fetch from its in-memory index.
+	tail tailWriter
+}
+
+type tailWriter interface {
+	OffsetForFrame(idx uint64) (uint32, error)
+}
+
+func openReader(info types.SegmentInfo, rf types.ReadableFile, bufPool *sync.Pool) (*Reader, error) {
+	r := &Reader{
+		info:    info,
+		rf:      rf,
+		bufPool: bufPool,
+	}
+
+	return r, nil
+}
+
+// Close implements io.Closer
+func (r *Reader) Close() error {
+	return r.rf.Close()
+}
+
+// GetLog returns the raw log entry bytes associated with idx. If the log
+// doesn't exist in this segment types.ErrNotFound must be returned.
+func (r *Reader) GetLog(idx uint64) (*types.PooledBuffer, error) {
+	offset, err := r.findFrameOffset(idx)
+	if err != nil {
+		return nil, err
+	}
+
+	_, payload, err := r.readFrame(offset)
+	if err != nil {
+		return nil, err
+	}
+	return payload, err
+}
+
+func (r *Reader) readFrame(offset uint32) (frameHeader, *types.PooledBuffer, error) {
+	buf := r.makeBuffer()
+
+	n, err := r.rf.ReadAt(buf.Bs, int64(offset))
+	if errors.Is(err, io.EOF) && n >= frameHeaderLen {
+		// We might have hit EOF just because our read buffer (at least 64KiB) might
+		// be larger than the space left in the file (say if files are tiny or if we
+		// are reading a frame near the end). So don't treat EOF as an error as
+		// long as we have actually managed to read a frameHeader - we'll work out
+		// if we got the whole thing or not below.
+		err = nil
+
+		// Re-slice buf.Bs so its len() reflects only what we actually managed to
+		// read. Note this doesn't impact the buffer length when it's returned to
+		// the pool, which will still return the whole cap.
+		buf.Bs = buf.Bs[:n]
+	}
+	if err != nil {
+		return frameHeader{}, nil, err
+	}
+	fh, err := readFrameHeader(buf.Bs)
+	if err != nil {
+		return fh, nil, err
+	}
+
+	if (frameHeaderLen + int(fh.len)) <= len(buf.Bs) {
+		// We already have all we need read, just return it sliced to just include
+		// the payload.
+		buf.Bs = buf.Bs[frameHeaderLen : frameHeaderLen+fh.len]
+		return fh, buf, nil
+	}
+	// Need to read again with a bigger buffer, so return this one.
+	buf.Close()
+
+	// Need to read more bytes; validate that len is a sensible number.
+	if fh.len > MaxEntrySize {
+		return fh, nil, fmt.Errorf("%w: frame header indicates a record larger than MaxEntrySize (%d bytes)", types.ErrCorrupt, MaxEntrySize)
+	}
+
+	buf = &types.PooledBuffer{
+		Bs: make([]byte, fh.len),
+		// No closer, let outsized buffers be GCed in case they are massive and way
+		// bigger than we need again. Could reconsider this if we find we need to
+		// optimize for frequent > minBufSize reads.
+	}
+	if _, err := r.rf.ReadAt(buf.Bs, int64(offset+frameHeaderLen)); err != nil {
+		return fh, nil, err
+	}
+	return fh, buf, nil
+}
+
+func (r *Reader) makeBuffer() *types.PooledBuffer {
+	if r.bufPool == nil {
+		return &types.PooledBuffer{Bs: make([]byte, minBufSize)}
+	}
+	buf := r.bufPool.Get().([]byte)
+	return &types.PooledBuffer{
+		Bs: buf,
+		CloseFn: func() {
+			// Note we always return the whole allocated buf regardless of what Bs
+			// ended up being sliced to.
+			r.bufPool.Put(buf)
+		},
+	}
+}
+
+func (r *Reader) findFrameOffset(idx uint64) (uint32, error) {
+	if r.tail != nil {
+		// This is not a sealed segment.
+		return r.tail.OffsetForFrame(idx)
+	}
+
+	// Sealed segment, read from the on-disk index block.
+	if r.info.IndexStart == 0 {
+		return 0, fmt.Errorf("sealed segment has no index block")
+	}
+
+	if idx < r.info.MinIndex || (r.info.MaxIndex > 0 && idx > r.info.MaxIndex) {
+		return 0, types.ErrNotFound
+	}
+
+	// IndexStart is the offset to the first entry in the index array. We need to
+	// find the byte offset to the Nth entry.
+	entryOffset := (idx - r.info.BaseIndex)
+	byteOffset := r.info.IndexStart + (entryOffset * 4)
+
+	var bs [4]byte
+	n, err := r.rf.ReadAt(bs[:], int64(byteOffset))
+	if err == io.EOF && n == 4 {
+		// We read all of it; it just happened to end exactly at the end of the
+		// file, so ignore the EOF.
+		err = nil
+	}
+	if err != nil {
+		return 0, fmt.Errorf("failed to read segment index: %w", err)
+	}
+	offset := binary.LittleEndian.Uint32(bs[:])
+	return offset, nil
+}
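// Editor's note: the sealed-segment lookup above is plain arithmetic into the
// on-disk index array of 4-byte offsets. For example, with BaseIndex=100 and
// IndexStart=4096, looking up idx=103:
//
//	entryOffset := idx - info.BaseIndex           // 103 - 100 = 3
//	byteOffset := info.IndexStart + entryOffset*4 // 4096 + 12 = 4108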
diff --git a/vendor/github.com/hashicorp/raft-wal/segment/writer.go b/vendor/github.com/hashicorp/raft-wal/segment/writer.go
new file mode 100644
index 0000000000000..3e67cc6054b07
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft-wal/segment/writer.go
@@ -0,0 +1,599 @@
+// Copyright (c) HashiCorp, Inc
+// SPDX-License-Identifier: MPL-2.0
+
+package segment
+
+import (
+	"fmt"
+	"hash/crc32"
+	"io"
+	"sync"
+	"sync/atomic"
+
+	"github.com/hashicorp/raft-wal/types"
+)
+
+// Writer allows appending logs to a segment file as well as reading them back.
+type Writer struct {
+	// commitIdx is updated after an append batch is fully persisted to disk to
+	// allow readers to read the new value. Note that readers must not read
+	// values larger than this even if they are available in tailIndex, as they
+	// are not yet committed to disk!
+	commitIdx uint64
+
+	// offsets is the in-memory index of entry frame offsets. The first element
+	// corresponds to the BaseIndex. It is accessed concurrently by readers and
+	// the single writer without locks! This is race-free via the following
+	// invariants:
+	//  - the slice here is never mutated, only copied, though it may still
+	//    refer to the same backing array.
+	//  - readers only ever read up to len(offsets) in the atomically accessed
+	//    slice. Those elements of the backing array are immutable and will
+	//    never be modified once they are accessible to readers.
+	//  - readers and writers synchronize on atomic access to the slice
+	//  - the serial writer only appends to the end, which either mutates the
+	//    shared backing array at an index greater than the len any reader has
+	//    seen, or allocates a new backing array and copies the old one into
+	//    it, which also never mutates the entries readers can already "see"
+	//    via the old slice.
+	offsets atomic.Value // []uint32
+
+	// writer state is accessed only on the (serial) write path so doesn't need
+	// synchronization.
+	writer struct {
+		// commitBuf stores the pending frames waiting to be flushed to the
+		// current tail block.
+		commitBuf []byte
+
+		// crc is the rolling crc32 Castagnoli sum of all data written since the
+		// last fsync.
+		crc uint32
+
+		// writeOffset is the absolute file offset up to which we've written data
+		// to the file. The contents of commitBuf will be written at this offset
+		// when it commits or we reach the end of the block, whichever happens
+		// first.
+		writeOffset uint32
+
+		// indexStart is set when the tail is sealed, indicating the file offset
+		// at which the index array was written.
+		indexStart uint64
+	}
+
+	info types.SegmentInfo
+	wf   types.WritableFile
+	r    types.SegmentReader
+}
+
+func createFile(info types.SegmentInfo, wf types.WritableFile, bufPool *sync.Pool) (*Writer, error) {
+	r, err := openReader(info, wf, bufPool)
+	if err != nil {
+		return nil, err
+	}
+	w := &Writer{
+		info: info,
+		wf:   wf,
+		r:    r,
+	}
+	r.tail = w
+	if err := w.initEmpty(); err != nil {
+		return nil, err
+	}
+	return w, nil
+}
+
+func recoverFile(info types.SegmentInfo, wf types.WritableFile, bufPool *sync.Pool) (*Writer, error) {
+	r, err := openReader(info, wf, bufPool)
+	if err != nil {
+		return nil, err
+	}
+	w := &Writer{
+		info: info,
+		wf:   wf,
+		r:    r,
+	}
+	r.tail = w
+
+	if err := w.recoverTail(); err != nil {
+		return nil, err
+	}
+
+	return w, nil
+}
+
+func (w *Writer) initEmpty() error {
+	// Write the header into the write buffer, to be written out with the first
+	// commit.
+	w.writer.writeOffset = 0
+	w.ensureBufCap(fileHeaderLen)
+	w.writer.commitBuf = w.writer.commitBuf[:fileHeaderLen]
+
+	if err := writeFileHeader(w.writer.commitBuf, w.info); err != nil {
+		return err
+	}
+
+	w.writer.crc = crc32.Checksum(w.writer.commitBuf[:fileHeaderLen], castagnoliTable)
+
+	// Initialize the index
+	offsets := make([]uint32, 0, 32*1024)
+	w.offsets.Store(offsets)
+	return nil
+}
+
+func (w *Writer) recoverTail() error {
+	// We need to track the last two commit frames
+	type commitInfo struct {
+		fh         frameHeader
+		offset     int64
+		crcStart   int64
+		offsetsLen int
+	}
+	var prevCommit, finalCommit *commitInfo
+
+	offsets := make([]uint32, 0, 32*1024)
+
+	readInfo, err := readThroughSegment(w.wf, func(_ types.SegmentInfo, fh frameHeader, offset int64) (bool, error) {
+		switch fh.typ {
+		case FrameEntry:
+			// Record the frame offset
+			offsets = append(offsets, uint32(offset))
+
+		case FrameIndex:
+			// So this segment was sealed (or an attempt was made). Keep track of
+			// this indexStart in case it turns out the seal actually committed
+			// completely. We store the start of the actual array, not the frame
+			// header.
+			w.writer.indexStart = uint64(offset) + frameHeaderLen
+
+		case FrameCommit:
+			// The payload is not the length field in this case!
+			prevCommit = finalCommit
+			finalCommit = &commitInfo{
+				fh:         fh,
+				offset:     offset,
+				crcStart:   0,            // First commit includes the file header
+				offsetsLen: len(offsets), // Track how many entries were found up to this commit point.
+			}
+			if prevCommit != nil {
+				finalCommit.crcStart = prevCommit.offset + frameHeaderLen
+			}
+		}
+		return true, nil
+	})
+	if err != nil {
+		return err
+	}
+
+	if finalCommit == nil {
+		// There were no commit frames found at all. This segment file is
+		// effectively empty. Init it that way, ready for appending. This
+		// overwrites the file header so it doesn't matter if it was valid or not.
+		return w.initEmpty()
+	}
+
+	// Assume that the final commit is good for now and set the writer state
+	w.writer.writeOffset = uint32(finalCommit.offset + frameHeaderLen)
+
+	// Just store what we have for now to ensure the defer doesn't panic; we'll
+	// probably update this below.
+	w.offsets.Store(offsets)
+
+	// Whichever path we take, fix up the commitIdx before we leave
+	defer func() {
+		ofs := w.getOffsets()
+		if len(ofs) > 0 {
+			// Non-atomic access is OK because this file is not visible to any other
+			// threads yet.
+			w.commitIdx = w.info.BaseIndex + uint64(len(ofs)) - 1
+		}
+	}()
+
+	if finalCommit.offsetsLen < len(offsets) {
+		// Some entries were found after the last commit. Those must be a partial
+		// write that was uncommitted, so they can be ignored. But the fact they
+		// were written at all means that the last commit frame must have been
+		// completed and acknowledged, so we don't need to verify anything. Just
+		// truncate the extra entries from the index and reset the write cursor to
+		// continue appending after the last commit.
+		offsets = offsets[:finalCommit.offsetsLen]
+		w.offsets.Store(offsets)
+
+		// Since at least one commit was found, the header better be valid!
+		return validateFileHeader(*readInfo, w.info)
+	}
+
+	// The last frame was a commit frame! Let's check that all the data written
+	// in that commit frame made it to disk. Verify the length first.
+	bufLen := finalCommit.offset - finalCommit.crcStart
+	// We know bufLen can't be bigger than the whole segment file because none
+	// of the values above were read from the data, just from the offsets we
+	// moved through.
+	batchBuf := make([]byte, bufLen)
+
+	if _, err := w.wf.ReadAt(batchBuf, finalCommit.crcStart); err != nil {
+		return fmt.Errorf("failed to read last committed batch for CRC validation: %w", err)
+	}
+
+	gotCrc := crc32.Checksum(batchBuf, castagnoliTable)
+	if gotCrc == finalCommit.fh.crc {
+		// All is good. We have already set up the state we need for the writer,
+		// other than offsets.
+		w.offsets.Store(offsets)
+
+		// Since at least one commit was found, the header better be valid!
+		return validateFileHeader(*readInfo, w.info)
+	}
+
+	// The last commit was incomplete; rewind back to the previous one or the
+	// start of the file.
+	if prevCommit == nil {
+		// initEmpty will re-write the file header so it doesn't matter whether it
+		// was corrupt or not!
+		return w.initEmpty()
+	}
+
+	w.writer.writeOffset = uint32(prevCommit.offset + frameHeaderLen)
+	offsets = offsets[:prevCommit.offsetsLen]
+	w.offsets.Store(offsets)
+
+	// Since at least one commit was found, the header better be valid!
+	return validateFileHeader(*readInfo, w.info)
+}
+
+// Close implements io.Closer
+func (w *Writer) Close() error {
+	return w.r.Close()
+}
+
+// GetLog implements types.SegmentReader
+func (w *Writer) GetLog(idx uint64) (*types.PooledBuffer, error) {
+	return w.r.GetLog(idx)
+}
+
+// Append adds one or more entries. It must not return until the entries are
+// durably stored, otherwise raft's guarantees will be compromised.
+func (w *Writer) Append(entries []types.LogEntry) error {
+	if len(entries) < 1 {
+		return nil
+	}
+
+	if w.writer.indexStart > 0 {
+		return types.ErrSealed
+	}
+
+	flushed := false
+
+	// Save any state we may need to rollback.
+	beforeBuf := w.writer.commitBuf
+	beforeCRC := w.writer.crc
+	beforeIndexStart := w.writer.indexStart
+	beforeWriteOffset := w.writer.writeOffset
+	beforeOffsets := w.offsets.Load()
+
+	defer func() {
+		if !flushed {
+			// Rollback writer state on error
+			w.writer.commitBuf = beforeBuf
+			w.writer.crc = beforeCRC
+			w.writer.indexStart = beforeIndexStart
+			w.writer.writeOffset = beforeWriteOffset
+			w.offsets.Store(beforeOffsets)
+		}
+	}()
+
+	// Iterate entries and append each one
+	for _, e := range entries {
+		if err := w.appendEntry(e); err != nil {
+			return err
+		}
+	}
+
+	ofs := w.getOffsets()
+	// Work out if we need to seal before we commit and sync.
+	if (w.writer.writeOffset + uint32(len(w.writer.commitBuf)+indexFrameSize(len(ofs)))) > w.info.SizeLimit {
+		// Seal the segment! We seal it by writing an index frame before we commit.
+		if err := w.appendIndex(); err != nil {
+			return err
+		}
+	}
+
+	// Write the commit frame
+	if err := w.appendCommit(); err != nil {
+		return err
+	}
+
+	flushed = true
+
+	// Commit in-memory
+	atomic.StoreUint64(&w.commitIdx, entries[len(entries)-1].Index)
+	return nil
+}
+
+func (w *Writer) getOffsets() []uint32 {
+	return w.offsets.Load().([]uint32)
+}
+
+// OffsetForFrame implements tailWriter and allows readers to lookup entry
+// frames in the tail's in-memory index.
+func (w *Writer) OffsetForFrame(idx uint64) (uint32, error) {
+	if idx < w.info.BaseIndex || idx < w.info.MinIndex || idx > w.LastIndex() {
+		return 0, types.ErrNotFound
+	}
+	os := w.getOffsets()
+	entryIndex := idx - w.info.BaseIndex
+	// No bounds check on entryIndex since LastIndex must ensure it's in bounds.
+	return os[entryIndex], nil
+}
+
+func (w *Writer) appendEntry(e types.LogEntry) error {
+	offsets := w.getOffsets()
+
+	// Check the invariant that this entry is the next one we expect; otherwise
+	// our index logic is incorrect and will result in panics on read.
+	if e.Index != w.info.BaseIndex+uint64(len(offsets)) {
+		return fmt.Errorf("non-monotonic append to segment with BaseIndex=%d. Entry index %d, expected %d",
+			w.info.BaseIndex, e.Index, w.info.BaseIndex+uint64(len(offsets)))
+	}
+
+	fh := frameHeader{
+		typ: FrameEntry,
+		len: uint32(len(e.Data)),
+	}
+	bufOffset, err := w.appendFrame(fh, e.Data)
+	if err != nil {
+		return err
+	}
+
+	// Update the offsets index.
+
+	// Add the index entry. Note this is safe despite mutating the same backing
+	// array as tail because it's beyond the limit current readers will access
+	// until we do the atomic update below. Even if append re-allocates the
+	// backing array, it will only read the indexes smaller than numEntries from
+	// the old array to copy them into the new one, and we are not mutating the
+	// same memory locations. Old readers might still be looking at the old
+	// array (lower than numEntries) through the current tail.offsets slice, but
+	// we are not touching that, at least below numEntries.
+	offsets = append(offsets, w.writer.writeOffset+uint32(bufOffset))
+
+	// Now we can make it available to readers. Note that readers still
+	// shouldn't read it until we actually commit to disk (and increment
+	// commitIdx), but it's race-free for them to do so now!
+	w.offsets.Store(offsets)
+	return nil
+}
+
+func (w *Writer) appendCommit() error {
+	fh := frameHeader{
+		typ: FrameCommit,
+		crc: w.writer.crc,
+	}
+	if _, err := w.appendFrame(fh, nil); err != nil {
+		return err
+	}
+
+	// Flush all writes to the file
+	if err := w.sync(); err != nil {
+		return err
+	}
+
+	// Finally, reset the crc so that by the time we write the next trailer
+	// we'll know where the append batch started.
+	w.writer.crc = 0
+	return nil
+}
+
+func (w *Writer) ensureBufCap(extraLen int) {
+	needCap := len(w.writer.commitBuf) + extraLen
+	if cap(w.writer.commitBuf) < needCap {
+		newSize := minBufSize
+		// Double the buffer size until it's big enough to amortize the cost
+		for newSize < needCap {
+			newSize = newSize * 2
+		}
+		newBuf := make([]byte, newSize)
+		oldLen := len(w.writer.commitBuf)
+		copy(newBuf, w.writer.commitBuf)
+		w.writer.commitBuf = newBuf[:oldLen]
+	}
+}
+
+func (w *Writer) appendIndex() error {
+	// Append the index record before we commit (commit and flush generally
+	// happen later).
+	offsets := w.getOffsets()
+	l := indexFrameSize(len(offsets))
+	w.ensureBufCap(l)
+
+	startOff := len(w.writer.commitBuf)
+
+	if err := writeIndexFrame(w.writer.commitBuf[startOff:startOff+l], offsets); err != nil {
+		return err
+	}
+	w.writer.commitBuf = w.writer.commitBuf[:startOff+l]
+
+	// Update the crc with those values
+	w.writer.crc = crc32.Update(w.writer.crc, castagnoliTable, w.writer.commitBuf[startOff:startOff+l])
+
+	// Record the file offset where the index starts (the actual index data, so
+	// after the frame header).
+	w.writer.indexStart = uint64(w.writer.writeOffset) + uint64(startOff+frameHeaderLen)
+	return nil
+}
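// Editor's note: an illustrative picture of one committed append batch as
// laid out by appendEntry/appendIndex/appendCommit above (the index frame
// only appears in the batch that seals the segment):
//
//	| entry frame | entry frame | ... | index frame (seal only) | commit frame |
//
// The commit frame's CRC covers everything written since the previous commit
// frame (or the file header for the first batch), which is what recoverTail
// verifies on restart.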
+// appendFrame appends the given frame to the current block. The frame must
+// already fit, otherwise an error will be returned.
+func (w *Writer) appendFrame(fh frameHeader, data []byte) (int, error) {
+	// Encode the frame header into the current block buffer
+	l := encodedFrameSize(len(data))
+	w.ensureBufCap(l)
+
+	bufOffset := len(w.writer.commitBuf)
+	if err := writeFrame(w.writer.commitBuf[bufOffset:bufOffset+l], fh, data); err != nil {
+		return 0, err
+	}
+	// Update the len of commitBuf since we resliced it for the write
+	w.writer.commitBuf = w.writer.commitBuf[:bufOffset+l]
+
+	// Update the CRC
+	w.writer.crc = crc32.Update(w.writer.crc, castagnoliTable, w.writer.commitBuf[bufOffset:bufOffset+l])
+	return bufOffset, nil
+}
+
+func (w *Writer) flush() error {
+	// Write to the file
+	n, err := w.wf.WriteAt(w.writer.commitBuf, int64(w.writer.writeOffset))
+	if err == io.EOF && n == len(w.writer.commitBuf) {
+		// Writer may return EOF even if it wrote all bytes if it wrote right up to
+		// the end of the file. Ignore that case though.
+		err = nil
+	}
+	if err != nil {
+		return err
+	}
+
+	// Reset writer state ready for the next writes
+	w.writer.writeOffset += uint32(len(w.writer.commitBuf))
+	w.writer.commitBuf = w.writer.commitBuf[:0]
+	return nil
+}
+
+func (w *Writer) sync() error {
+	// Write out the current buffer to the file
+	if err := w.flush(); err != nil {
+		return err
+	}
+
+	// Sync the file
+	if err := w.wf.Sync(); err != nil {
+		return err
+	}
+
+	// Update commitIdx atomically
+	offsets := w.getOffsets()
+	commitIdx := uint64(0)
+	if len(offsets) > 0 {
+		// Probably not possible for this to be empty, but just in case we ever
+		// flush the file with only metadata written...
+		commitIdx = uint64(w.info.BaseIndex) + uint64(len(offsets)) - 1
+	}
+	atomic.StoreUint64(&w.commitIdx, commitIdx)
+	return nil
+}
+
+// Sealed returns whether the segment is sealed or not. If it is, it returns
+// true and the file offset at which its index array starts, to be saved in
+// the metadata. The WAL will call this after every append so it should be
+// relatively cheap in the common case. This design allows the final Append to
+// write out the index or any additional data needed at seal time in the same
+// fsync.
+func (w *Writer) Sealed() (bool, uint64, error) {
+	if w.writer.indexStart == 0 {
+		return false, 0, nil
+	}
+	return true, w.writer.indexStart, nil
+}
+
+// ForceSeal forces us to seal the segment by writing out an index block
+// wherever we got to in the file. After calling this it is no longer valid to
+// call Append on this file.
+func (w *Writer) ForceSeal() (uint64, error) {
+	if w.writer.indexStart > 0 {
+		// Already sealed, this is a no-op.
+		return w.writer.indexStart, nil
+	}
+
+	// Seal the segment! We seal it by writing an index frame before we commit.
+	if err := w.appendIndex(); err != nil {
+		return 0, err
+	}
+
+	// Write the commit frame
+	if err := w.appendCommit(); err != nil {
+		return 0, err
+	}
+
+	return w.writer.indexStart, nil
+}
+
+// LastIndex returns the most recently persisted index in the log. It must
+// respond without blocking on append since it's needed frequently by read
+// paths that may call it concurrently. Typically this will be loaded from an
+// atomic int. If the segment is empty, LastIndex should return zero.
+func (w *Writer) LastIndex() uint64 {
+	return atomic.LoadUint64(&w.commitIdx)
+}
+
+func readThroughSegment(r types.ReadableFile, fn func(info types.SegmentInfo, fh frameHeader, offset int64) (bool, error)) (*types.SegmentInfo, error) {
+	// First read the file header. Note we wrote it as part of the first commit
+	// so it may be missing or partially written, and that's OK as long as there
+	// are no other later commit frames!
+	var fh [fileHeaderLen]byte
+	_, err := r.ReadAt(fh[:], 0)
+	// EOF is ok - the file might be empty if we crashed before committing
+	// anything and preallocation isn't supported.
+	if err != io.EOF && err != nil {
+		return nil, err
+	}
+
+	readInfo, err := readFileHeader(fh[:])
+	if err == types.ErrCorrupt {
+		// The header is malformed or missing. Don't error yet though; we'll
+		// detect it later when we know whether it's a problem or not.
+		err = nil
+	}
+	if err != nil {
+		return nil, err
+	}
+	// Even if the header wasn't detected as corrupt it might still be, just in
+	// a way that parses as valid, since we've not verified it against the
+	// expected metadata yet. We'll wait to see if the header was part of the
+	// last commit before deciding whether to validate it for corruption. For
+	// now just make sure it's not nil so we don't have to handle nil checks
+	// everywhere.
+	if readInfo == nil {
+		// A zero-value info will fail validation against the actual metadata
+		// later if the header was corrupt when it shouldn't have been. This just
+		// prevents a nil panic.
+		readInfo = &types.SegmentInfo{}
+	}
+
+	// Read through the file from after the header until we hit zeros, EOF or
+	// corrupt frames.
+	offset := int64(fileHeaderLen)
+	var buf [frameHeaderLen]byte
+
+	for {
+		n, err := r.ReadAt(buf[:], offset)
+		if err == io.EOF {
+			if n < frameHeaderLen {
+				return readInfo, nil
+			}
+			// This is OK! The last frame in the file might be a commit frame, so as
+			// long as we have it all then we can ignore the EOF for this iteration.
+			err = nil
+		}
+		if err != nil {
+			return readInfo, fmt.Errorf("failed reading frame at offset=%d: %w", offset, err)
+		}
+		fh, err := readFrameHeader(buf[:frameHeaderLen])
+		if err != nil {
+			// This is not actually an error case. If we failed to decode it could be
+			// because of a torn write (since we don't assume writes are atomic). We
+			// assume that previously committed data is not silently corrupted by the
+			// FS (see README for details). So this must be due to corruption that
+			// happened due to non-atomic sector updates whilst committing the last
+			// write batch.
+			return readInfo, nil
+		}
+		if fh.typ == FrameInvalid {
+			// This means we've hit zeros at the end of the file (or due to an
+			// incomplete write, which we treat the same way).
+			return readInfo, nil
+		}
+
+		// Call the callback
+		shouldContinue, err := fn(*readInfo, fh, offset)
+		if err != nil {
+			return readInfo, err
+		}
+		if !shouldContinue {
+			return readInfo, nil
+		}
+
+		// Skip to the next frame
+		offset += int64(encodedFrameSize(int(fh.len)))
+	}
+}
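// Editor's note: both crash recovery (recoverTail above) and the debug dump
// path (Filer.DumpSegment) drive this same scan. An illustrative callback:
//
//	info, err := readThroughSegment(rf, func(_ types.SegmentInfo, fh frameHeader, off int64) (bool, error) {
//		// e.g. record entry offsets, or remember the last commit frame seen
//		return true, nil // keep scanning
//	})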
diff --git a/vendor/github.com/hashicorp/raft-wal/state.go b/vendor/github.com/hashicorp/raft-wal/state.go
new file mode 100644
index 0000000000000..718037801e17d
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft-wal/state.go
@@ -0,0 +1,215 @@
+// Copyright (c) HashiCorp, Inc
+// SPDX-License-Identifier: MPL-2.0
+
+package wal
+
+import (
+	"sync/atomic"
+
+	"github.com/benbjohnson/immutable"
+	"github.com/hashicorp/raft-wal/types"
+)
+
+// state is an immutable snapshot of the state of the log. Modifications must
+// be made by copying and modifying the copy. This is easy enough because
+// segments is an immutable map, so changing and re-assigning to the clone
+// won't impact the original map, and tail is just a pointer that can be
+// mutated in the shallow clone. Note that methods called on the tail
+// segmentWriter may mutate its state, so they must only be called while
+// holding the WAL's writeLock.
+type state struct {
+	// refCount tracks readers that are reading segments based on this metadata.
+	// It is accessed atomically and must be 64-bit aligned (i.e. leave it at
+	// the start of the struct).
+	refCount int32
+	// finalizer is set at most once while the WAL is holding the write lock, in
+	// order to provide a func that must be called when all current readers are
+	// done with this state. It's used for deferring closing and deleting old
+	// segments until we can be sure no reads are still in progress on them.
+	finalizer atomic.Value // func()
+
+	nextSegmentID uint64
+
+	// nextBaseIndex is used to signal which baseIndex to use next if there are
+	// no segments or current tail.
+	nextBaseIndex uint64
+	segments      *immutable.SortedMap[uint64, segmentState]
+	tail          types.SegmentWriter
+}
+
+type segmentState struct {
+	types.SegmentInfo
+
+	// r is the SegmentReader for our in-memory state.
+	r types.SegmentReader
+}
+
+// Persistent converts the in-memory state into a PersistentState.
+func (s *state) Persistent() types.PersistentState {
+	segs := make([]types.SegmentInfo, 0, s.segments.Len())
+	it := s.segments.Iterator()
+	for !it.Done() {
+		_, s, _ := it.Next()
+		segs = append(segs, s.SegmentInfo)
+	}
+	return types.PersistentState{
+		NextSegmentID: s.nextSegmentID,
+		Segments:      segs,
+	}
+}
+
+func (s *state) getLog(index uint64) (*types.PooledBuffer, error) {
+	// Check the tail writer first
+	if s.tail != nil {
+		raw, err := s.tail.GetLog(index)
+		if err != nil && err != ErrNotFound {
+			// Return actual errors since they might mask the fact that index really
+			// is in the tail but failed to read for some other reason.
+			return nil, err
+		}
+		if err == nil {
+			// No error means we found it and just need to decode.
+			return raw, nil
+		}
+		// Not in the tail segment, fall back to searching previous segments.
+	}
+
+	seg, err := s.findSegmentReader(index)
+	if err != nil {
+		return nil, err
+	}
+
+	return seg.GetLog(index)
+}
+
+// findSegmentReader searches the segment tree for the segment that contains
+// the log at index idx. It may return the tail segment, which may not in fact
+// contain idx if idx is larger than the last written index. Typically this is
+// called after already checking with the tail writer whether the log is in
+// there, which means the caller can be sure it's not going to return the tail
+// segment.
+func (s *state) findSegmentReader(idx uint64) (types.SegmentReader, error) {
+	if s.segments.Len() == 0 {
+		return nil, ErrNotFound
+	}
+
+	// Search for a segment with baseIndex.
+	it := s.segments.Iterator()
+
+	// The baseIndex we want is the first one lower or equal to idx. Seek gets
+	// us to the first result equal or greater, so we are either at it (if
+	// equal) or on the one _after_ the one we need. We step back since the
+	// previous segment is most likely the one we want.
+	it.Seek(idx)
+	// The first call to Next/Prev actually returns the node the iterator is
+	// currently on (which is probably the one after the one we want) but in
+	// some edge cases we might actually want this one. Rather than reversing
+	// back and coming forward again, just check both this and the one before
+	// it.
+	_, seg, ok := it.Prev()
+	if ok && seg.BaseIndex > idx {
+		_, seg, ok = it.Prev()
+	}
+
+	// We either have the right segment or it doesn't exist.
+	if ok && seg.MinIndex <= idx && (seg.MaxIndex == 0 || seg.MaxIndex >= idx) {
+		return seg.r, nil
+	}
+
+	return nil, ErrNotFound
+}
+
+func (s *state) getTailInfo() *segmentState {
+	it := s.segments.Iterator()
+	it.Last()
+	_, tail, ok := it.Next()
+	if !ok {
+		return nil
+	}
+	return &tail
+}
+
+func (s *state) append(entries []types.LogEntry) error {
+	return s.tail.Append(entries)
+}
+
+func (s *state) firstIndex() uint64 {
+	it := s.segments.Iterator()
+	_, seg, ok := it.Next()
+	if !ok {
+		return 0
+	}
+	if seg.SealTime.IsZero() {
+		// The first segment is unsealed so it is also the tail. Check it actually
+		// has at least one log in it, otherwise it doesn't matter what the
+		// BaseIndex/MinIndex are.
+		if s.tail.LastIndex() == 0 {
+			// No logs in the WAL
+			return 0
+		}
+		// At least one log exists, return the MinIndex
+	}
+	return seg.MinIndex
+}
+
+func (s *state) lastIndex() uint64 {
+	tailIdx := s.tail.LastIndex()
+	if tailIdx > 0 {
+		return tailIdx
+	}
+	// The current tail is empty. Check whether there are previous sealed
+	// segments.
+	it := s.segments.Iterator()
+	it.Last()
+	_, _, ok := it.Prev()
+	if !ok {
+		// No tail! Shouldn't be possible but means no logs yet.
+		return 0
+	}
+	// Go back to the segment before the tail
+	_, _, ok = it.Prev()
+	if !ok {
+		// No previous segment so the whole log is empty
+		return 0
+	}
+
+	// There was a previous segment, so its MaxIndex will be one less than the
+	// tail's BaseIndex.
+	tailSeg := s.getTailInfo()
+	if tailSeg == nil || tailSeg.BaseIndex == 0 {
+		return 0
+	}
+	return tailSeg.BaseIndex - 1
+}
+
+func (s *state) acquire() func() {
+	atomic.AddInt32(&s.refCount, 1)
+	return s.release
+}
+
+func (s *state) release() {
+	// Decrement on release
+	new := atomic.AddInt32(&s.refCount, -1)
+	if new == 0 {
+		// Clean up state associated with this version now that all refs have
+		// gone. Since there are no more refs and we should not set a finalizer
+		// until this state is no longer the active state, we can be sure this
+		// will happen only once. Even still, let's swap the fn to ensure we only
+		// ever call the finalizer once! We can't swap an actual nil as it's not
+		// the same type as func(), so do a dance with a nilFn below.
+		var nilFn func()
+		fnRaw := s.finalizer.Swap(nilFn)
+		if fn, ok := fnRaw.(func()); ok && fn != nil {
+			fn()
+		}
+	}
+}
+
+// clone returns a new state which is a shallow copy of just the immutable
+// parts of s. This is safer than a simple assignment copy because that
+// "reads" the atomically modified state non-atomically. We never want to copy
+// the refCount or finalizer anyway.
+func (s *state) clone() state {
+	return state{
+		nextSegmentID: s.nextSegmentID,
+		segments:      s.segments,
+		tail:          s.tail,
+	}
+}
diff --git a/vendor/github.com/hashicorp/raft-wal/types/buffer.go b/vendor/github.com/hashicorp/raft-wal/types/buffer.go
new file mode 100644
index 0000000000000..e4c1d3a8600c7
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft-wal/types/buffer.go
@@ -0,0 +1,21 @@
+// Copyright (c) HashiCorp, Inc
+// SPDX-License-Identifier: MPL-2.0
+
+package types
+
+// PooledBuffer is a wrapper that allows the WAL to return read buffers to
+// segment implementations when we're done decoding.
+type PooledBuffer struct {
+	Bs      []byte
+	CloseFn func()
+}
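// Editor's note: an illustrative read path for the pooled buffer (decode is a
// hypothetical caller-supplied function; everything else is from this diff):
//
//	buf, err := reader.GetLog(idx)
//	if err != nil {
//		return err
//	}
//	defer buf.Close()       // returns Bs to the pool
//	entry := decode(buf.Bs) // must not retain buf.Bs after Close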
+// Close implements io.Closer and returns the buffer to the pool. It should be
+// called exactly once for each buffer when it's no longer needed. It's no
+// longer safe to access Bs, or any slice taken from it, after the call.
+func (b *PooledBuffer) Close() error {
+	if b.CloseFn != nil {
+		b.CloseFn()
+	}
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/raft-wal/types/meta.go b/vendor/github.com/hashicorp/raft-wal/types/meta.go
new file mode 100644
index 0000000000000..efa9d7d7982cb
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft-wal/types/meta.go
@@ -0,0 +1,41 @@
+// Copyright (c) HashiCorp, Inc
+// SPDX-License-Identifier: MPL-2.0
+
+package types
+
+import "io"
+
+// MetaStore is the interface we need for some persistent, crash-safe backend.
+// We implement it with BoltDB for real usage, but the interface allows
+// alternatives to be used, or tests to mock out FS access.
+type MetaStore interface {
+	// Load loads the existing persisted state. If there is no existing state,
+	// implementations are expected to create and initialize new storage and
+	// return an empty state.
+	Load(dir string) (PersistentState, error)
+
+	// CommitState must atomically replace all persisted metadata in the current
+	// store with the set provided. It must not return until the data is
+	// persisted durably and in a crash-safe way, otherwise the guarantees of
+	// the WAL will be compromised. The WAL will only ever call this from a
+	// single thread at one time, and it will never be called concurrently with
+	// Load; however it may be called concurrently with Get/SetStable
+	// operations.
+	CommitState(PersistentState) error
+
+	// GetStable returns a value from the stable store or nil if it doesn't
+	// exist. May be called concurrently by multiple threads.
+	GetStable(key []byte) ([]byte, error)
+
+	// SetStable stores a value in the stable store. May be called concurrently
+	// with GetStable.
+	SetStable(key, value []byte) error
+
+	io.Closer
+}
+
+// PersistentState represents the WAL file metadata we need to store reliably
+// to recover on restart.
+type PersistentState struct {
+	NextSegmentID uint64
+	Segments      []SegmentInfo
+}
diff --git a/vendor/github.com/hashicorp/raft-wal/types/segment.go b/vendor/github.com/hashicorp/raft-wal/types/segment.go
new file mode 100644
index 0000000000000..b1ed55ea47523
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft-wal/types/segment.go
@@ -0,0 +1,150 @@
+// Copyright (c) HashiCorp, Inc
+// SPDX-License-Identifier: MPL-2.0
+
+package types
+
+import (
+	"io"
+	"time"
+)
+
+// SegmentInfo is the metadata describing a single WAL segment.
+type SegmentInfo struct {
+	// ID uniquely identifies this segment file
+	ID uint64
+
+	// BaseIndex is the raft index of the first entry that will be written to
+	// the segment.
+	BaseIndex uint64
+
+	// MinIndex is the logical lowest index that still exists in the segment. It
+	// may be greater than BaseIndex if a head truncation has "deleted" a prefix
+	// of the segment.
+	MinIndex uint64
+
+	// MaxIndex is the logical highest index that still exists in the segment.
+	// It may be lower than the actual highest index if a tail truncation has
+	// "deleted" a suffix of the segment. It is zero for unsealed segments and
+	// only set on seal.
+	MaxIndex uint64
+
+	// Codec identifies the codec used to encode log entries. Codec values 0 to
+	// 16k (i.e. the lower 16 bits) are reserved for internal future usage.
+	// Custom codecs must be registered with an identifier higher than this,
+	// which the caller is responsible for ensuring uniquely identifies the
+	// specific version of their codec used in any given log. uint64 provides
+	// sufficient space that a randomly generated identifier is almost
+	// certainly unique.
+	Codec uint64
+
+	// IndexStart is the file offset where the index can be read from; it's 0 for
+	// tail segments and only set after a segment is sealed.
+	IndexStart uint64
+
+	// CreateTime records when the segment was first created.
+	CreateTime time.Time
+
+	// SealTime records when the segment was sealed. Zero indicates that it's not
+	// sealed yet.
+	SealTime time.Time
+
+	// SizeLimit is the soft limit for the segment's size. The segment file may be
+	// pre-allocated to this size on filesystems that support it. It is a soft
+	// limit in the sense that the final Append usually takes the segment file
+	// past this size before it is considered full and sealed.
+	SizeLimit uint32
+}
+
+// SegmentFiler is the interface that provides access to segments to the WAL. It
+// encapsulates creating and recovering segments and returning reader or writer
+// interfaces to interact with them. Its main purpose is to abstract the core
+// WAL logic from the actual encoding layer of segment files. You can think
+// of it as a layer of abstraction above the VFS which abstracts actual file
+// system operations on files but knows nothing about the format. In tests for
+// example we can implement a SegmentFiler that is way simpler than the real
+// encoding/decoding layer on top of a VFS - even an in-memory VFS - which makes
+// tests much simpler to write and run.
+type SegmentFiler interface {
+	// Create adds a new segment with the given info and returns a writer or an
+	// error.
+	Create(info SegmentInfo) (SegmentWriter, error)
+
+	// RecoverTail is called on an unsealed segment when re-opening the WAL; it
+	// will attempt to recover from a possible crash. It will either return an
+	// error, or return a valid SegmentWriter that is ready for further appends.
+	// If the expected tail segment doesn't exist it must return an error wrapping
+	// os.ErrNotExist.
+	RecoverTail(info SegmentInfo) (SegmentWriter, error)
+
+	// Open an already sealed segment for reading. Open may validate the file's
+	// header and return an error if it doesn't match the expected info.
+	Open(info SegmentInfo) (SegmentReader, error)
+
+	// List returns the set of segment IDs currently stored. It's used by the WAL
+	// on recovery to find any segment files that need to be deleted following an
+	// unclean shutdown. The returned map is a map of ID -> BaseIndex. BaseIndex
+	// is returned to allow subsequent Delete calls to be made.
+	List() (map[uint64]uint64, error)
+
+	// Delete removes the segment with given baseIndex and id if it exists. Note
+	// that baseIndex is technically redundant since ID is unique on its own. But
+	// in practice we name files (or keys) with both so that they sort correctly.
+	// This interface allows a simpler implementation where we can just delete
+	// the file if it exists without having to scan the underlying storage for a
+	// match.
+	Delete(baseIndex, ID uint64) error
+}
+
+// SegmentWriter manages appending logs to the tail segment of the WAL. It's an
+// interface to make testing core WAL simpler. Every SegmentWriter will have
+// either `init` or `recover` called once before any other methods. When either
+// returns it must either return an error or be ready to accept new writes and
+// reads.
+type SegmentWriter interface {
+	io.Closer
+	SegmentReader
+
+	// Append adds one or more entries. It must not return until the entries are
+	// durably stored, otherwise raft's guarantees will be compromised.
+	// Append must not be called concurrently with any other call to Sealed,
+	// Append or ForceSeal.
+	Append(entries []LogEntry) error
+
+	// Sealed returns whether the segment is sealed or not. If it is, it returns
+	// true and the file offset at which its index array starts, to be saved in
+	// metadata. WAL will call this after every append so it should be relatively
+	// cheap in the common case. This design allows the final Append to write out
+	// the index or any additional data needed at seal time in the same fsync.
+	// Sealed must not be called concurrently with any other call to Sealed,
+	// Append or ForceSeal.
+	Sealed() (bool, uint64, error)
+
+	// ForceSeal causes the segment to become sealed by writing out an index
+	// block. This is not used in the typical flow of append and rotation, but is
+	// necessary during truncations where some suffix of the writer needs to be
+	// truncated. Rather than manipulate what is on disk in a complex way, the WAL
+	// will simply force seal it with whatever state it has already saved and then
+	// open a new segment at the right offset for continued writing. ForceSeal may
+	// be called on a segment that has already been sealed and should just return
+	// the existing index offset in that case. (We don't actually rely on that
+	// currently but it's easier not to assume we'll always call it at most once).
+	// ForceSeal must not be called concurrently with any other call to Sealed,
+	// Append or ForceSeal.
+	ForceSeal() (uint64, error)
+
+	// LastIndex returns the most recently persisted index in the log. It must
+	// respond without blocking on Append since it's needed frequently by read
+	// paths that may call it concurrently. Typically this will be loaded from an
+	// atomic int. If the segment is empty, LastIndex should return zero.
+	LastIndex() uint64
+}
+
+// SegmentReader wraps a ReadableFile to allow lookup of logs in an existing
+// segment file. It's an interface to make testing core WAL simpler. The first
+// call will always be validate which passes in the ReaderAt to be used for
+// subsequent reads.
+type SegmentReader interface {
+	io.Closer
+
+	// GetLog returns the raw log entry bytes associated with idx. If the log
+	// doesn't exist in this segment ErrNotFound must be returned.
+	GetLog(idx uint64) (*PooledBuffer, error)
+}
diff --git a/vendor/github.com/hashicorp/raft-wal/types/types.go b/vendor/github.com/hashicorp/raft-wal/types/types.go
new file mode 100644
index 0000000000000..4eb8513bcaab5
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft-wal/types/types.go
@@ -0,0 +1,27 @@
+// Copyright (c) HashiCorp, Inc
+// SPDX-License-Identifier: MPL-2.0
+
+package types
+
+import (
+	"errors"
+
+	"github.com/hashicorp/raft"
+)
+
+var (
+	// ErrNotFound is our own version of raft's not found error. It's important
+	// it's exactly the same because the raft lib checks for equality with its
+	// own type as a crucial part of replication processing (detecting end of logs
+	// and that a snapshot is needed for a follower).
+	ErrNotFound = raft.ErrLogNotFound
+	ErrCorrupt  = errors.New("WAL is corrupt")
+	ErrSealed   = errors.New("segment is sealed")
+	ErrClosed   = errors.New("closed")
+)
+
+// LogEntry represents an entry that has already been encoded.
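+// The WAL produces one by running each raft.Log through its configured Codec,
+// roughly as follows (a sketch of the caller in wal.go's StoreLogs):
+//
+//	var buf bytes.Buffer
+//	if err := codec.Encode(raftLog, &buf); err != nil {
+//		return err
+//	}
+//	entry := LogEntry{Index: raftLog.Index, Data: buf.Bytes()}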
+type LogEntry struct { + Index uint64 + Data []byte +} diff --git a/vendor/github.com/hashicorp/raft-wal/types/vfs.go b/vendor/github.com/hashicorp/raft-wal/types/vfs.go new file mode 100644 index 0000000000000..a58241548f409 --- /dev/null +++ b/vendor/github.com/hashicorp/raft-wal/types/vfs.go @@ -0,0 +1,59 @@ +// Copyright (c) HashiCorp, Inc +// SPDX-License-Identifier: MPL-2.0 + +package types + +import "io" + +// VFS is the interface WAL needs to interact with the file system. In +// production it would normally be implemented by RealFS which interacts with +// the operating system FS using standard go os package. It's useful to allow +// testing both to run quicker (by being in memory only) and to make it easy to +// simulate all kinds of disk errors and failure modes without needing a more +// elaborate external test harness like ALICE. +type VFS interface { + // ListDir returns a list of all files in the specified dir in lexicographical + // order. If the dir doesn't exist, it must return an error. Empty array with + // nil error is assumed to mean that the directory exists and was readable, + // but contains no files. + ListDir(dir string) ([]string, error) + + // Create creates a new file with the given name. If a file with the same name + // already exists an error is returned. If a non-zero size is given, + // implementations should make a best effort to pre-allocate the file to be + // that size. The dir must already exist and be writable to the current + // process. + Create(dir, name string, size uint64) (WritableFile, error) + + // Delete indicates the file is no longer required. Typically it should be + // deleted from the underlying system to free disk space. + Delete(dir, name string) error + + // OpenReader opens an existing file in read-only mode. If the file doesn't + // exist or permission is denied, an error is returned, otherwise no checks + // are made about the well-formedness of the file, it may be empty, the wrong + // size or corrupt in arbitrary ways. + OpenReader(dir, name string) (ReadableFile, error) + + // OpenWriter opens a file in read-write mode. If the file doesn't exist or + // permission is denied, an error is returned, otherwise no checks are made + // about the well-formedness of the file, it may be empty, the wrong size or + // corrupt in arbitrary ways. + OpenWriter(dir, name string) (WritableFile, error) +} + +// WritableFile provides random read-write access to a file as well as the +// ability to fsync it to disk. +type WritableFile interface { + io.WriterAt + io.ReaderAt + io.Closer + + Sync() error +} + +// ReadableFile provides random read access to a file. 
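+// A real-filesystem VFS can hand back *os.File values directly since they
+// satisfy both this interface and WritableFile above (compile-time assertions
+// as a sketch):
+//
+//	var _ ReadableFile = (*os.File)(nil)
+//	var _ WritableFile = (*os.File)(nil)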
+type ReadableFile interface { + io.ReaderAt + io.Closer +} diff --git a/vendor/github.com/hashicorp/raft-wal/wal.go b/vendor/github.com/hashicorp/raft-wal/wal.go new file mode 100644 index 0000000000000..b81550bb86110 --- /dev/null +++ b/vendor/github.com/hashicorp/raft-wal/wal.go @@ -0,0 +1,957 @@ +// Copyright (c) HashiCorp, Inc +// SPDX-License-Identifier: MPL-2.0 + +package wal + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "os" + "sync" + "sync/atomic" + "time" + + "github.com/benbjohnson/immutable" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/raft" + "github.com/hashicorp/raft-wal/metrics" + "github.com/hashicorp/raft-wal/types" +) + +var ( + _ raft.LogStore = &WAL{} + _ raft.StableStore = &WAL{} + + ErrNotFound = types.ErrNotFound + ErrCorrupt = types.ErrCorrupt + ErrSealed = types.ErrSealed + ErrClosed = types.ErrClosed + + DefaultSegmentSize = 64 * 1024 * 1024 +) + +var ( + _ raft.LogStore = &WAL{} + _ raft.MonotonicLogStore = &WAL{} + _ raft.StableStore = &WAL{} +) + +// WAL is a write-ahead log suitable for github.com/hashicorp/raft. +type WAL struct { + closed uint32 // atomically accessed to keep it first in struct for alignment. + + dir string + codec Codec + sf types.SegmentFiler + metaDB types.MetaStore + metrics metrics.Collector + log hclog.Logger + segmentSize int + + // s is the current state of the WAL files. It is an immutable snapshot that + // can be accessed without a lock when reading. We only support a single + // writer so all methods that mutate either the WAL state or append to the + // tail of the log must hold the writeMu until they complete all changes. + s atomic.Value // *state + + // writeMu must be held when modifying s or while appending to the tail. + // Although we take care never to let readers block writer, we still only + // allow a single writer to be updating the meta state at once. The mutex must + // be held before s is loaded until all modifications to s or appends to the + // tail are complete. + writeMu sync.Mutex + + // These chans are used to hand off serial execution for segment rotation to a + // background goroutine so that StoreLogs can return and allow the caller to + // get on with other work while we mess with files. The next call to StoreLogs + // needs to wait until the background work is done though since the current + // log is sealed. + // + // At the end of StoreLogs, if the segment was sealed, still holding writeMu + // we make awaitRotate so it's non-nil, then send the indexStart on + // triggerRotate which is 1-buffered. We then drop the lock and return to + // caller. The rotation goroutine reads from triggerRotate in a loop, takes + // the write lock performs rotation and then closes awaitRotate and sets it to + // nil before releasing the lock. The next StoreLogs call takes the lock, + // checks if awaitRotate. If it is nil there is no rotation going on so + // StoreLogs can proceed. If it is non-nil, it releases the lock and then + // waits on the close before acquiring the lock and continuing. + triggerRotate chan uint64 + awaitRotate chan struct{} +} + +type walOpt func(*WAL) + +// Open attempts to open the WAL stored in dir. If there are no existing WAL +// files a new WAL will be initialized there. The dir must already exist and be +// readable and writable to the current process. If existing files are found, +// recovery is attempted. If recovery is not possible an error is returned, +// otherwise the returned *WAL is in a state ready for use. 
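+//
+// A usage sketch (option constructors such as WithSegmentSize live in this
+// package's options.go, which is outside this hunk, so treat the option list
+// as illustrative):
+//
+//	w, err := wal.Open("/var/lib/myapp/wal", wal.WithSegmentSize(32*1024*1024))
+//	if err != nil {
+//		return err
+//	}
+//	defer w.Close()
+//	// w satisfies raft.LogStore, raft.StableStore and raft.MonotonicLogStore
+//	// and can be passed to raft.NewRaft.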
+func Open(dir string, opts ...walOpt) (*WAL, error) {
+	w := &WAL{
+		dir:           dir,
+		triggerRotate: make(chan uint64, 1),
+	}
+	// Apply options
+	for _, opt := range opts {
+		opt(w)
+	}
+	if err := w.applyDefaultsAndValidate(); err != nil {
+		return nil, err
+	}
+
+	// Load or create metaDB
+	persisted, err := w.metaDB.Load(w.dir)
+	if err != nil {
+		return nil, err
+	}
+
+	newState := state{
+		segments:      &immutable.SortedMap[uint64, segmentState]{},
+		nextSegmentID: persisted.NextSegmentID,
+	}
+
+	// Get the set of all persisted segments so we can prune it down to just the
+	// unused ones as we go.
+	toDelete, err := w.sf.List()
+	if err != nil {
+		return nil, err
+	}
+
+	// Build the state
+	recoveredTail := false
+	for i, si := range persisted.Segments {
+
+		// Verify we can decode the entries.
+		// TODO: support multiple decoders to allow rotating codec.
+		if si.Codec != w.codec.ID() {
+			return nil, fmt.Errorf("segment with BaseIndex=%d uses an unknown codec", si.BaseIndex)
+		}
+
+		// We want to keep this segment since it's still in the metaDB list!
+		delete(toDelete, si.ID)
+
+		if si.SealTime.IsZero() {
+			// This is an unsealed segment. It _must_ be the last one. Safety check!
+			if i < len(persisted.Segments)-1 {
+				return nil, fmt.Errorf("unsealed segment is not at tail")
+			}
+
+			// Try to recover this segment
+			sw, err := w.sf.RecoverTail(si)
+			if errors.Is(err, os.ErrNotExist) {
+				// Handle no file specially. This can happen if we crashed right after
+				// persisting the metadata but before we managed to persist the new
+				// file. In fact it could happen if the whole machine loses power any
+				// time before the fsync of the parent dir since the FS could lose the
+				// dir entry for the new file until that point. We do ensure we pass
+				// that point before we return from Append for the first time in that
+				// new file so that's safe, but we have to handle recovering from that
+				// case here.
+				sw, err = w.sf.Create(si)
+			}
+			if err != nil {
+				return nil, err
+			}
+			// Set the tail and "reader" for this segment
+			ss := segmentState{
+				SegmentInfo: si,
+				r:           sw,
+			}
+			newState.tail = sw
+			newState.segments = newState.segments.Set(si.BaseIndex, ss)
+			recoveredTail = true
+
+			// We're done with this loop, break here to avoid nesting all the rest of
+			// the logic!
+			break
+		}
+
+		// This is a sealed segment
+
+		// Open segment reader
+		sr, err := w.sf.Open(si)
+		if err != nil {
+			return nil, err
+		}
+
+		// Store the open reader to get logs from
+		ss := segmentState{
+			SegmentInfo: si,
+			r:           sr,
+		}
+		newState.segments = newState.segments.Set(si.BaseIndex, ss)
+	}
+
+	if !recoveredTail {
+		// There was no unsealed segment at the end. This can only really happen
+		// when the log is empty with zero segments (either on creation or after a
+		// truncation that removed all segments) since we otherwise never allow the
+		// state to have a sealed tail segment. But this logic works regardless!
+
+		// Create a new segment. We use baseIndex of 1 even though the first append
+		// might be much higher - we'll allow that since we know we have no records
+		// yet and so lastIndex will also be 0.
+		si := w.newSegment(newState.nextSegmentID, 1)
+		newState.nextSegmentID++
+		ss := segmentState{
+			SegmentInfo: si,
+		}
+		newState.segments = newState.segments.Set(si.BaseIndex, ss)
+
+		// Persist the new meta to "commit" it even before we create the file so we
+		// don't attempt to recreate files with duplicate IDs on a later failure.
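+		// Committing the segment ID to metaDB before creating the file is what
+		// makes the os.ErrNotExist branch in the recovery loop above safe: if we
+		// crash between CommitState and Create, recovery finds the committed
+		// SegmentInfo with no file on disk and simply re-creates it.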
+ if err := w.metaDB.CommitState(newState.Persistent()); err != nil { + return nil, err + } + + // Create the new segment file + w, err := w.sf.Create(si) + if err != nil { + return nil, err + } + newState.tail = w + // Update the segment in memory so we have a reader for the new segment. We + // don't need to commit again as this isn't changing the persisted metadata + // about the segment. + ss.r = w + newState.segments = newState.segments.Set(si.BaseIndex, ss) + } + + // Store the in-memory state (it was already persisted if we modified it + // above) there are no readers yet since we are constructing a new WAL so we + // don't need to jump through the mutateState hoops yet! + w.s.Store(&newState) + + // Delete any unused segment files left over after a crash. + w.deleteSegments(toDelete) + + // Start the rotation routine + go w.runRotate() + + return w, nil +} + +// stateTxn represents a transaction body that mutates the state under the +// writeLock. s is already a shallow copy of the current state that may be +// mutated as needed. If a nil error is returned, s will be atomically set as +// the new state. If a non-nil finalizer func is returned it will be atomically +// attached to the old state after it's been replaced but before the write lock +// is released. The finalizer will be called exactly once when all current +// readers have released the old state. If the transaction func returns a +// non-nil postCommit it is executed after the new state has been committed to +// metaDB. It may mutate the state further (captured by closure) before it is +// atomically committed in memory but the update won't be persisted to disk in +// this transaction. This is used where we need sequencing between committing +// meta and creating and opening a new file. Both need to happen in memory in +// one transaction but the disk commit isn't at the end! If postCommit returns +// an error, the state is not updated in memory and the error is returned to the +// mutate caller. +type stateTxn func(s *state) (finalizer func(), postCommit func() error, err error) + +func (w *WAL) loadState() *state { + return w.s.Load().(*state) +} + +// mutateState executes a stateTxn. writeLock MUST be held while calling this. +func (w *WAL) mutateStateLocked(tx stateTxn) error { + s := w.loadState() + s.acquire() + defer s.release() + + newS := s.clone() + fn, postCommit, err := tx(&newS) + if err != nil { + return err + } + + // Commit updates to meta + if err := w.metaDB.CommitState(newS.Persistent()); err != nil { + return err + } + + if postCommit != nil { + if err := postCommit(); err != nil { + return err + } + } + + w.s.Store(&newS) + s.finalizer.Store(fn) + return nil +} + +// acquireState should be used by all readers to fetch the current state. The +// returned release func must be called when no further accesses to state or the +// data within it will be performed to free old files that may have been +// truncated concurrently. +func (w *WAL) acquireState() (*state, func()) { + s := w.loadState() + return s, s.acquire() +} + +// newSegment creates a types.SegmentInfo with the passed ID and baseIndex, filling in +// the segment parameters based on the current WAL configuration. +func (w *WAL) newSegment(ID, baseIndex uint64) types.SegmentInfo { + return types.SegmentInfo{ + ID: ID, + BaseIndex: baseIndex, + MinIndex: baseIndex, + SizeLimit: uint32(w.segmentSize), + + // TODO make these configurable + Codec: CodecBinaryV1, + CreateTime: time.Now(), + } +} + +// FirstIndex returns the first index written. 
0 for no entries.
+func (w *WAL) FirstIndex() (uint64, error) {
+	if err := w.checkClosed(); err != nil {
+		return 0, err
+	}
+	s, release := w.acquireState()
+	defer release()
+	return s.firstIndex(), nil
+}
+
+// LastIndex returns the last index written. 0 for no entries.
+func (w *WAL) LastIndex() (uint64, error) {
+	if err := w.checkClosed(); err != nil {
+		return 0, err
+	}
+	s, release := w.acquireState()
+	defer release()
+	return s.lastIndex(), nil
+}
+
+// GetLog gets a log entry at a given index.
+func (w *WAL) GetLog(index uint64, log *raft.Log) error {
+	if err := w.checkClosed(); err != nil {
+		return err
+	}
+	s, release := w.acquireState()
+	defer release()
+	w.metrics.IncrementCounter("log_entries_read", 1)
+
+	raw, err := s.getLog(index)
+	if err != nil {
+		return err
+	}
+	w.metrics.IncrementCounter("log_entry_bytes_read", uint64(len(raw.Bs)))
+	defer raw.Close()
+
+	// Decode the log
+	return w.codec.Decode(raw.Bs, log)
+}
+
+// StoreLog stores a log entry.
+func (w *WAL) StoreLog(log *raft.Log) error {
+	return w.StoreLogs([]*raft.Log{log})
+}
+
+// StoreLogs stores multiple log entries.
+func (w *WAL) StoreLogs(logs []*raft.Log) error {
+	if err := w.checkClosed(); err != nil {
+		return err
+	}
+	if len(logs) < 1 {
+		return nil
+	}
+
+	w.writeMu.Lock()
+	defer w.writeMu.Unlock()
+
+	// Ensure queued rotation has completed before us if we raced with it for
+	// write lock.
+	w.awaitRotationLocked()
+
+	s, release := w.acquireState()
+	defer release()
+
+	// Verify monotonicity since we assume it
+	lastIdx := s.lastIndex()
+
+	// Special case, if the log is currently empty and this is the first append,
+	// we allow any starting index. We've already created an empty tail segment
+	// though and probably started at index 1. Rather than break the invariant
+	// that BaseIndex is the same as the first index in the segment (which causes
+	// lots of extra complexity lower down) we simply accept the additional cost
+	// in this rare case of removing the current tail and re-creating it with the
+	// correct BaseIndex for the first log we are about to append. In practice,
+	// this only happens on startup of a new server, or after a user snapshot
+	// restore, which are both rare enough events that the cost is not significant
+	// since the cost of creating other state or restoring snapshots is larger
+	// anyway. We could theoretically defer creating at all until we know for sure
+	// but that is more complex internally since then everything has to handle the
+	// uninitialized case where there is no tail yet with special cases.
+	ti := s.getTailInfo()
+	// Note we check index != ti.BaseIndex rather than index != 1 so that this
+	// works even if we choose to initialize first segments to a BaseIndex other
+	// than 1. For example it might be marginally more performant to choose to
+	// initialize to the old MaxIndex + 1 after a truncate since that is what our
+	// raft library will use after a restore currently so will avoid this case on
+	// the next append, while still being generally safe.
+	if lastIdx == 0 && logs[0].Index != ti.BaseIndex {
+		if err := w.resetEmptyFirstSegmentBaseIndex(logs[0].Index); err != nil {
+			return err
+		}
+
+		// Re-read state now that we just changed it.
+		s2, release2 := w.acquireState()
+		defer release2()
+
+		// Overwrite the state we read before so the code below uses the new state
+		s = s2
+	}
+
+	// Encode logs
+	nBytes := uint64(0)
+	encoded := make([]types.LogEntry, len(logs))
+	for i, l := range logs {
+		if lastIdx > 0 && l.Index != (lastIdx+1) {
+			return fmt.Errorf("non-monotonic log entries: tried to append index %d after %d", l.Index, lastIdx)
+		}
+		// Need a new buffer each time because Data is just a slice so if we re-use
+		// buffer then all end up pointing to the same underlying data which
+		// contains only the final log value!
+		var buf bytes.Buffer
+		if err := w.codec.Encode(l, &buf); err != nil {
+			return err
+		}
+		encoded[i].Data = buf.Bytes()
+		encoded[i].Index = l.Index
+		lastIdx = l.Index
+		nBytes += uint64(len(encoded[i].Data))
+	}
+	if err := s.tail.Append(encoded); err != nil {
+		return err
+	}
+	w.metrics.IncrementCounter("log_appends", 1)
+	w.metrics.IncrementCounter("log_entries_written", uint64(len(encoded)))
+	w.metrics.IncrementCounter("log_entry_bytes_written", nBytes)
+
+	// Check if we need to roll logs
+	sealed, indexStart, err := s.tail.Sealed()
+	if err != nil {
+		return err
+	}
+	if sealed {
+		// Async rotation to allow caller to do more work while we mess with files.
+		w.triggerRotateLocked(indexStart)
+	}
+	return nil
+}
+
+func (w *WAL) awaitRotationLocked() {
+	awaitCh := w.awaitRotate
+	if awaitCh != nil {
+		// We managed to race for writeMu with the background rotate operation which
+		// needs to complete first. Wait for it to complete.
+		w.writeMu.Unlock()
+		<-awaitCh
+		w.writeMu.Lock()
+	}
+}
+
+// DeleteRange deletes a range of log entries. The range is inclusive.
+// Implements raft.LogStore. Note that we only support deleting ranges that are
+// a suffix or prefix of the log.
+func (w *WAL) DeleteRange(min uint64, max uint64) error {
+	if err := w.checkClosed(); err != nil {
+		return err
+	}
+	if min > max {
+		// Empty inclusive range.
+		return nil
+	}
+
+	w.writeMu.Lock()
+	defer w.writeMu.Unlock()
+
+	// Ensure queued rotation has completed before us if we raced with it for
+	// write lock.
+	w.awaitRotationLocked()
+
+	s, release := w.acquireState()
+	defer release()
+
+	// Work out what type of truncation this is.
+	first, last := s.firstIndex(), s.lastIndex()
+	switch {
+	// |min----max|
+	//               |first====last|
+	// or
+	//                |min----max|
+	// |first====last|
+	case max < first || min > last:
+		// None of the range exists at all so a no-op
+		return nil
+
+	// |min----max|
+	//      |first====last|
+	// or
+	// |min--------------max|
+	//      |first====last|
+	// or
+	//  |min--max|
+	//  |first====last|
+	case min <= first: // max >= first implied by the first case not matching
+		// Note we allow head truncations where max > last which effectively removes
+		// the entire log.
+		return w.truncateHeadLocked(max + 1)
+
+	//        |min----max|
+	// |first====last|
+	// or
+	// |min--------------max|
+	// |first====last|
+	case max >= last: // min <= last implied by first case not matching
+		return w.truncateTailLocked(min - 1)
+
+	//    |min----max|
+	// |first========last|
+	default:
+		// Everything else is neither a suffix nor a prefix so is unsupported.
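+		// For example with first=1, last=100, DeleteRange(40, 60) would leave a
+		// gap in the middle of the log which this store can't represent.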
+ return fmt.Errorf("only suffix or prefix ranges may be deleted from log") + } +} + +// Set implements raft.StableStore +func (w *WAL) Set(key []byte, val []byte) error { + if err := w.checkClosed(); err != nil { + return err + } + w.metrics.IncrementCounter("stable_sets", 1) + return w.metaDB.SetStable(key, val) +} + +// Get implements raft.StableStore +func (w *WAL) Get(key []byte) ([]byte, error) { + if err := w.checkClosed(); err != nil { + return nil, err + } + w.metrics.IncrementCounter("stable_gets", 1) + return w.metaDB.GetStable(key) +} + +// SetUint64 implements raft.StableStore. We assume the same key space as Set +// and Get so the caller is responsible for ensuring they don't call both Set +// and SetUint64 for the same key. +func (w *WAL) SetUint64(key []byte, val uint64) error { + var buf [8]byte + binary.LittleEndian.PutUint64(buf[:], val) + return w.Set(key, buf[:]) +} + +// GetUint64 implements raft.StableStore. We assume the same key space as Set +// and Get. We assume that the key was previously set with `SetUint64` and +// returns an undefined value (possibly with nil error) if not. +func (w *WAL) GetUint64(key []byte) (uint64, error) { + raw, err := w.Get(key) + if err != nil { + return 0, err + } + if len(raw) == 0 { + // Not set, return zero per interface contract + return 0, nil + } + // At least a tiny bit of checking is possible + if len(raw) != 8 { + return 0, fmt.Errorf("GetUint64 called on a non-uint64 key") + } + return binary.LittleEndian.Uint64(raw), nil +} + +func (w *WAL) triggerRotateLocked(indexStart uint64) { + if atomic.LoadUint32(&w.closed) == 1 { + return + } + w.awaitRotate = make(chan struct{}) + w.triggerRotate <- indexStart +} + +func (w *WAL) runRotate() { + for { + indexStart := <-w.triggerRotate + + w.writeMu.Lock() + + // Either triggerRotate was closed by Close, or Close raced with a real + // trigger, either way shut down without changing anything else. In the + // second case the segment file is sealed but meta data isn't updated yet + // but we have to handle that case during recovery anyway so it's simpler + // not to try and complete the rotation here on an already-closed WAL. + closed := atomic.LoadUint32(&w.closed) + if closed == 1 { + w.writeMu.Unlock() + return + } + + err := w.rotateSegmentLocked(indexStart) + if err != nil { + // The only possible errors indicate bugs and could probably validly be + // panics, but be conservative and just attempt to log them instead! + w.log.Error("rotate error", "err", err) + } + done := w.awaitRotate + w.awaitRotate = nil + w.writeMu.Unlock() + // Now we are done, close the channel to unblock the waiting writer if there + // is one + close(done) + } +} + +func (w *WAL) rotateSegmentLocked(indexStart uint64) error { + txn := func(newState *state) (func(), func() error, error) { + // Mark current tail as sealed in segments + tail := newState.getTailInfo() + if tail == nil { + // Can't happen + return nil, nil, fmt.Errorf("no tail found during rotate") + } + + // Note that tail is a copy since it's a value type. Even though this is a + // pointer here it's pointing to a copy on the heap that was made in + // getTailInfo above, so we can mutate it safely and update the immutable + // state with our version. + tail.SealTime = time.Now() + tail.MaxIndex = newState.tail.LastIndex() + tail.IndexStart = indexStart + w.metrics.SetGauge("last_segment_age_seconds", uint64(tail.SealTime.Sub(tail.CreateTime).Seconds())) + + // Update the old tail with the seal time etc. 
+		newState.segments = newState.segments.Set(tail.BaseIndex, *tail)
+
+		post, err := w.createNextSegment(newState)
+		return nil, post, err
+	}
+	w.metrics.IncrementCounter("segment_rotations", 1)
+	return w.mutateStateLocked(txn)
+}
+
+// createNextSegment is passed a mutable copy of the new state ready to have a
+// new segment appended. newState must be a copy, taken under write lock which
+// is still held by the caller, and its segments map must contain all non-tail
+// segments that should be in the log, all must be sealed at this point. The new
+// segment's baseIndex will be the current last-segment's MaxIndex + 1 (or 1 if
+// no current tail segment). The func returned is to be executed post
+// transaction commit to create the actual segment file.
+func (w *WAL) createNextSegment(newState *state) (func() error, error) {
+	// Find existing sealed tail
+	tail := newState.getTailInfo()
+
+	// If there is no tail, next baseIndex is 1 (or the requested next base index)
+	nextBaseIndex := uint64(1)
+	if tail != nil {
+		nextBaseIndex = tail.MaxIndex + 1
+	} else if newState.nextBaseIndex > 0 {
+		nextBaseIndex = newState.nextBaseIndex
+	}
+
+	// Create a new segment
+	newTail := w.newSegment(newState.nextSegmentID, nextBaseIndex)
+	newState.nextSegmentID++
+	ss := segmentState{
+		SegmentInfo: newTail,
+	}
+	newState.segments = newState.segments.Set(newTail.BaseIndex, ss)
+
+	// We're ready to commit now! Return a postCommit that will actually create
+	// the segment file once meta is persisted. We don't do it in parallel because
+	// we don't want to persist a file with an ID before that ID is durably stored
+	// in case the metaDB write doesn't happen.
+	post := func() error {
+		// Now create the new segment for writing.
+		sw, err := w.sf.Create(newTail)
+		if err != nil {
+			return err
+		}
+		newState.tail = sw
+
+		// Also cache the reader/log getter which is also the writer. We don't bother
+		// reopening read only since we assume we have exclusive access anyway and
+		// only use this read-only interface once the segment is sealed.
+		ss.r = newState.tail
+
+		// We need to re-insert it since newTail is a copy not a reference
+		newState.segments = newState.segments.Set(newTail.BaseIndex, ss)
+		return nil
+	}
+	return post, nil
+}
+
+// resetEmptyFirstSegmentBaseIndex is used to change the baseIndex of the tail
+// segment file if it's empty. This is needed when the first log written has a
+// different index to the base index that was assumed when the tail was created
+// (e.g. on startup). It will return an error if the log is not currently empty.
+func (w *WAL) resetEmptyFirstSegmentBaseIndex(newBaseIndex uint64) error {
+	txn := stateTxn(func(newState *state) (func(), func() error, error) {
+		if newState.lastIndex() > 0 {
+			return nil, nil, fmt.Errorf("can't reset BaseIndex on segment, log is not empty")
+		}
+
+		fin := func() {}
+
+		tailSeg := newState.getTailInfo()
+		if tailSeg != nil {
+			// There is an existing tail.
Check if it needs to be replaced + if tailSeg.BaseIndex == newBaseIndex { + // It's fine as it is, no-op + return nil, nil, nil + } + // It needs to be removed + newState.segments = newState.segments.Delete(tailSeg.BaseIndex) + newState.tail = nil + fin = func() { + w.closeSegments([]io.Closer{tailSeg.r}) + w.deleteSegments(map[uint64]uint64{tailSeg.ID: tailSeg.BaseIndex}) + } + } + + // Ensure the newly created tail has the right base index + newState.nextBaseIndex = newBaseIndex + + // Create the new segment + post, err := w.createNextSegment(newState) + if err != nil { + return nil, nil, err + } + + return fin, post, nil + }) + + return w.mutateStateLocked(txn) +} + +func (w *WAL) truncateHeadLocked(newMin uint64) error { + txn := stateTxn(func(newState *state) (func(), func() error, error) { + oldLastIndex := newState.lastIndex() + + // Iterate the segments to find any that are entirely deleted. + toDelete := make(map[uint64]uint64) + toClose := make([]io.Closer, 0, 1) + it := newState.segments.Iterator() + var head *segmentState + nTruncated := uint64(0) + for !it.Done() { + _, seg, _ := it.Next() + + maxIdx := seg.MaxIndex + // If the segment is the tail (unsealed) or a sealed segment that contains + // this new min then we've found the new head. + if seg.SealTime.IsZero() { + maxIdx = newState.lastIndex() + // This is the tail, check if it actually has any content to keep + if maxIdx >= newMin { + head = &seg + break + } + } else if seg.MaxIndex >= newMin { + head = &seg + break + } + + toDelete[seg.ID] = seg.BaseIndex + toClose = append(toClose, seg.r) + newState.segments = newState.segments.Delete(seg.BaseIndex) + nTruncated += (maxIdx - seg.MinIndex + 1) // +1 because MaxIndex is inclusive + } + + // There may not be any segments (left) but if there are, update the new + // head's MinIndex. + var postCommit func() error + if head != nil { + // new + nTruncated += (newMin - head.MinIndex) + head.MinIndex = newMin + newState.segments = newState.segments.Set(head.BaseIndex, *head) + } else { + // If there is no head any more, then there is no tail either! We should + // create a new blank one ready for use when we next append like we do + // during initialization. As an optimization, we create it with a + // BaseIndex of the old MaxIndex + 1 since this is what our Raft library + // uses as the next log index after a restore so this avoids recreating + // the files a second time on the next append. + newState.nextBaseIndex = oldLastIndex + 1 + pc, err := w.createNextSegment(newState) + if err != nil { + return nil, nil, err + } + postCommit = pc + } + w.metrics.IncrementCounter("head_truncations", nTruncated) + + // Return a finalizer that will be called when all readers are done with the + // segments in the current state to close and delete old segments. + fin := func() { + w.closeSegments(toClose) + w.deleteSegments(toDelete) + } + return fin, postCommit, nil + }) + + return w.mutateStateLocked(txn) +} + +func (w *WAL) truncateTailLocked(newMax uint64) error { + txn := stateTxn(func(newState *state) (func(), func() error, error) { + // Reverse iterate the segments to find any that are entirely deleted. 
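+		// For example, with segments [1..100][101..200][201..tail] and
+		// newMax=150: [201..] has BaseIndex > newMax so it is deleted outright;
+		// iteration then stops at [101..200], which is force-sealed below if
+		// needed and has its MaxIndex clamped to 150, before a fresh empty tail
+		// is created at BaseIndex 151.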
+		toDelete := make(map[uint64]uint64)
+		toClose := make([]io.Closer, 0, 1)
+		it := newState.segments.Iterator()
+		it.Last()
+
+		nTruncated := uint64(0)
+		for !it.Done() {
+			_, seg, _ := it.Prev()
+
+			if seg.BaseIndex <= newMax {
+				// We're done
+				break
+			}
+
+			maxIdx := seg.MaxIndex
+			if seg.SealTime.IsZero() {
+				maxIdx = newState.lastIndex()
+			}
+
+			toDelete[seg.ID] = seg.BaseIndex
+			toClose = append(toClose, seg.r)
+			newState.segments = newState.segments.Delete(seg.BaseIndex)
+			nTruncated += (maxIdx - seg.MinIndex + 1) // +1 because MaxIndex is inclusive
+		}
+
+		tail := newState.getTailInfo()
+		if tail != nil {
+			maxIdx := tail.MaxIndex
+
+			// Check that the tail is sealed (it won't be if we didn't need to remove
+			// the actual partial tail above).
+			if tail.SealTime.IsZero() {
+				// Actually seal it (i.e. force it to write out an index block wherever
+				// it got to).
+				indexStart, err := newState.tail.ForceSeal()
+				if err != nil {
+					return nil, nil, err
+				}
+				tail.IndexStart = indexStart
+				tail.SealTime = time.Now()
+				maxIdx = newState.lastIndex()
+			}
+			// Update the MaxIndex
+			nTruncated += (maxIdx - newMax)
+			tail.MaxIndex = newMax
+
+			// And update the tail in the new state
+			newState.segments = newState.segments.Set(tail.BaseIndex, *tail)
+		}
+
+		// Create the new tail segment
+		pc, err := w.createNextSegment(newState)
+		if err != nil {
+			return nil, nil, err
+		}
+		w.metrics.IncrementCounter("tail_truncations", nTruncated)
+
+		// Return a finalizer that will be called when all readers are done with the
+		// segments in the current state to close and delete old segments.
+		fin := func() {
+			w.closeSegments(toClose)
+			w.deleteSegments(toDelete)
+		}
+		return fin, pc, nil
+	})
+
+	return w.mutateStateLocked(txn)
+}
+
+func (w *WAL) deleteSegments(toDelete map[uint64]uint64) {
+	for ID, baseIndex := range toDelete {
+		if err := w.sf.Delete(baseIndex, ID); err != nil {
+			// This is not fatal. We can continue, just old files might need manual
+			// cleanup somehow.
+			w.log.Error("failed to delete old segment", "baseIndex", baseIndex, "id", ID, "err", err)
+		}
+	}
+}
+
+func (w *WAL) closeSegments(toClose []io.Closer) {
+	for _, c := range toClose {
+		if c != nil {
+			if err := c.Close(); err != nil {
+				// Shouldn't happen!
+				w.log.Error("error closing old segment file", "err", err)
+			}
+		}
+	}
+}
+
+func (w *WAL) checkClosed() error {
+	closed := atomic.LoadUint32(&w.closed)
+	if closed != 0 {
+		return ErrClosed
+	}
+	return nil
+}
+
+// Close closes all open files related to the WAL. The WAL is in an invalid
+// state and should not be used again after this is called. It is safe (though a
+// no-op) to call it multiple times, and concurrent reads and writes will either
+// complete safely or get ErrClosed returned depending on sequencing. Generally
+// reads and writes should be stopped before calling this to avoid propagating
+// errors to users during shutdown but it's safe from a data-race perspective.
+func (w *WAL) Close() error {
+	if old := atomic.SwapUint32(&w.closed, 1); old != 0 {
+		// Only close once
+		return nil
+	}
+
+	// Wait for writes
+	w.writeMu.Lock()
+	defer w.writeMu.Unlock()
+
+	// It doesn't matter if there is a rotation scheduled because runRotate will
+	// exit when it sees we are closed anyway.
+ w.awaitRotate = nil + // Awake and terminate the runRotate + close(w.triggerRotate) + + // Replace state with nil state + s := w.loadState() + s.acquire() + defer s.release() + + w.s.Store(&state{}) + + // Old state might be still in use by readers, attach closers to all open + // segment files. + toClose := make([]io.Closer, 0, s.segments.Len()) + it := s.segments.Iterator() + for !it.Done() { + _, seg, _ := it.Next() + if seg.r != nil { + toClose = append(toClose, seg.r) + } + } + // Store finalizer to run once all readers are done. There can't be an + // existing finalizer since this was the active state read under a write + // lock and finalizers are only set on states that have been replaced under + // that same lock. + s.finalizer.Store(func() { + w.closeSegments(toClose) + }) + + return w.metaDB.Close() +} + +// IsMonotonic implements raft.MonotonicLogStore and informs the raft library +// that this store will only allow consecutive log indexes with no gaps. +func (w *WAL) IsMonotonic() bool { + return true +} diff --git a/vendor/github.com/hashicorp/raft/.gitignore b/vendor/github.com/hashicorp/raft/.gitignore new file mode 100644 index 0000000000000..836562412fe8a --- /dev/null +++ b/vendor/github.com/hashicorp/raft/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/hashicorp/raft/.gitmodules b/vendor/github.com/hashicorp/raft/.gitmodules new file mode 100644 index 0000000000000..cbcd5cc91bddc --- /dev/null +++ b/vendor/github.com/hashicorp/raft/.gitmodules @@ -0,0 +1,3 @@ +[submodule "raft-compat/raft-latest"] + path = raft-compat/raft-previous-version + url = https://github.com/hashicorp/raft.git diff --git a/vendor/github.com/hashicorp/raft/.golangci-lint.yml b/vendor/github.com/hashicorp/raft/.golangci-lint.yml new file mode 100644 index 0000000000000..5f2a2d9f3286f --- /dev/null +++ b/vendor/github.com/hashicorp/raft/.golangci-lint.yml @@ -0,0 +1,52 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +run: + deadline: 5m + +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + +linters: + disable-all: true + enable: + - gofmt + #- golint + - govet + #- varcheck + #- typecheck + #- gosimple + +issues: + exclude-use-default: false + exclude: + # ignore the false positive erros resulting from not including a comment above every `package` keyword + - should have a package comment, unless it's in another file for this package (golint) + # golint: Annoying issue about not having a comment. The rare codebase has such comments + # - (comment on exported (method|function|type|const)|should have( a package)? comment|comment should be of the form) + # errcheck: Almost all programs ignore errors on these functions and in most cases it's ok + - Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*printf?|os\.(Un)?Setenv). is not checked + + # golint: False positive when tests are defined in package 'test' + - func name will be used as test\.Test.* by other packages, and that stutters; consider calling this + + # staticcheck: Developers tend to write in C-style with an + # explicit 'break' in a 'switch', so it's ok to ignore + - ineffective break statement. 
Did you mean to break out of the outer loop + # gosec: Too many false-positives on 'unsafe' usage + - Use of unsafe calls should be audited + + # gosec: Too many false-positives for parametrized shell calls + - Subprocess launch(ed with variable|ing should be audited) + + # gosec: Duplicated errcheck checks + - G104 + + # gosec: Too many issues in popular repos + - (Expect directory permissions to be 0750 or less|Expect file permissions to be 0600 or less) + + # gosec: False positive is triggered by 'src, err := ioutil.ReadFile(filename)' + - Potential file inclusion via variable diff --git a/vendor/github.com/hashicorp/raft/.travis.yml b/vendor/github.com/hashicorp/raft/.travis.yml new file mode 100644 index 0000000000000..f214436caa6a3 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/.travis.yml @@ -0,0 +1,24 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +language: go + +go: + # Disabled until https://github.com/armon/go-metrics/issues/59 is fixed + # - 1.6 + - 1.8 + - 1.9 + - 1.12 + - tip + +install: + - make deps + - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin latest + +script: + - make integ + +notifications: + flowdock: + secure: fZrcf9rlh2IrQrlch1sHkn3YI7SKvjGnAl/zyV5D6NROe1Bbr6d3QRMuCXWWdhJHzjKmXk5rIzbqJhUc0PNF7YjxGNKSzqWMQ56KcvN1k8DzlqxpqkcA3Jbs6fXCWo2fssRtZ7hj/wOP1f5n6cc7kzHDt9dgaYJ6nO2fqNPJiTc= + diff --git a/vendor/github.com/hashicorp/raft/CHANGELOG.md b/vendor/github.com/hashicorp/raft/CHANGELOG.md new file mode 100644 index 0000000000000..b0fef7eb6466d --- /dev/null +++ b/vendor/github.com/hashicorp/raft/CHANGELOG.md @@ -0,0 +1,169 @@ +# UNRELEASED + +# 1.7.0 (June 5th, 2024) + +CHANGES + +* Raft multi version testing [GH-559](https://github.com/hashicorp/raft/pull/559) + +IMPROVEMENTS + +* Raft pre-vote extension implementation, activated by default. [GH-530](https://github.com/hashicorp/raft/pull/530) + +BUG FIXES + +* Fix serialize NetworkTransport data race on ServerAddr(). [GH-591](https://github.com/hashicorp/raft/pull/591) + +# 1.6.1 (January 8th, 2024) + +CHANGES + +* Add reference use of Hashicorp Raft. [GH-584](https://github.com/hashicorp/raft/pull/584) +* [COMPLIANCE] Add Copyright and License Headers. [GH-580](https://github.com/hashicorp/raft/pull/580) + +IMPROVEMENTS + +* Bump github.com/hashicorp/go-hclog from 1.5.0 to 1.6.2. [GH-583](https://github.com/hashicorp/raft/pull/583) + +BUG FIXES + +* Fix rare leadership transfer failures when writes happen during transfer. [GH-581](https://github.com/hashicorp/raft/pull/581) + +# 1.6.0 (November 15th, 2023) + +CHANGES + +* Upgrade hashicorp/go-msgpack to v2, with go.mod upgraded from v0.5.5 to v2.1.1. [GH-577](https://github.com/hashicorp/raft/pull/577) + + go-msgpack v2.1.1 is by default binary compatible with v0.5.5 ("non-builtin" encoding of `time.Time`), but can decode messages produced by v1.1.5 as well ("builtin" encoding of `time.Time`). + + However, if users of this libary overrode the version of go-msgpack (especially to v1), this **could break** compatibility if raft nodes are running a mix of versions. + + This compatibility can be configured at runtime in Raft using `NetworkTransportConfig.MsgpackUseNewTimeFormat` -- the default is `false`, which maintains compatibility with `go-msgpack` v0.5.5, but if set to `true`, will be compatible with `go-msgpack` v1.1.5. + +IMPROVEMENTS + +* Push to notify channel when shutting down. 
[GH-567](https://github.com/hashicorp/raft/pull/567) +* Add CommitIndex API [GH-560](https://github.com/hashicorp/raft/pull/560) +* Document some Apply error cases better [GH-561](https://github.com/hashicorp/raft/pull/561) + +BUG FIXES + +* Race with `candidateFromLeadershipTransfer` [GH-570](https://github.com/hashicorp/raft/pull/570) + + +# 1.5.0 (April 21st, 2023) + +IMPROVEMENTS +* Fixed a performance anomaly related to pipelining RPCs that caused large increases in commit latency under high write throughput. Default behavior has changed. For more information see #541. + +# 1.4.0 (March 17th, 2023) + +FEATURES +* Support log stores with a monotonically increasing index. Implementing a log store with the `MonotonicLogStore` interface where `IsMonotonic()` returns true will allow Raft to clear all previous logs on user restores of Raft snapshots. + +BUG FIXES +* Restoring a snapshot with the raft-wal log store caused a panic due to index gap that is created during snapshot restores. + +# 1.3.0 (April 22nd, 2021) + +IMPROVEMENTS + +* Added metrics for `oldestLogAge` and `lastRestoreDuration` to monitor capacity issues that can cause unrecoverable cluster failure [[GH-452](https://github.com/hashicorp/raft/pull/452)][[GH-454](https://github.com/hashicorp/raft/pull/454/files)] +* Made `TrailingLogs`, `SnapshotInterval` and `SnapshotThreshold` reloadable at runtime using a new `ReloadConfig` method. This allows recovery from cases where there are not enough logs retained for followers to catchup after a restart. [[GH-444](https://github.com/hashicorp/raft/pull/444)] +* Inclusify the repository by switching to main [[GH-446](https://github.com/hashicorp/raft/pull/446)] +* Add option for a buffered `ApplyCh` if `MaxAppendEntries` is enabled [[GH-445](https://github.com/hashicorp/raft/pull/445)] +* Add string to `LogType` for more human readable debugging [[GH-442](https://github.com/hashicorp/raft/pull/442)] +* Extract fuzzy testing into its own module [[GH-459](https://github.com/hashicorp/raft/pull/459)] + +BUG FIXES +* Update LogCache `StoreLogs()` to capture an error that would previously cause a panic [[GH-460](https://github.com/hashicorp/raft/pull/460)] + +# 1.2.0 (October 5th, 2020) + +IMPROVEMENTS + +* Remove `StartAsLeader` configuration option [[GH-364](https://github.com/hashicorp/raft/pull/386)] +* Allow futures to react to `Shutdown()` to prevent a deadlock with `takeSnapshot()` [[GH-390](https://github.com/hashicorp/raft/pull/390)] +* Prevent non-voters from becoming eligible for leadership elections [[GH-398](https://github.com/hashicorp/raft/pull/398)] +* Remove an unneeded `io.Copy` from snapshot writes [[GH-399](https://github.com/hashicorp/raft/pull/399)] +* Log decoded candidate address in `duplicate requestVote` warning [[GH-400](https://github.com/hashicorp/raft/pull/400)] +* Prevent starting a TCP transport when IP address is `nil` [[GH-403](https://github.com/hashicorp/raft/pull/403)] +* Reject leadership transfer requests when in candidate state to prevent indefinite blocking while unable to elect a leader [[GH-413](https://github.com/hashicorp/raft/pull/413)] +* Add labels for metric metadata to reduce cardinality of metric names [[GH-409](https://github.com/hashicorp/raft/pull/409)] +* Add peers metric [[GH-413](https://github.com/hashicorp/raft/pull/431)] + +BUG FIXES + +* Make `LeaderCh` always deliver the latest leadership transition [[GH-384](https://github.com/hashicorp/raft/pull/384)] +* Handle updating an existing peer in `startStopReplication` 
[[GH-419](https://github.com/hashicorp/raft/pull/419)] + +# 1.1.2 (January 17th, 2020) + +FEATURES + +* Improve FSM apply performance through batching. Implementing the `BatchingFSM` interface enables this new feature [[GH-364](https://github.com/hashicorp/raft/pull/364)] +* Add ability to obtain Raft configuration before Raft starts with GetConfiguration [[GH-369](https://github.com/hashicorp/raft/pull/369)] + +IMPROVEMENTS + +* Remove lint violations and add a `make` rule for running the linter. +* Replace logger with hclog [[GH-360](https://github.com/hashicorp/raft/pull/360)] +* Read latest configuration independently from main loop [[GH-379](https://github.com/hashicorp/raft/pull/379)] + +BUG FIXES + +* Export the leader field in LeaderObservation [[GH-357](https://github.com/hashicorp/raft/pull/357)] +* Fix snapshot to not attempt to truncate a negative range [[GH-358](https://github.com/hashicorp/raft/pull/358)] +* Check for shutdown in inmemPipeline before sending RPCs [[GH-276](https://github.com/hashicorp/raft/pull/276)] + +# 1.1.1 (July 23rd, 2019) + +FEATURES + +* Add support for extensions to be sent on log entries [[GH-353](https://github.com/hashicorp/raft/pull/353)] +* Add config option to skip snapshot restore on startup [[GH-340](https://github.com/hashicorp/raft/pull/340)] +* Add optional configuration store interface [[GH-339](https://github.com/hashicorp/raft/pull/339)] + +IMPROVEMENTS + +* Break out of group commit early when no logs are present [[GH-341](https://github.com/hashicorp/raft/pull/341)] + +BUGFIXES + +* Fix 64-bit counters on 32-bit platforms [[GH-344](https://github.com/hashicorp/raft/pull/344)] +* Don't defer closing source in recover/restore operations since it's in a loop [[GH-337](https://github.com/hashicorp/raft/pull/337)] + +# 1.1.0 (May 23rd, 2019) + +FEATURES + +* Add transfer leadership extension [[GH-306](https://github.com/hashicorp/raft/pull/306)] + +IMPROVEMENTS + +* Move to `go mod` [[GH-323](https://github.com/hashicorp/consul/pull/323)] +* Leveled log [[GH-321](https://github.com/hashicorp/consul/pull/321)] +* Add peer changes to observations [[GH-326](https://github.com/hashicorp/consul/pull/326)] + +BUGFIXES + +* Copy the contents of an InmemSnapshotStore when opening a snapshot [[GH-270](https://github.com/hashicorp/consul/pull/270)] +* Fix logging panic when converting parameters to strings [[GH-332](https://github.com/hashicorp/consul/pull/332)] + +# 1.0.1 (April 12th, 2019) + +IMPROVEMENTS + +* InMemTransport: Add timeout for sending a message [[GH-313](https://github.com/hashicorp/raft/pull/313)] +* ensure 'make deps' downloads test dependencies like testify [[GH-310](https://github.com/hashicorp/raft/pull/310)] +* Clarifies function of CommitTimeout [[GH-309](https://github.com/hashicorp/raft/pull/309)] +* Add additional metrics regarding log dispatching and committal [[GH-316](https://github.com/hashicorp/raft/pull/316)] + +# 1.0.0 (October 3rd, 2017) + +v1.0.0 takes the changes that were staged in the library-v2-stage-one branch. This version manages server identities using a UUID, so introduces some breaking API changes. It also versions the Raft protocol, and requires some special steps when interoperating with Raft servers running older versions of the library (see the detailed comment in config.go about version compatibility). You can reference https://github.com/hashicorp/consul/pull/2222 for an idea of what was required to port Consul to these new interfaces. 
+ +# 0.1.0 (September 29th, 2017) + +v0.1.0 is the original stable version of the library that was in main and has been maintained with no breaking API changes. This was in use by Consul prior to version 0.7.0. diff --git a/vendor/github.com/hashicorp/raft/LICENSE b/vendor/github.com/hashicorp/raft/LICENSE new file mode 100644 index 0000000000000..c72625e4cc88b --- /dev/null +++ b/vendor/github.com/hashicorp/raft/LICENSE @@ -0,0 +1,356 @@ +Copyright (c) 2013 HashiCorp, Inc. + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. 
License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. 
You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. 
+ Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. 
Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/raft/Makefile b/vendor/github.com/hashicorp/raft/Makefile new file mode 100644 index 0000000000000..c988f0ab2f4aa --- /dev/null +++ b/vendor/github.com/hashicorp/raft/Makefile @@ -0,0 +1,45 @@ +DEPS = $(go list -f '{{range .TestImports}}{{.}} {{end}}' ./...) +ENV = $(shell go env GOPATH) +GO_VERSION = $(shell go version) +GOLANG_CI_VERSION = v1.19.0 + +# Look for versions prior to 1.10 which have a different fmt output +# and don't lint with gofmt against them. +ifneq (,$(findstring go version go1.8, $(GO_VERSION))) + FMT= +else ifneq (,$(findstring go version go1.9, $(GO_VERSION))) + FMT= +else + FMT=--enable gofmt +endif + +TEST_RESULTS_DIR?=/tmp/test-results + +test: + GOTRACEBACK=all go test $(TESTARGS) -timeout=180s -race . + GOTRACEBACK=all go test $(TESTARGS) -timeout=180s -tags batchtest -race . + +integ: test + INTEG_TESTS=yes go test $(TESTARGS) -timeout=60s -run=Integ . + INTEG_TESTS=yes go test $(TESTARGS) -timeout=60s -tags batchtest -run=Integ . + +fuzz: + cd ./fuzzy && go test $(TESTARGS) -timeout=20m . + cd ./fuzzy && go test $(TESTARGS) -timeout=20m -tags batchtest . + +deps: + go get -t -d -v ./... + echo $(DEPS) | xargs -n1 go get -d + +lint: + gofmt -s -w . + golangci-lint run -c .golangci-lint.yml $(FMT) . 
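+
+# Usage sketch (illustrative, not an upstream target): TESTARGS is passed
+# straight through to `go test` by the targets above, so flags and test
+# selectors can be supplied per invocation (the test pattern below is
+# hypothetical), e.g.:
+#
+#   make test TESTARGS='-v -run TestRaft'
+#   make fuzz TESTARGS='-v'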
+
+dep-linter:
+	curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(ENV)/bin $(GOLANG_CI_VERSION)
+
+cov:
+	INTEG_TESTS=yes gocov test github.com/hashicorp/raft | gocov-html > /tmp/coverage.html
+	open /tmp/coverage.html
+
+.PHONY: test cov integ deps dep-linter lint
diff --git a/vendor/github.com/hashicorp/raft/README.md b/vendor/github.com/hashicorp/raft/README.md
new file mode 100644
index 0000000000000..ded5bd02e8ff8
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/README.md
@@ -0,0 +1,112 @@
+raft [![Build Status](https://github.com/hashicorp/raft/workflows/ci/badge.svg)](https://github.com/hashicorp/raft/actions)
+====
+
+raft is a [Go](http://www.golang.org) library that manages a replicated
+log and can be used with an FSM to manage replicated state machines. It
+is a library for providing [consensus](http://en.wikipedia.org/wiki/Consensus_(computer_science)).
+
+The use cases for such a library are far-reaching: replicated state
+machines are a key component of many distributed systems. They enable
+building Consistent, Partition Tolerant (CP) systems, with limited
+fault tolerance as well.
+
+## Building
+
+If you wish to build raft you'll need Go version 1.16+ installed.
+
+Please check your installation with:
+
+```
+go version
+```
+
+## Documentation
+
+For complete documentation, see the associated [Godoc](http://godoc.org/github.com/hashicorp/raft).
+
+To prevent complications with cgo, the primary backend `MDBStore` is in a separate repository,
+called [raft-mdb](http://github.com/hashicorp/raft-mdb). That is the recommended implementation
+for the `LogStore` and `StableStore`.
+
+A pure Go backend called [raft-boltdb](https://github.com/hashicorp/raft-boltdb), which uses
+[Bbolt](https://github.com/etcd-io/bbolt), is also available. It can also be used as a `LogStore`
+and `StableStore`.
+
+
+## Community Contributed Examples
+- [Raft gRPC Example](https://github.com/Jille/raft-grpc-example) - Utilizing the Raft repository with gRPC
+- [Raft-based KV-store Example](https://github.com/otoolep/hraftd) - Uses Hashicorp Raft to build a distributed key-value store
+
+
+## Tagged Releases
+
+Since September 2017, HashiCorp has used tags for this library to clearly indicate
+major version updates. We recommend you vendor your application's dependency on this library.
+
+* v0.1.0 is the original stable version of the library that was in main and has been maintained
+with no breaking API changes. This was in use by Consul prior to version 0.7.0.
+
+* v1.0.0 takes the changes that were staged in the library-v2-stage-one branch. This version
+manages server identities using a UUID, so it introduces some breaking API changes. It also versions
+the Raft protocol, and requires some special steps when interoperating with Raft servers running
+older versions of the library (see the detailed comment in config.go about version compatibility).
+You can reference https://github.com/hashicorp/consul/pull/2222 for an idea of what was required
+to port Consul to these new interfaces.
+
+    This version includes some new features as well, including non-voting servers, a new address
+    provider abstraction in the transport layer, and more resilient snapshots.
+
+## Protocol
+
+raft is based on ["Raft: In Search of an Understandable Consensus Algorithm"](https://raft.github.io/raft.pdf)
+
+A high level overview of the Raft protocol is described below, but for details please read the full
+[Raft paper](https://raft.github.io/raft.pdf)
+followed by the raft source. Any questions about the raft protocol should be sent to the
+[raft-dev mailing list](https://groups.google.com/forum/#!forum/raft-dev).
+
+### Protocol Description
+
+Raft nodes are always in one of three states: follower, candidate or leader. All
+nodes initially start out as followers. In this state, nodes can accept log entries
+from a leader and cast votes. If no entries are received for some time, nodes
+self-promote to the candidate state. In the candidate state nodes request votes from
+their peers. If a candidate receives a quorum of votes, then it is promoted to a leader.
+The leader must accept new log entries and replicate them to all the other followers.
+In addition, if stale reads are not acceptable, all queries must also be performed on
+the leader.
+
+Once a cluster has a leader, it is able to accept new log entries. A client can
+request that a leader append a new log entry, which is an opaque binary blob to
+Raft. The leader then writes the entry to durable storage and attempts to replicate
+it to a quorum of followers. Once the log entry is considered *committed*, it can be
+*applied* to a finite state machine. The finite state machine is application specific,
+and is implemented using an interface.
+
+An obvious question relates to the unbounded nature of a replicated log. Raft provides
+a mechanism by which the current state is snapshotted, and the log is compacted. Because
+of the FSM abstraction, restoring the state of the FSM must result in the same state
+as a replay of old logs. This allows Raft to capture the FSM state at a point in time,
+and then remove all the logs that were used to reach that state. This is performed automatically
+without user intervention, and prevents unbounded disk usage while also minimizing the
+time spent replaying logs.
+
+Lastly, there is the issue of updating the peer set when new servers are joining
+or existing servers are leaving. As long as a quorum of nodes is available, this
+is not an issue as Raft provides mechanisms to dynamically update the peer set.
+If a quorum of nodes is unavailable, then this becomes a very challenging issue.
+For example, suppose there are only 2 peers, A and B. The quorum size is also
+2, meaning both nodes must agree to commit a log entry. If either A or B fails,
+it is now impossible to reach quorum. This means the cluster is unable to add or
+remove a node, or to commit any additional log entries. This results in *unavailability*.
+At this point, manual intervention would be required to remove either A or B,
+and to restart the remaining node in bootstrap mode.
+
+A Raft cluster of 3 nodes can tolerate a single node failure, while a cluster
+of 5 can tolerate 2 node failures. The recommended configuration is to either
+run 3 or 5 raft servers. This maximizes availability without
+greatly sacrificing performance.
+
+In terms of performance, Raft is comparable to Paxos. Assuming stable leadership,
+committing a log entry requires a single round trip to half of the cluster.
+Thus performance is bound by disk I/O and network latency.
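+
+## Minimal Usage Sketch
+
+The following single-node example is a sketch rather than canonical usage: it
+wires the library's in-memory helpers (`InmemStore`, `InmemSnapshotStore`,
+`InmemTransport`) to a toy FSM that only remembers the last applied entry. A
+real deployment would substitute a durable `LogStore`/`StableStore` (for
+example raft-boltdb) and a network transport, and would handle errors and
+shutdown more carefully.
+
+```
+package main
+
+import (
+	"fmt"
+	"io"
+	"log"
+	"sync"
+	"time"
+
+	"github.com/hashicorp/raft"
+)
+
+// lastEntryFSM is a toy FSM that just remembers the last applied entry.
+type lastEntryFSM struct {
+	mu   sync.Mutex
+	last []byte
+}
+
+func (f *lastEntryFSM) Apply(l *raft.Log) interface{} {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+	f.last = append([]byte(nil), l.Data...)
+	return nil
+}
+
+func (f *lastEntryFSM) Snapshot() (raft.FSMSnapshot, error) {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+	return &lastEntrySnapshot{data: append([]byte(nil), f.last...)}, nil
+}
+
+func (f *lastEntryFSM) Restore(rc io.ReadCloser) error {
+	defer rc.Close()
+	b, err := io.ReadAll(rc)
+	if err != nil {
+		return err
+	}
+	f.mu.Lock()
+	f.last = b
+	f.mu.Unlock()
+	return nil
+}
+
+// lastEntrySnapshot persists the state captured by Snapshot.
+type lastEntrySnapshot struct{ data []byte }
+
+func (s *lastEntrySnapshot) Persist(sink raft.SnapshotSink) error {
+	if _, err := sink.Write(s.data); err != nil {
+		sink.Cancel()
+		return err
+	}
+	return sink.Close()
+}
+
+func (s *lastEntrySnapshot) Release() {}
+
+func main() {
+	fsm := &lastEntryFSM{}
+
+	config := raft.DefaultConfig()
+	config.LocalID = raft.ServerID("node-1")
+
+	store := raft.NewInmemStore() // serves as both LogStore and StableStore
+	snaps := raft.NewInmemSnapshotStore()
+	addr, trans := raft.NewInmemTransport("")
+
+	r, err := raft.NewRaft(config, fsm, store, store, snaps, trans)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Bootstrap a single-voter cluster; further servers would be added
+	// later from the leader with AddVoter.
+	if err := r.BootstrapCluster(raft.Configuration{
+		Servers: []raft.Server{{Suffrage: raft.Voter, ID: config.LocalID, Address: addr}},
+	}).Error(); err != nil {
+		log.Fatal(err)
+	}
+
+	<-r.LeaderCh() // wait until this node wins the single-node election
+
+	if err := r.Apply([]byte("hello"), time.Second).Error(); err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println("entry committed and applied")
+}
+```
+
+Additional voters are then added from the leader with `AddVoter` and removed
+with `RemoveServer`, as described above.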
diff --git a/vendor/github.com/hashicorp/raft/api.go b/vendor/github.com/hashicorp/raft/api.go
new file mode 100644
index 0000000000000..cff2eaac224bc
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/api.go
@@ -0,0 +1,1268 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package raft
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	metrics "github.com/armon/go-metrics"
+	hclog "github.com/hashicorp/go-hclog"
+)
+
+const (
+	// SuggestedMaxDataSize of the data in a raft log entry, in bytes.
+	//
+	// The value is based on current architecture, default timing, etc. Clients can
+	// ignore this value if they want as there is no actual hard checking
+	// within the library. As the library is enhanced this value may change
+	// over time to reflect current suggested maximums.
+	//
+	// Applying log entries with data greater than this size risks RPC IO taking
+	// too long and preventing timely heartbeat signals. These signals are sent
+	// serially in current transports, potentially causing leadership instability.
+	SuggestedMaxDataSize = 512 * 1024
+)
+
+var (
+	// ErrLeader is returned when an operation can't be completed on a
+	// leader node.
+	ErrLeader = errors.New("node is the leader")
+
+	// ErrNotLeader is returned when an operation can't be completed on a
+	// follower or candidate node.
+	ErrNotLeader = errors.New("node is not the leader")
+
+	// ErrNotVoter is returned when an operation can't be completed on a
+	// non-voter node.
+	ErrNotVoter = errors.New("node is not a voter")
+
+	// ErrLeadershipLost is returned when a leader fails to commit a log entry
+	// because it's been deposed in the process.
+	ErrLeadershipLost = errors.New("leadership lost while committing log")
+
+	// ErrAbortedByRestore is returned when a leader fails to commit a log
+	// entry because it's been superseded by a user snapshot restore.
+	ErrAbortedByRestore = errors.New("snapshot restored while committing log")
+
+	// ErrRaftShutdown is returned when operations are requested against an
+	// inactive Raft.
+	ErrRaftShutdown = errors.New("raft is already shutdown")
+
+	// ErrEnqueueTimeout is returned when a command fails due to a timeout.
+	ErrEnqueueTimeout = errors.New("timed out enqueuing operation")
+
+	// ErrNothingNewToSnapshot is returned when trying to create a snapshot
+	// but there's nothing new committed to the FSM since we started.
+	ErrNothingNewToSnapshot = errors.New("nothing new to snapshot")
+
+	// ErrUnsupportedProtocol is returned when an operation is attempted
+	// that's not supported by the current protocol version.
+	ErrUnsupportedProtocol = errors.New("operation not supported with current protocol version")
+
+	// ErrCantBootstrap is returned when an attempt is made to bootstrap a
+	// cluster that already has state present.
+	ErrCantBootstrap = errors.New("bootstrap only works on new clusters")
+
+	// ErrLeadershipTransferInProgress is returned when the leader is rejecting
+	// client requests because it is attempting to transfer leadership.
+	ErrLeadershipTransferInProgress = errors.New("leadership transfer in progress")
+)
+
+// Raft implements a Raft node.
+type Raft struct {
+	raftState
+
+	// protocolVersion is used to inter-operate with Raft servers running
+	// different versions of the library. See comments in config.go for more
+	// details.
+	protocolVersion ProtocolVersion
+
+	// applyCh is used to send logs asynchronously to the main thread to
+	// be committed and applied to the FSM.
+	applyCh chan *logFuture
+
+	// conf stores the current configuration to use. This is the most recent one
+	// provided. All reads of config values should use the config() helper method
+	// to read this safely.
+	conf atomic.Value
+
+	// confReloadMu ensures that only one thread can reload config at once since
+	// we need to read-modify-write the atomic. It is NOT necessary to hold this
+	// for any other operation e.g. reading config using config().
+	confReloadMu sync.Mutex
+
+	// FSM is the client state machine to apply commands to
+	fsm FSM
+
+	// fsmMutateCh is used to send state-changing updates to the FSM. This
+	// receives pointers to commitTuple structures when applying logs or
+	// pointers to restoreFuture structures when restoring a snapshot. We
+	// need control over the order of these operations when doing user
+	// restores so that we finish applying any old log applies before we
+	// take a user snapshot on the leader, otherwise we might restore the
+	// snapshot and apply old logs to it that were in the pipe.
+	fsmMutateCh chan interface{}
+
+	// fsmSnapshotCh is used to trigger a new snapshot being taken
+	fsmSnapshotCh chan *reqSnapshotFuture
+
+	// lastContact is the last time we had contact from the
+	// leader node. This can be used to gauge staleness.
+	lastContact     time.Time
+	lastContactLock sync.RWMutex
+
+	// leaderAddr is the current cluster leader Address
+	leaderAddr ServerAddress
+	// leaderID is the current cluster leader ID
+	leaderID   ServerID
+	leaderLock sync.RWMutex
+
+	// leaderCh is used to notify of leadership changes
+	leaderCh chan bool
+
+	// leaderState used only while state is leader
+	leaderState leaderState
+
+	// candidateFromLeadershipTransfer is used to indicate that this server became
+	// a candidate because the leader tried to transfer leadership. This flag is
+	// used in RequestVoteRequest to express that a leadership transfer is going
+	// on.
+	candidateFromLeadershipTransfer atomic.Bool
+
+	// Stores our local server ID, used to avoid sending RPCs to ourself
+	localID ServerID
+
+	// Stores our local addr
+	localAddr ServerAddress
+
+	// Used for our logging
+	logger hclog.Logger
+
+	// LogStore provides durable storage for logs
+	logs LogStore
+
+	// Used to request the leader to make configuration changes.
+	configurationChangeCh chan *configurationChangeFuture
+
+	// Tracks the latest configuration and latest committed configuration from
+	// the log/snapshot.
+	configurations configurations
+
+	// Holds a copy of the latest configuration which can be read independently
+	// of the main loop.
+	latestConfiguration atomic.Value
+
+	// RPC chan comes from the transport layer
+	rpcCh <-chan RPC
+
+	// Shutdown channel to exit, protected to prevent concurrent exits
+	shutdown     bool
+	shutdownCh   chan struct{}
+	shutdownLock sync.Mutex
+
+	// snapshots is used to store and retrieve snapshots
+	snapshots SnapshotStore
+
+	// userSnapshotCh is used for user-triggered snapshots
+	userSnapshotCh chan *userSnapshotFuture
+
+	// userRestoreCh is used for user-triggered restores of external
+	// snapshots
+	userRestoreCh chan *userRestoreFuture
+
+	// stable is a StableStore implementation for durable state
+	// It provides stable storage for many fields in raftState
+	stable StableStore
+
+	// The transport layer we use
+	trans Transport
+
+	// verifyCh is used to async send verify futures to the main thread
+	// to verify we are still the leader
+	verifyCh chan *verifyFuture
+
+	// configurationsCh is used to get the configuration data safely from
+	// outside of the main thread.
+	configurationsCh chan *configurationsFuture
+
+	// bootstrapCh is used to attempt an initial bootstrap from outside of
+	// the main thread.
+	bootstrapCh chan *bootstrapFuture
+
+	// List of observers and the mutex that protects them. The observers list
+	// is indexed by an artificial ID which is used for deregistration.
+	observersLock sync.RWMutex
+	observers     map[uint64]*Observer
+
+	// leadershipTransferCh is used to start a leadership transfer from outside of
+	// the main thread.
+	leadershipTransferCh chan *leadershipTransferFuture
+
+	// leaderNotifyCh is used to tell the leader that the config has changed
+	leaderNotifyCh chan struct{}
+
+	// followerNotifyCh is used to tell followers that the config has changed
+	followerNotifyCh chan struct{}
+
+	// mainThreadSaturation measures the saturation of the main raft goroutine.
+	mainThreadSaturation *saturationMetric
+
+	// preVoteDisabled controls whether the pre-vote feature is activated;
+	// the pre-vote feature is disabled if set to true.
+	preVoteDisabled bool
+}
+
+// BootstrapCluster initializes a server's storage with the given cluster
+// configuration. This should only be called at the beginning of time for the
+// cluster with an identical configuration listing all Voter servers. There is
+// no need to bootstrap Nonvoter and Staging servers.
+//
+// A cluster can only be bootstrapped once from a single participating Voter
+// server. Any further attempts to bootstrap will return an error that can be
+// safely ignored.
+//
+// One approach is to bootstrap a single server with a configuration
+// listing just itself as a Voter, then invoke AddVoter() on it to add other
+// servers to the cluster.
+func BootstrapCluster(conf *Config, logs LogStore, stable StableStore,
+	snaps SnapshotStore, trans Transport, configuration Configuration) error {
+	// Validate the Raft server config.
+	if err := ValidateConfig(conf); err != nil {
+		return err
+	}
+
+	// Sanity check the Raft peer configuration.
+	if err := checkConfiguration(configuration); err != nil {
+		return err
+	}
+
+	// Make sure the cluster is in a clean state.
+	hasState, err := HasExistingState(logs, stable, snaps)
+	if err != nil {
+		return fmt.Errorf("failed to check for existing state: %v", err)
+	}
+	if hasState {
+		return ErrCantBootstrap
+	}
+
+	// Set current term to 1.
+	if err := stable.SetUint64(keyCurrentTerm, 1); err != nil {
+		return fmt.Errorf("failed to save current term: %v", err)
+	}
+
+	// Append configuration entry to log.
+	entry := &Log{
+		Index: 1,
+		Term:  1,
+	}
+	if conf.ProtocolVersion < 3 {
+		entry.Type = LogRemovePeerDeprecated
+		entry.Data = encodePeers(configuration, trans)
+	} else {
+		entry.Type = LogConfiguration
+		entry.Data = EncodeConfiguration(configuration)
+	}
+	if err := logs.StoreLog(entry); err != nil {
+		return fmt.Errorf("failed to append configuration entry to log: %v", err)
+	}
+
+	return nil
+}
+
+// RecoverCluster is used to manually force a new configuration in order to
+// recover from a loss of quorum where the current configuration cannot be
+// restored, such as when several servers die at the same time. This works by
+// reading all the current state for this server, creating a snapshot with the
+// supplied configuration, and then truncating the Raft log. This is the only
+// safe way to force a given configuration without actually altering the log to
+// insert any new entries, which could cause conflicts with other servers with
+// different state.
+//
+// WARNING! This operation implicitly commits all entries in the Raft log, so
+// in general this is an extremely unsafe operation. If you've lost your other
+// servers and are performing a manual recovery, then you've also lost the
+// commit information, so this is likely the best you can do, but you should be
+// aware that calling this can cause Raft log entries that were in the process
+// of being replicated, but not yet committed, to be committed.
+//
+// Note the FSM passed here is used for the snapshot operations and will be
+// left in a state that should not be used by the application. Be sure to
+// discard this FSM and any associated state and provide a fresh one when
+// calling NewRaft later.
+//
+// A typical way to recover the cluster is to shut down all servers and then
+// run RecoverCluster on every server using an identical configuration. When
+// the cluster is then restarted, an election will occur, and Raft will
+// resume normal operation. If it's desired to make a particular server the
+// leader, this can be used to inject a new configuration with that server as
+// the sole voter, and then join up other new clean-state peer servers using
+// the usual APIs in order to bring the cluster back into a known state.
+func RecoverCluster(conf *Config, fsm FSM, logs LogStore, stable StableStore,
+	snaps SnapshotStore, trans Transport, configuration Configuration) error {
+	// Validate the Raft server config.
+	if err := ValidateConfig(conf); err != nil {
+		return err
+	}
+
+	// Sanity check the Raft peer configuration.
+	if err := checkConfiguration(configuration); err != nil {
+		return err
+	}
+
+	// Refuse to recover if there's no existing state. This would be safe to
+	// do, but it is likely an indication of an operator error where they
+	// expect data to be there and it's not. By refusing, we force them
+	// to show intent to start a cluster fresh by explicitly doing a
+	// bootstrap, rather than quietly firing up a fresh cluster here.
+	if hasState, err := HasExistingState(logs, stable, snaps); err != nil {
+		return fmt.Errorf("failed to check for existing state: %v", err)
+	} else if !hasState {
+		return fmt.Errorf("refused to recover cluster with no initial state, this is probably an operator error")
+	}
+
+	// Attempt to restore any snapshots we find, newest to oldest.
+	var (
+		snapshotIndex  uint64
+		snapshotTerm   uint64
+		snapshots, err = snaps.List()
+	)
+	if err != nil {
+		return fmt.Errorf("failed to list snapshots: %v", err)
+	}
+
+	logger := conf.getOrCreateLogger()
+
+	for _, snapshot := range snapshots {
+		var source io.ReadCloser
+		_, source, err = snaps.Open(snapshot.ID)
+		if err != nil {
+			// Skip this one and try the next. We will detect if we
+			// couldn't open any snapshots.
+			continue
+		}
+
+		// Note this is the one place we call fsm.Restore without the
+		// fsmRestoreAndMeasure wrapper since this function should only be called to
+		// reset state on disk and the FSM passed will not be used for a running
+		// server instance. If the same process will eventually become a Raft peer
+		// then it will call NewRaft and restore from disk again at that point,
+		// which will report metrics.
+		snapLogger := logger.With(
+			"id", snapshot.ID,
+			"last-index", snapshot.Index,
+			"last-term", snapshot.Term,
+			"size-in-bytes", snapshot.Size,
+		)
+		crc := newCountingReadCloser(source)
+		monitor := startSnapshotRestoreMonitor(snapLogger, crc, snapshot.Size, false)
+		err = fsm.Restore(crc)
+		// Close the source after the restore has completed
+		source.Close()
+		monitor.StopAndWait()
+		if err != nil {
+			// Same here, skip and try the next one.
+			continue
+		}
+
+		snapshotIndex = snapshot.Index
+		snapshotTerm = snapshot.Term
+		break
+	}
+	if len(snapshots) > 0 && (snapshotIndex == 0 || snapshotTerm == 0) {
+		return fmt.Errorf("failed to restore any of the available snapshots")
+	}
+
+	// The snapshot information is the best known end point for the data
+	// until we play back the Raft log entries.
+	lastIndex := snapshotIndex
+	lastTerm := snapshotTerm
+
+	// Apply any Raft log entries past the snapshot.
+	lastLogIndex, err := logs.LastIndex()
+	if err != nil {
+		return fmt.Errorf("failed to find last log: %v", err)
+	}
+	for index := snapshotIndex + 1; index <= lastLogIndex; index++ {
+		var entry Log
+		if err = logs.GetLog(index, &entry); err != nil {
+			return fmt.Errorf("failed to get log at index %d: %v", index, err)
+		}
+		if entry.Type == LogCommand {
+			_ = fsm.Apply(&entry)
+		}
+		lastIndex = entry.Index
+		lastTerm = entry.Term
+	}
+
+	// Create a new snapshot, placing the configuration in as if it was
+	// committed at index 1.
+	snapshot, err := fsm.Snapshot()
+	if err != nil {
+		return fmt.Errorf("failed to snapshot FSM: %v", err)
+	}
+	version := getSnapshotVersion(conf.ProtocolVersion)
+	sink, err := snaps.Create(version, lastIndex, lastTerm, configuration, 1, trans)
+	if err != nil {
+		return fmt.Errorf("failed to create snapshot: %v", err)
+	}
+	if err = snapshot.Persist(sink); err != nil {
+		return fmt.Errorf("failed to persist snapshot: %v", err)
+	}
+	if err = sink.Close(); err != nil {
+		return fmt.Errorf("failed to finalize snapshot: %v", err)
+	}
+
+	// Compact the log so that we don't get bad interference from any
+	// configuration change log entries that might be there.
+	firstLogIndex, err := logs.FirstIndex()
+	if err != nil {
+		return fmt.Errorf("failed to get first log index: %v", err)
+	}
+	if err := logs.DeleteRange(firstLogIndex, lastLogIndex); err != nil {
+		return fmt.Errorf("log compaction failed: %v", err)
+	}
+
+	return nil
+}
+
+// GetConfiguration returns the persisted configuration of the Raft cluster
+// without starting a Raft instance or connecting to the cluster. This function
+// has identical behavior to Raft.GetConfiguration.
+func GetConfiguration(conf *Config, fsm FSM, logs LogStore, stable StableStore, + snaps SnapshotStore, trans Transport) (Configuration, error) { + conf.skipStartup = true + r, err := NewRaft(conf, fsm, logs, stable, snaps, trans) + if err != nil { + return Configuration{}, err + } + future := r.GetConfiguration() + if err = future.Error(); err != nil { + return Configuration{}, err + } + return future.Configuration(), nil +} + +// HasExistingState returns true if the server has any existing state (logs, +// knowledge of a current term, or any snapshots). +func HasExistingState(logs LogStore, stable StableStore, snaps SnapshotStore) (bool, error) { + // Make sure we don't have a current term. + currentTerm, err := stable.GetUint64(keyCurrentTerm) + if err == nil { + if currentTerm > 0 { + return true, nil + } + } else { + if err.Error() != "not found" { + return false, fmt.Errorf("failed to read current term: %v", err) + } + } + + // Make sure we have an empty log. + lastIndex, err := logs.LastIndex() + if err != nil { + return false, fmt.Errorf("failed to get last log index: %v", err) + } + if lastIndex > 0 { + return true, nil + } + + // Make sure we have no snapshots + snapshots, err := snaps.List() + if err != nil { + return false, fmt.Errorf("failed to list snapshots: %v", err) + } + if len(snapshots) > 0 { + return true, nil + } + + return false, nil +} + +// NewRaft is used to construct a new Raft node. It takes a configuration, as well +// as implementations of various interfaces that are required. If we have any +// old state, such as snapshots, logs, peers, etc, all those will be restored +// when creating the Raft node. +func NewRaft(conf *Config, fsm FSM, logs LogStore, stable StableStore, snaps SnapshotStore, trans Transport) (*Raft, error) { + // Validate the configuration. + if err := ValidateConfig(conf); err != nil { + return nil, err + } + + // Ensure we have a LogOutput. + logger := conf.getOrCreateLogger() + + // Try to restore the current term. + currentTerm, err := stable.GetUint64(keyCurrentTerm) + if err != nil && err.Error() != "not found" { + return nil, fmt.Errorf("failed to load current term: %v", err) + } + + // Read the index of the last log entry. + lastIndex, err := logs.LastIndex() + if err != nil { + return nil, fmt.Errorf("failed to find last log: %v", err) + } + + // Get the last log entry. + var lastLog Log + if lastIndex > 0 { + if err = logs.GetLog(lastIndex, &lastLog); err != nil { + return nil, fmt.Errorf("failed to get last log at index %d: %v", lastIndex, err) + } + } + + // Make sure we have a valid server address and ID. + protocolVersion := conf.ProtocolVersion + localAddr := trans.LocalAddr() + localID := conf.LocalID + + // TODO (slackpad) - When we deprecate protocol version 2, remove this + // along with the AddPeer() and RemovePeer() APIs. + if protocolVersion < 3 && string(localID) != string(localAddr) { + return nil, fmt.Errorf("when running with ProtocolVersion < 3, LocalID must be set to the network address") + } + + // Buffer applyCh to MaxAppendEntries if the option is enabled + applyCh := make(chan *logFuture) + if conf.BatchApplyCh { + applyCh = make(chan *logFuture, conf.MaxAppendEntries) + } + + _, transportSupportPreVote := trans.(WithPreVote) + // Create Raft struct. 
+ r := &Raft{ + protocolVersion: protocolVersion, + applyCh: applyCh, + fsm: fsm, + fsmMutateCh: make(chan interface{}, 128), + fsmSnapshotCh: make(chan *reqSnapshotFuture), + leaderCh: make(chan bool, 1), + localID: localID, + localAddr: localAddr, + logger: logger, + logs: logs, + configurationChangeCh: make(chan *configurationChangeFuture), + configurations: configurations{}, + rpcCh: trans.Consumer(), + snapshots: snaps, + userSnapshotCh: make(chan *userSnapshotFuture), + userRestoreCh: make(chan *userRestoreFuture), + shutdownCh: make(chan struct{}), + stable: stable, + trans: trans, + verifyCh: make(chan *verifyFuture, 64), + configurationsCh: make(chan *configurationsFuture, 8), + bootstrapCh: make(chan *bootstrapFuture), + observers: make(map[uint64]*Observer), + leadershipTransferCh: make(chan *leadershipTransferFuture, 1), + leaderNotifyCh: make(chan struct{}, 1), + followerNotifyCh: make(chan struct{}, 1), + mainThreadSaturation: newSaturationMetric([]string{"raft", "thread", "main", "saturation"}, 1*time.Second), + preVoteDisabled: conf.PreVoteDisabled || !transportSupportPreVote, + } + if !transportSupportPreVote && !conf.PreVoteDisabled { + r.logger.Warn("pre-vote is disabled because it is not supported by the Transport") + } + + r.conf.Store(*conf) + + // Initialize as a follower. + r.setState(Follower) + + // Restore the current term and the last log. + r.setCurrentTerm(currentTerm) + r.setLastLog(lastLog.Index, lastLog.Term) + + // Attempt to restore a snapshot if there are any. + if err := r.restoreSnapshot(); err != nil { + return nil, err + } + + // Scan through the log for any configuration change entries. + snapshotIndex, _ := r.getLastSnapshot() + for index := snapshotIndex + 1; index <= lastLog.Index; index++ { + var entry Log + if err := r.logs.GetLog(index, &entry); err != nil { + r.logger.Error("failed to get log", "index", index, "error", err) + panic(err) + } + if err := r.processConfigurationLogEntry(&entry); err != nil { + return nil, err + } + } + r.logger.Info("initial configuration", + "index", r.configurations.latestIndex, + "servers", hclog.Fmt("%+v", r.configurations.latest.Servers)) + + // Setup a heartbeat fast-path to avoid head-of-line + // blocking where possible. It MUST be safe for this + // to be called concurrently with a blocking RPC. + trans.SetHeartbeatHandler(r.processHeartbeat) + + if conf.skipStartup { + return r, nil + } + // Start the background work. + r.goFunc(r.run) + r.goFunc(r.runFSM) + r.goFunc(r.runSnapshots) + return r, nil +} + +// restoreSnapshot attempts to restore the latest snapshots, and fails if none +// of them can be restored. This is called at initialization time, and is +// completely unsafe to call at any other time. 
+func (r *Raft) restoreSnapshot() error {
+	snapshots, err := r.snapshots.List()
+	if err != nil {
+		r.logger.Error("failed to list snapshots", "error", err)
+		return err
+	}
+
+	// Try to load in order of newest to oldest
+	for _, snapshot := range snapshots {
+		if success := r.tryRestoreSingleSnapshot(snapshot); !success {
+			continue
+		}
+
+		// Update the lastApplied so we don't replay old logs
+		r.setLastApplied(snapshot.Index)
+
+		// Update the last stable snapshot info
+		r.setLastSnapshot(snapshot.Index, snapshot.Term)
+
+		// Update the configuration
+		var conf Configuration
+		var index uint64
+		if snapshot.Version > 0 {
+			conf = snapshot.Configuration
+			index = snapshot.ConfigurationIndex
+		} else {
+			var err error
+			if conf, err = decodePeers(snapshot.Peers, r.trans); err != nil {
+				return err
+			}
+			index = snapshot.Index
+		}
+		r.setCommittedConfiguration(conf, index)
+		r.setLatestConfiguration(conf, index)
+
+		// Success!
+		return nil
+	}
+
+	// If we had snapshots and failed to load them, it's an error
+	if len(snapshots) > 0 {
+		return fmt.Errorf("failed to load any existing snapshots")
+	}
+	return nil
+}
+
+func (r *Raft) tryRestoreSingleSnapshot(snapshot *SnapshotMeta) bool {
+	if r.config().NoSnapshotRestoreOnStart {
+		return true
+	}
+
+	snapLogger := r.logger.With(
+		"id", snapshot.ID,
+		"last-index", snapshot.Index,
+		"last-term", snapshot.Term,
+		"size-in-bytes", snapshot.Size,
+	)
+
+	snapLogger.Info("starting restore from snapshot")
+
+	_, source, err := r.snapshots.Open(snapshot.ID)
+	if err != nil {
+		snapLogger.Error("failed to open snapshot", "error", err)
+		return false
+	}
+
+	if err := fsmRestoreAndMeasure(snapLogger, r.fsm, source, snapshot.Size); err != nil {
+		source.Close()
+		snapLogger.Error("failed to restore snapshot", "error", err)
+		return false
+	}
+	source.Close()
+
+	snapLogger.Info("restored from snapshot")
+
+	return true
+}
+
+func (r *Raft) config() Config {
+	return r.conf.Load().(Config)
+}
+
+// ReloadConfig updates the configuration of a running raft node. If the new
+// configuration is invalid an error is returned and no changes are made to the
+// instance. All fields will be copied from rc into the new configuration, even
+// if they are zero valued.
+func (r *Raft) ReloadConfig(rc ReloadableConfig) error {
+	r.confReloadMu.Lock()
+	defer r.confReloadMu.Unlock()
+
+	// Load the current config (note we are under a lock so it can't be changed
+	// between this read and a later Store).
+	oldCfg := r.config()
+
+	// Set the reloadable fields
+	newCfg := rc.apply(oldCfg)
+
+	if err := ValidateConfig(&newCfg); err != nil {
+		return err
+	}
+	r.conf.Store(newCfg)
+
+	if rc.HeartbeatTimeout < oldCfg.HeartbeatTimeout {
+		// On leader, ensure replication loops running with a longer
+		// timeout than what we want now discover the change.
+		asyncNotifyCh(r.leaderNotifyCh)
+		// On follower, update current timer to use the shorter new value.
+		asyncNotifyCh(r.followerNotifyCh)
+	}
+	return nil
+}
+
+// ReloadableConfig returns the current state of the reloadable fields in Raft's
+// configuration. This is useful for programs to discover the current state for
+// reporting to users or tests. It is safe to call from any goroutine. It is
+// intended primarily for reporting and testing purposes; external
+// synchronization would be required to safely use this in a read-modify-write
+// pattern for reloadable configuration options.
+func (r *Raft) ReloadableConfig() ReloadableConfig {
+	cfg := r.config()
+	var rc ReloadableConfig
+	rc.fromConfig(cfg)
+	return rc
+}
+
+// BootstrapCluster is equivalent to the non-member BootstrapCluster function but
+// can be called on an un-bootstrapped Raft instance after it has been created. This
+// should only be called at the beginning of time for the cluster with an
+// identical configuration listing all Voter servers. There is no need to
+// bootstrap Nonvoter and Staging servers.
+//
+// A cluster can only be bootstrapped once from a single participating Voter
+// server. Any further attempts to bootstrap will return an error that can be
+// safely ignored.
+//
+// One sane approach is to bootstrap a single server with a configuration
+// listing just itself as a Voter, then invoke AddVoter() on it to add other
+// servers to the cluster.
+func (r *Raft) BootstrapCluster(configuration Configuration) Future {
+	bootstrapReq := &bootstrapFuture{}
+	bootstrapReq.init()
+	bootstrapReq.configuration = configuration
+	select {
+	case <-r.shutdownCh:
+		return errorFuture{ErrRaftShutdown}
+	case r.bootstrapCh <- bootstrapReq:
+		return bootstrapReq
+	}
+}
+
+// Leader is used to return the current leader of the cluster.
+// It may return empty string if there is no current leader
+// or the leader is unknown.
+//
+// Deprecated: use LeaderWithID instead.
+func (r *Raft) Leader() ServerAddress {
+	r.leaderLock.RLock()
+	leaderAddr := r.leaderAddr
+	r.leaderLock.RUnlock()
+	return leaderAddr
+}
+
+// LeaderWithID is used to return the current leader address and ID of the cluster.
+// It may return empty strings if there is no current leader
+// or the leader is unknown.
+func (r *Raft) LeaderWithID() (ServerAddress, ServerID) {
+	r.leaderLock.RLock()
+	leaderAddr := r.leaderAddr
+	leaderID := r.leaderID
+	r.leaderLock.RUnlock()
+	return leaderAddr, leaderID
+}
+
+// Apply is used to apply a command to the FSM in a highly consistent
+// manner. This returns a future that can be used to wait on the application.
+// An optional timeout can be provided to limit the amount of time we wait
+// for the command to be started. This must be run on the leader or it
+// will fail.
+//
+// If the node discovers it is no longer the leader while applying the command,
+// it will return ErrLeadershipLost. There is no way to guarantee whether the
+// write succeeded or failed in this case. For example, if the leader is
+// partitioned it can't know if a quorum of followers wrote the log to disk. If
+// at least one did, it may survive into the next leader's term.
+//
+// If a user snapshot is restored while the command is in-flight, an
+// ErrAbortedByRestore is returned. In this case the write effectively failed
+// since its effects will not be present in the FSM after the restore.
+func (r *Raft) Apply(cmd []byte, timeout time.Duration) ApplyFuture {
+	return r.ApplyLog(Log{Data: cmd}, timeout)
+}
+
+// ApplyLog performs Apply but takes in a Log directly. The only values
+// currently taken from the submitted Log are Data and Extensions. See
+// Apply for details on error cases.
+func (r *Raft) ApplyLog(log Log, timeout time.Duration) ApplyFuture {
+	metrics.IncrCounter([]string{"raft", "apply"}, 1)
+
+	var timer <-chan time.Time
+	if timeout > 0 {
+		timer = time.After(timeout)
+	}
+
+	// Create a log future, no index or term yet
+	logFuture := &logFuture{
+		log: Log{
+			Type:       LogCommand,
+			Data:       log.Data,
+			Extensions: log.Extensions,
+		},
+	}
+	logFuture.init()
+
+	select {
+	case <-timer:
+		return errorFuture{ErrEnqueueTimeout}
+	case <-r.shutdownCh:
+		return errorFuture{ErrRaftShutdown}
+	case r.applyCh <- logFuture:
+		return logFuture
+	}
+}
+
+// Barrier is used to issue a command that blocks until all preceding
+// operations have been applied to the FSM. It can be used to ensure the
+// FSM reflects all queued writes. An optional timeout can be provided to
+// limit the amount of time we wait for the command to be started. This
+// must be run on the leader, or it will fail.
+func (r *Raft) Barrier(timeout time.Duration) Future {
+	metrics.IncrCounter([]string{"raft", "barrier"}, 1)
+	var timer <-chan time.Time
+	if timeout > 0 {
+		timer = time.After(timeout)
+	}
+
+	// Create a log future, no index or term yet
+	logFuture := &logFuture{log: Log{Type: LogBarrier}}
+	logFuture.init()
+
+	select {
+	case <-timer:
+		return errorFuture{ErrEnqueueTimeout}
+	case <-r.shutdownCh:
+		return errorFuture{ErrRaftShutdown}
+	case r.applyCh <- logFuture:
+		return logFuture
+	}
+}
+
+// VerifyLeader is used to ensure this peer is still the leader. It may be used
+// to prevent returning stale data from the FSM after the peer has lost
+// leadership.
+func (r *Raft) VerifyLeader() Future {
+	metrics.IncrCounter([]string{"raft", "verify_leader"}, 1)
+	verifyFuture := &verifyFuture{}
+	verifyFuture.init()
+	select {
+	case <-r.shutdownCh:
+		return errorFuture{ErrRaftShutdown}
+	case r.verifyCh <- verifyFuture:
+		return verifyFuture
+	}
+}
+
+// GetConfiguration returns the latest configuration. This may not yet be
+// committed. The main loop can access this directly.
+func (r *Raft) GetConfiguration() ConfigurationFuture {
+	configReq := &configurationsFuture{}
+	configReq.init()
+	configReq.configurations = configurations{latest: r.getLatestConfiguration()}
+	configReq.respond(nil)
+	return configReq
+}
+
+// AddPeer to the cluster configuration. Must be run on the leader, or it will fail.
+//
+// Deprecated: Use AddVoter/AddNonvoter instead.
+func (r *Raft) AddPeer(peer ServerAddress) Future {
+	if r.protocolVersion > 2 {
+		return errorFuture{ErrUnsupportedProtocol}
+	}
+
+	return r.requestConfigChange(configurationChangeRequest{
+		command:       AddVoter,
+		serverID:      ServerID(peer),
+		serverAddress: peer,
+		prevIndex:     0,
+	}, 0)
+}
+
+// RemovePeer from the cluster configuration. If the current leader is being
+// removed, it will cause a new election to occur. Must be run on the leader,
+// or it will fail.
+//
+// Deprecated: Use RemoveServer instead.
+func (r *Raft) RemovePeer(peer ServerAddress) Future {
+	if r.protocolVersion > 2 {
+		return errorFuture{ErrUnsupportedProtocol}
+	}
+
+	return r.requestConfigChange(configurationChangeRequest{
+		command:   RemoveServer,
+		serverID:  ServerID(peer),
+		prevIndex: 0,
+	}, 0)
+}
+
+// AddVoter will add the given server to the cluster as a staging server. If the
+// server is already in the cluster as a voter, this updates the server's address.
+// This must be run on the leader or it will fail. The leader will promote the
+// staging server to a voter once that server is ready. If nonzero, prevIndex is
+// the index of the only configuration upon which this change may be applied; if
+// another configuration entry has been added in the meantime, this request will
+// fail. If nonzero, timeout is how long this server should wait for the
+// configuration change log entry to be appended before timing out.
+func (r *Raft) AddVoter(id ServerID, address ServerAddress, prevIndex uint64, timeout time.Duration) IndexFuture {
+	if r.protocolVersion < 2 {
+		return errorFuture{ErrUnsupportedProtocol}
+	}
+
+	return r.requestConfigChange(configurationChangeRequest{
+		command:       AddVoter,
+		serverID:      id,
+		serverAddress: address,
+		prevIndex:     prevIndex,
+	}, timeout)
+}
+
+// AddNonvoter will add the given server to the cluster but won't assign it a
+// vote. The server will receive log entries, but it won't participate in
+// elections or log entry commitment. If the server is already in the cluster,
+// this updates the server's address. This must be run on the leader or it will
+// fail. For prevIndex and timeout, see AddVoter.
+func (r *Raft) AddNonvoter(id ServerID, address ServerAddress, prevIndex uint64, timeout time.Duration) IndexFuture {
+	if r.protocolVersion < 3 {
+		return errorFuture{ErrUnsupportedProtocol}
+	}
+
+	return r.requestConfigChange(configurationChangeRequest{
+		command:       AddNonvoter,
+		serverID:      id,
+		serverAddress: address,
+		prevIndex:     prevIndex,
+	}, timeout)
+}
+
+// RemoveServer will remove the given server from the cluster. If the current
+// leader is being removed, it will cause a new election to occur. This must be
+// run on the leader or it will fail. For prevIndex and timeout, see AddVoter.
+func (r *Raft) RemoveServer(id ServerID, prevIndex uint64, timeout time.Duration) IndexFuture {
+	if r.protocolVersion < 2 {
+		return errorFuture{ErrUnsupportedProtocol}
+	}
+
+	return r.requestConfigChange(configurationChangeRequest{
+		command:   RemoveServer,
+		serverID:  id,
+		prevIndex: prevIndex,
+	}, timeout)
+}
+
+// DemoteVoter will take away a server's vote, if it has one. If present, the
+// server will continue to receive log entries, but it won't participate in
+// elections or log entry commitment. If the server is not in the cluster, this
+// does nothing. This must be run on the leader or it will fail. For prevIndex
+// and timeout, see AddVoter.
+func (r *Raft) DemoteVoter(id ServerID, prevIndex uint64, timeout time.Duration) IndexFuture {
+	if r.protocolVersion < 3 {
+		return errorFuture{ErrUnsupportedProtocol}
+	}
+
+	return r.requestConfigChange(configurationChangeRequest{
+		command:   DemoteVoter,
+		serverID:  id,
+		prevIndex: prevIndex,
+	}, timeout)
+}
+
+// Shutdown is used to stop the Raft background routines.
+// This is not a graceful operation. It provides a future that
+// can be used to block until all background routines have exited.
+func (r *Raft) Shutdown() Future {
+	r.shutdownLock.Lock()
+	defer r.shutdownLock.Unlock()
+
+	if !r.shutdown {
+		close(r.shutdownCh)
+		r.shutdown = true
+		r.setState(Shutdown)
+		return &shutdownFuture{r}
+	}
+
+	// avoid closing transport twice
+	return &shutdownFuture{nil}
+}
+
+// Snapshot is used to manually force Raft to take a snapshot. Returns a future
+// that can be used to block until complete, and that contains a function that
+// can be used to open the snapshot.
+func (r *Raft) Snapshot() SnapshotFuture {
+	future := &userSnapshotFuture{}
+	future.init()
+	select {
+	case r.userSnapshotCh <- future:
+		return future
+	case <-r.shutdownCh:
+		future.respond(ErrRaftShutdown)
+		return future
+	}
+}
+
+// Restore is used to manually force Raft to consume an external snapshot, such
+// as if restoring from a backup. We will use the current Raft configuration,
+// not the one from the snapshot, so that we can restore into a new cluster. We
+// will also use the greater of the snapshot's index and the current index, plus
+// one, which forces a new state with a hole in the Raft log, so that the
+// snapshot will be sent to followers and used for any new joiners. This can
+// only be run on the leader, and blocks until the restore is complete
+// or an error occurs.
+//
+// WARNING! This operation has the leader take on the state of the snapshot and
+// then sets itself up so that it replicates that to its followers through the
+// install snapshot process. This involves a potentially dangerous period where
+// the leader commits ahead of its followers, so it should only be used for
+// disaster recovery into a fresh cluster, and should not be used in normal
+// operations.
+func (r *Raft) Restore(meta *SnapshotMeta, reader io.Reader, timeout time.Duration) error {
+	metrics.IncrCounter([]string{"raft", "restore"}, 1)
+	var timer <-chan time.Time
+	if timeout > 0 {
+		timer = time.After(timeout)
+	}
+
+	// Perform the restore.
+	restore := &userRestoreFuture{
+		meta:   meta,
+		reader: reader,
+	}
+	restore.init()
+	select {
+	case <-timer:
+		return ErrEnqueueTimeout
+	case <-r.shutdownCh:
+		return ErrRaftShutdown
+	case r.userRestoreCh <- restore:
+		// If the restore is ingested then wait for it to complete.
+		if err := restore.Error(); err != nil {
+			return err
+		}
+	}
+
+	// Apply a no-op log entry. Waiting for this allows us to wait until the
+	// followers have gotten the restore and replicated at least this new
+	// entry, which shows that we've also faulted and installed the
+	// snapshot with the contents of the restore.
+	noop := &logFuture{
+		log: Log{
+			Type: LogNoop,
+		},
+	}
+	noop.init()
+	select {
+	case <-timer:
+		return ErrEnqueueTimeout
+	case <-r.shutdownCh:
+		return ErrRaftShutdown
+	case r.applyCh <- noop:
+		return noop.Error()
+	}
+}
+
+// State returns the state of this raft peer.
+func (r *Raft) State() RaftState {
+	return r.getState()
+}
+
+// LeaderCh is used to get a channel which delivers signals on acquiring or
+// losing leadership. It sends true if we become the leader, and false if we
+// lose it.
+//
+// Receivers can expect to receive a notification only if a leadership
+// transition has occurred.
+//
+// If receivers aren't ready for the signal, signals may be dropped and only
+// the latest leadership transition delivered. For example, if a receiver
+// receives two subsequent `true` values, it may deduce that leadership was
+// lost and regained while the receiver was processing the first transition.
+func (r *Raft) LeaderCh() <-chan bool {
+	return r.leaderCh
+}
+
+// String returns a string representation of this Raft node.
+func (r *Raft) String() string {
+	return fmt.Sprintf("Node at %s [%v]", r.localAddr, r.getState())
+}
+
+// LastContact returns the time of last contact by a leader.
+// This only makes sense if we are currently a follower.
+func (r *Raft) LastContact() time.Time {
+	r.lastContactLock.RLock()
+	last := r.lastContact
+	r.lastContactLock.RUnlock()
+	return last
+}
+
+// Stats is used to return a map of various internal stats. This
+// should only be used for informative purposes or debugging.
+//
+// Keys are: "state", "term", "last_log_index", "last_log_term",
+// "commit_index", "applied_index", "fsm_pending",
+// "last_snapshot_index", "last_snapshot_term",
+// "latest_configuration", "last_contact", and "num_peers".
+//
+// The value of "state" is the name of the leadership state the node
+// is in at any given time; the possible states are: "Follower",
+// "Candidate", "Leader", and "Shutdown".
+//
+// The value of "latest_configuration" is a string which contains
+// the id of each server, its suffrage status, and its address.
+//
+// The value of "last_contact" is either "never" if there
+// has been no contact with a leader, "0" if the node is in the
+// leader state, or the time since last contact with a leader
+// formatted as a string.
+//
+// The value of "num_peers" is the number of other voting servers in the
+// cluster, not including this node. If this node isn't part of the
+// configuration then this will be "0".
+//
+// All other values are uint64s, formatted as strings.
+func (r *Raft) Stats() map[string]string {
+	toString := func(v uint64) string {
+		return strconv.FormatUint(v, 10)
+	}
+	lastLogIndex, lastLogTerm := r.getLastLog()
+	lastSnapIndex, lastSnapTerm := r.getLastSnapshot()
+	s := map[string]string{
+		"state":                r.getState().String(),
+		"term":                 toString(r.getCurrentTerm()),
+		"last_log_index":       toString(lastLogIndex),
+		"last_log_term":        toString(lastLogTerm),
+		"commit_index":         toString(r.getCommitIndex()),
+		"applied_index":        toString(r.getLastApplied()),
+		"fsm_pending":          toString(uint64(len(r.fsmMutateCh))),
+		"last_snapshot_index":  toString(lastSnapIndex),
+		"last_snapshot_term":   toString(lastSnapTerm),
+		"protocol_version":     toString(uint64(r.protocolVersion)),
+		"protocol_version_min": toString(uint64(ProtocolVersionMin)),
+		"protocol_version_max": toString(uint64(ProtocolVersionMax)),
+		"snapshot_version_min": toString(uint64(SnapshotVersionMin)),
+		"snapshot_version_max": toString(uint64(SnapshotVersionMax)),
+	}
+
+	future := r.GetConfiguration()
+	if err := future.Error(); err != nil {
+		r.logger.Warn("could not get configuration for stats", "error", err)
+	} else {
+		configuration := future.Configuration()
+		s["latest_configuration_index"] = toString(future.Index())
+		s["latest_configuration"] = fmt.Sprintf("%+v", configuration.Servers)
+
+		// This is a legacy metric that we've seen people use in the wild.
+		hasUs := false
+		numPeers := 0
+		for _, server := range configuration.Servers {
+			if server.Suffrage == Voter {
+				if server.ID == r.localID {
+					hasUs = true
+				} else {
+					numPeers++
+				}
+			}
+		}
+		if !hasUs {
+			numPeers = 0
+		}
+		s["num_peers"] = toString(uint64(numPeers))
+	}
+
+	last := r.LastContact()
+	if r.getState() == Leader {
+		s["last_contact"] = "0"
+	} else if last.IsZero() {
+		s["last_contact"] = "never"
+	} else {
+		s["last_contact"] = fmt.Sprintf("%v", time.Now().Sub(last))
+	}
+	return s
+}
+
+// LastIndex returns the last index in stable storage,
+// either from the last log or from the last snapshot.
+func (r *Raft) LastIndex() uint64 {
+	return r.getLastIndex()
+}
+
+// CommitIndex returns the committed index.
+// This API may be helpful for servers implementing the read-index
+// optimization as described in the Raft paper.
+func (r *Raft) CommitIndex() uint64 { + return r.getCommitIndex() +} + +// AppliedIndex returns the last index applied to the FSM. This is generally +// lagging behind the last index, especially for indexes that are persisted but +// have not yet been considered committed by the leader. NOTE - this reflects +// the last index that was sent to the application's FSM over the apply channel +// but DOES NOT mean that the application's FSM has yet consumed it and applied +// it to its internal state. Thus, the application's state may lag behind this +// index. +func (r *Raft) AppliedIndex() uint64 { + return r.getLastApplied() +} + +// LeadershipTransfer will transfer leadership to a server in the cluster. +// This can only be called from the leader, or it will fail. The leader will +// stop accepting client requests, make sure the target server is up to date, +// and start the transfer with a TimeoutNow message. This message has the same +// effect as if the election timeout on the target server fires. Since +// it is unlikely that another server is starting an election, it is very +// likely that the target server is able to win the election. Note that raft +// protocol version 3 alone is not sufficient to use LeadershipTransfer; a +// recent version of the library that includes this feature has to be used. +// Leadership transfer is safe, however, in a cluster where not every node has +// the latest version. If a follower cannot be promoted, it will fail +// gracefully. +func (r *Raft) LeadershipTransfer() Future { + if r.protocolVersion < 3 { + return errorFuture{ErrUnsupportedProtocol} + } + + return r.initiateLeadershipTransfer(nil, nil) +} + +// LeadershipTransferToServer does the same as LeadershipTransfer but takes a +// target server as arguments, for when leadership should be transferred to a +// specific server in the cluster. Note that raft protocol version 3 alone is +// not sufficient to use LeadershipTransfer; a recent version of the library +// that includes this feature has to be used. Leadership transfer is safe, +// however, in a cluster where not every node has the latest version. If a +// follower cannot be promoted, it will fail gracefully. +func (r *Raft) LeadershipTransferToServer(id ServerID, address ServerAddress) Future { + if r.protocolVersion < 3 { + return errorFuture{ErrUnsupportedProtocol} + } + + return r.initiateLeadershipTransfer(&id, &address) +} diff --git a/vendor/github.com/hashicorp/raft/commands.go b/vendor/github.com/hashicorp/raft/commands.go new file mode 100644 index 0000000000000..1ec76cb27d6a2 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/commands.go @@ -0,0 +1,223 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package raft + +// RPCHeader is a common sub-structure used to pass along protocol version and +// other information about the cluster. For older Raft implementations before +// versioning was added this will default to a zero-valued structure when read +// by newer Raft versions. +type RPCHeader struct { + // ProtocolVersion is the version of the protocol the sender is + // speaking. + ProtocolVersion ProtocolVersion + // ID is the ServerID of the node sending the RPC Request or Response + ID []byte + // Addr is the ServerAddr of the node sending the RPC Request or Response + Addr []byte +} + +// WithRPCHeader is an interface that exposes the RPC header. +type WithRPCHeader interface { + GetRPCHeader() RPCHeader +} + +// AppendEntriesRequest is the command used to append entries to the +// replicated log.
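The leadership-transfer API above composes naturally with graceful shutdown. A hedged sketch (drain is a hypothetical hook; it assumes a node running protocol version 3 on a recent enough library build):

```go
package raftutil

import (
	"log"

	"github.com/hashicorp/raft"
)

// drain is a hypothetical pre-shutdown hook: before stopping this process,
// try to hand leadership to another voter so followers don't have to wait
// out an election timeout.
func drain(r *raft.Raft) {
	if r.State() != raft.Leader {
		return // nothing to hand off
	}
	// Error blocks until the transfer completes or fails; a failure such as
	// ErrUnsupportedProtocol (protocol version < 3) is logged, not fatal.
	if err := r.LeadershipTransfer().Error(); err != nil {
		log.Printf("leadership transfer failed: %v", err)
	}
}
```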
+type AppendEntriesRequest struct { + RPCHeader + + // Provide the current term and leader + Term uint64 + + // Deprecated: use RPCHeader.Addr instead + Leader []byte + + // Provide the previous entries for integrity checking + PrevLogEntry uint64 + PrevLogTerm uint64 + + // New entries to commit + Entries []*Log + + // Commit index on the leader + LeaderCommitIndex uint64 +} + +// GetRPCHeader - See WithRPCHeader. +func (r *AppendEntriesRequest) GetRPCHeader() RPCHeader { + return r.RPCHeader +} + +// AppendEntriesResponse is the response returned from an +// AppendEntriesRequest. +type AppendEntriesResponse struct { + RPCHeader + + // Newer term if leader is out of date + Term uint64 + + // Last Log is a hint to help accelerate rebuilding slow nodes + LastLog uint64 + + // We may not succeed if we have a conflicting entry + Success bool + + // There are scenarios where this request didn't succeed + // but there's no need to wait/back-off the next attempt. + NoRetryBackoff bool +} + +// GetRPCHeader - See WithRPCHeader. +func (r *AppendEntriesResponse) GetRPCHeader() RPCHeader { + return r.RPCHeader +} + +// RequestVoteRequest is the command used by a candidate to ask a Raft peer +// for a vote in an election. +type RequestVoteRequest struct { + RPCHeader + + // Provide the term and our id + Term uint64 + + // Deprecated: use RPCHeader.Addr instead + Candidate []byte + + // Used to ensure safety + LastLogIndex uint64 + LastLogTerm uint64 + + // Used to indicate to peers if this vote was triggered by a leadership + // transfer. It is required for leadership transfer to work, because servers + // wouldn't vote otherwise if they are aware of an existing leader. + LeadershipTransfer bool +} + +// GetRPCHeader - See WithRPCHeader. +func (r *RequestVoteRequest) GetRPCHeader() RPCHeader { + return r.RPCHeader +} + +// RequestVoteResponse is the response returned from a RequestVoteRequest. +type RequestVoteResponse struct { + RPCHeader + + // Newer term if leader is out of date. + Term uint64 + + // Peers is deprecated, but required by servers that only understand + // protocol version 0. This is not populated in protocol version 2 + // and later. + Peers []byte + + // Is the vote granted. + Granted bool +} + +// GetRPCHeader - See WithRPCHeader. +func (r *RequestVoteResponse) GetRPCHeader() RPCHeader { + return r.RPCHeader +} + +// RequestPreVoteRequest is the command used by a candidate to ask a Raft peer +// for a vote in an election. +type RequestPreVoteRequest struct { + RPCHeader + + // Provide the term and our id + Term uint64 + + // Used to ensure safety + LastLogIndex uint64 + LastLogTerm uint64 +} + +// GetRPCHeader - See WithRPCHeader. +func (r *RequestPreVoteRequest) GetRPCHeader() RPCHeader { + return r.RPCHeader +} + +// RequestPreVoteResponse is the response returned from a RequestPreVoteRequest. +type RequestPreVoteResponse struct { + RPCHeader + + // Newer term if leader is out of date. + Term uint64 + + // Is the vote granted. + Granted bool +} + +// GetRPCHeader - See WithRPCHeader. +func (r *RequestPreVoteResponse) GetRPCHeader() RPCHeader { + return r.RPCHeader +} + +// InstallSnapshotRequest is the command sent to a Raft peer to bootstrap its +// log (and state machine) from a snapshot on another peer. +type InstallSnapshotRequest struct { + RPCHeader + SnapshotVersion SnapshotVersion + + Term uint64 + Leader []byte + + // These are the last index/term included in the snapshot + LastLogIndex uint64 + LastLogTerm uint64 + + // Peer Set in the snapshot. 
+ // but remains here in case we receive an InstallSnapshot from a leader + // that's running old code. + // Deprecated: This is deprecated in favor of Configuration + Peers []byte + + // Cluster membership. + Configuration []byte + // Log index where 'Configuration' entry was originally written. + ConfigurationIndex uint64 + + // Size of the snapshot + Size int64 +} + +// GetRPCHeader - See WithRPCHeader. +func (r *InstallSnapshotRequest) GetRPCHeader() RPCHeader { + return r.RPCHeader +} + +// InstallSnapshotResponse is the response returned from an +// InstallSnapshotRequest. +type InstallSnapshotResponse struct { + RPCHeader + + Term uint64 + Success bool +} + +// GetRPCHeader - See WithRPCHeader. +func (r *InstallSnapshotResponse) GetRPCHeader() RPCHeader { + return r.RPCHeader +} + +// TimeoutNowRequest is the command used by a leader to signal another server to +// start an election. +type TimeoutNowRequest struct { + RPCHeader +} + +// GetRPCHeader - See WithRPCHeader. +func (r *TimeoutNowRequest) GetRPCHeader() RPCHeader { + return r.RPCHeader +} + +// TimeoutNowResponse is the response to TimeoutNowRequest. +type TimeoutNowResponse struct { + RPCHeader +} + +// GetRPCHeader - See WithRPCHeader. +func (r *TimeoutNowResponse) GetRPCHeader() RPCHeader { + return r.RPCHeader +} diff --git a/vendor/github.com/hashicorp/raft/commitment.go b/vendor/github.com/hashicorp/raft/commitment.go new file mode 100644 index 0000000000000..7d100a63e06ba --- /dev/null +++ b/vendor/github.com/hashicorp/raft/commitment.go @@ -0,0 +1,104 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package raft + +import ( + "sort" + "sync" +) + +// Commitment is used to advance the leader's commit index. The leader and +// replication goroutines report in newly written entries with match(), and +// this notifies on commitCh when the commit index has advanced. +type commitment struct { + // protects matchIndexes and commitIndex + sync.Mutex + // notified when commitIndex increases + commitCh chan struct{} + // voter ID to log index: the server stores up through this log entry + matchIndexes map[ServerID]uint64 + // a quorum stores up through this log entry. monotonically increases. + commitIndex uint64 + // the first index of this leader's term: this needs to be replicated to a + // majority of the cluster before this leader may mark anything committed + // (per Raft's commitment rule) + startIndex uint64 +} + +// newCommitment returns a commitment struct that notifies the provided +// channel when log entries have been committed. A new commitment struct is +// created each time this server becomes leader for a particular term. +// 'configuration' is the servers in the cluster. +// 'startIndex' is the first index created in this term (see +// its description above). +func newCommitment(commitCh chan struct{}, configuration Configuration, startIndex uint64) *commitment { + matchIndexes := make(map[ServerID]uint64) + for _, server := range configuration.Servers { + if server.Suffrage == Voter { + matchIndexes[server.ID] = 0 + } + } + return &commitment{ + commitCh: commitCh, + matchIndexes: matchIndexes, + commitIndex: 0, + startIndex: startIndex, + } +} + +// Called when a new cluster membership configuration is created: it will be +// used to determine commitment from now on. 'configuration' is the servers in +// the cluster. 
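The quorum rule encoded in matchIndexes can be shown in isolation: sort the per-voter indexes and take the lower median, which is exactly what the recalculate helper further below does (subject to the startIndex check). A standalone sketch; quorumMatch is a hypothetical stand-in, since commitment is unexported:

```go
package main

import (
	"fmt"
	"sort"
)

// quorumMatch mirrors commitment.recalculate: with one matchIndex per voter,
// the highest entry stored by a majority is the lower median of the sorted
// indexes.
func quorumMatch(matchIndexes []uint64) uint64 {
	sorted := append([]uint64(nil), matchIndexes...)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })
	return sorted[(len(sorted)-1)/2]
}

func main() {
	// Five voters; three of them (9, 7, 7) have stored index >= 7,
	// so index 7 has a quorum and may be committed.
	fmt.Println(quorumMatch([]uint64{9, 7, 7, 5, 2})) // 7
}
```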
+func (c *commitment) setConfiguration(configuration Configuration) { + c.Lock() + defer c.Unlock() + oldMatchIndexes := c.matchIndexes + c.matchIndexes = make(map[ServerID]uint64) + for _, server := range configuration.Servers { + if server.Suffrage == Voter { + c.matchIndexes[server.ID] = oldMatchIndexes[server.ID] // defaults to 0 + } + } + c.recalculate() +} + +// Called by leader after commitCh is notified +func (c *commitment) getCommitIndex() uint64 { + c.Lock() + defer c.Unlock() + return c.commitIndex +} + +// Match is called once a server completes writing entries to disk: either the +// leader has written the new entry or a follower has replied to an +// AppendEntries RPC. The given server's disk agrees with this server's log up +// through the given index. +func (c *commitment) match(server ServerID, matchIndex uint64) { + c.Lock() + defer c.Unlock() + if prev, hasVote := c.matchIndexes[server]; hasVote && matchIndex > prev { + c.matchIndexes[server] = matchIndex + c.recalculate() + } +} + +// Internal helper to calculate new commitIndex from matchIndexes. +// Must be called with lock held. +func (c *commitment) recalculate() { + if len(c.matchIndexes) == 0 { + return + } + + matched := make([]uint64, 0, len(c.matchIndexes)) + for _, idx := range c.matchIndexes { + matched = append(matched, idx) + } + sort.Sort(uint64Slice(matched)) + quorumMatchIndex := matched[(len(matched)-1)/2] + + if quorumMatchIndex > c.commitIndex && quorumMatchIndex >= c.startIndex { + c.commitIndex = quorumMatchIndex + asyncNotifyCh(c.commitCh) + } +} diff --git a/vendor/github.com/hashicorp/raft/config.go b/vendor/github.com/hashicorp/raft/config.go new file mode 100644 index 0000000000000..d14392fc3f848 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/config.go @@ -0,0 +1,371 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package raft + +import ( + "fmt" + "io" + "os" + "time" + + "github.com/hashicorp/go-hclog" +) + +// ProtocolVersion is the version of the protocol (which includes RPC messages +// as well as Raft-specific log entries) that this server can _understand_. Use +// the ProtocolVersion member of the Config object to control the version of +// the protocol to use when _speaking_ to other servers. Note that depending on +// the protocol version being spoken, some otherwise understood RPC messages +// may be refused. See dispositionRPC for details of this logic. +// +// There are notes about the upgrade path in the description of the versions +// below. If you are starting a fresh cluster then there's no reason not to +// jump right to the latest protocol version. If you need to interoperate with +// older, version 0 Raft servers you'll need to drive the cluster through the +// different versions in order. +// +// The version details are complicated, but here's a summary of what's required +// to get from a version 0 cluster to version 3: +// +// 1. In version N of your app that starts using the new Raft library with +// versioning, set ProtocolVersion to 1. +// 2. Make version N+1 of your app require version N as a prerequisite (all +// servers must be upgraded). For version N+1 of your app set ProtocolVersion +// to 2. +// 3. Similarly, make version N+2 of your app require version N+1 as a +// prerequisite. For version N+2 of your app, set ProtocolVersion to 3. +// +// During this upgrade, older cluster members will still have Server IDs equal +// to their network addresses. 
To upgrade an older member and give it an ID, it +// needs to leave the cluster and re-enter: +// +// 1. Remove the server from the cluster with RemoveServer, using its network +// address as its ServerID. +// 2. Update the server's config to use a UUID or something else that is +// not tied to the machine as the ServerID (restarting the server). +// 3. Add the server back to the cluster with AddVoter, using its new ID. +// +// You can do this during the rolling upgrade from N+1 to N+2 of your app, or +// as a rolling change at any time after the upgrade. +// +// # Version History +// +// 0: Original Raft library before versioning was added. Servers running this +// +// version of the Raft library use AddPeerDeprecated/RemovePeerDeprecated +// for all configuration changes, and have no support for LogConfiguration. +// +// 1: First versioned protocol, used to interoperate with old servers, and begin +// +// the migration path to newer versions of the protocol. Under this version +// all configuration changes are propagated using the now-deprecated +// RemovePeerDeprecated Raft log entry. This means that server IDs are always +// set to be the same as the server addresses (since the old log entry type +// cannot transmit an ID), and only AddPeer/RemovePeer APIs are supported. +// Servers running this version of the protocol can understand the new +// LogConfiguration Raft log entry but will never generate one so they can +// remain compatible with version 0 Raft servers in the cluster. +// +// 2: Transitional protocol used when migrating an existing cluster to the new +// +// server ID system. Server IDs are still set to be the same as server +// addresses, but all configuration changes are propagated using the new +// LogConfiguration Raft log entry type, which can carry full ID information. +// This version supports the old AddPeer/RemovePeer APIs as well as the new +// ID-based AddVoter/RemoveServer APIs which should be used when adding +// version 3 servers to the cluster later. This version sheds all +// interoperability with version 0 servers, but can interoperate with newer +// Raft servers running with protocol version 1 since they can understand the +// new LogConfiguration Raft log entry, and this version can still understand +// their RemovePeerDeprecated Raft log entries. We need this protocol version +// as an intermediate step between 1 and 3 so that servers will propagate the +// ID information that will come from newly-added (or -rolled) servers using +// protocol version 3, but since they are still using their address-based IDs +// from the previous step they will still be able to track commitments and +// their own voting status properly. If we skipped this step, servers would +// be started with their new IDs, but they wouldn't see themselves in the old +// address-based configuration, so none of the servers would think they had a +// vote. +// +// 3: Protocol adding full support for server IDs and new ID-based server APIs +// +// (AddVoter, AddNonvoter, etc.), old AddPeer/RemovePeer APIs are no longer +// supported. Version 2 servers should be swapped out by removing them from +// the cluster one-by-one and re-adding them with updated configuration for +// this protocol version, along with their server ID. The remove/add cycle +// is required to populate their server ID. Note that removing must be done +// by ID, which will be the old server's address. 
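The three-step rollout described above maps directly onto configuration code. A sketch under the stated assumptions; configForRelease and the release numbering are hypothetical:

```go
package raftutil

import "github.com/hashicorp/raft"

// configForRelease pins the ProtocolVersion that each staged app release
// is allowed to speak, per the upgrade path described above.
func configForRelease(release int) *raft.Config {
	conf := raft.DefaultConfig()
	switch release {
	case 1: // version N: first release on the versioned library
		conf.ProtocolVersion = 1
	case 2: // version N+1: requires every server to run release 1
		conf.ProtocolVersion = 2
	default: // version N+2 and later: full server-ID support
		conf.ProtocolVersion = 3
	}
	return conf
}
```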
+type ProtocolVersion int + +const ( + // ProtocolVersionMin is the minimum protocol version + ProtocolVersionMin ProtocolVersion = 0 + // ProtocolVersionMax is the maximum protocol version + ProtocolVersionMax = 3 +) + +// SnapshotVersion is the version of snapshots that this server can understand. +// Currently, it is always assumed that the server generates the latest version, +// though this may be changed in the future to include a configurable version. +// +// # Version History +// +// 0: Original Raft library before versioning was added. The peers portion of +// +// these snapshots is encoded in the legacy format which requires decodePeers +// to parse. This version of snapshots should only be produced by the +// unversioned Raft library. +// +// 1: New format which adds support for a full configuration structure and its +// +// associated log index, with support for server IDs and non-voting server +// modes. To ease upgrades, this also includes the legacy peers structure but +// that will never be used by servers that understand version 1 snapshots. +// Since the original Raft library didn't enforce any versioning, we must +// include the legacy peers structure for this version, but we can deprecate +// it in the next snapshot version. +type SnapshotVersion int + +const ( + // SnapshotVersionMin is the minimum snapshot version + SnapshotVersionMin SnapshotVersion = 0 + // SnapshotVersionMax is the maximum snapshot version + SnapshotVersionMax = 1 +) + +// Config provides any necessary configuration for the Raft server. +type Config struct { + // ProtocolVersion allows a Raft server to inter-operate with older + // Raft servers running an older version of the code. This is used to + // version the wire protocol as well as Raft-specific log entries that + // the server uses when _speaking_ to other servers. There is currently + // no auto-negotiation of versions so all servers must be manually + // configured with compatible versions. See ProtocolVersionMin and + // ProtocolVersionMax for the versions of the protocol that this server + // can _understand_. + ProtocolVersion ProtocolVersion + + // HeartbeatTimeout specifies the time in follower state without contact + // from a leader before we attempt an election. + HeartbeatTimeout time.Duration + + // ElectionTimeout specifies the time in candidate state without contact + // from a leader before we attempt an election. + ElectionTimeout time.Duration + + // CommitTimeout specifies the time without an Apply operation before the + // leader sends an AppendEntry RPC to followers, to ensure a timely commit of + // log entries. + // Due to random staggering, may be delayed as much as 2x this value. + CommitTimeout time.Duration + + // MaxAppendEntries controls the maximum number of append entries + // to send at once. We want to strike a balance between efficiency + // and avoiding waste if the follower is going to reject because of + // an inconsistent log. + MaxAppendEntries int + + // BatchApplyCh indicates whether we should buffer applyCh + // to size MaxAppendEntries. This enables batch log commitment, + // but breaks the timeout guarantee on Apply. Specifically, + // a log can be added to the applyCh buffer but not actually be + // processed until after the specified timeout. + BatchApplyCh bool + + // If we are a member of a cluster, and RemovePeer is invoked for the + // local node, then we forget all peers and transition into the follower state. + // If ShutdownOnRemove is set, we additionally shut down Raft.
Otherwise, + // we can become a leader of a cluster containing only this node. + ShutdownOnRemove bool + + // TrailingLogs controls how many logs we leave after a snapshot. This is used + // so that we can quickly replay logs on a follower instead of being forced to + // send an entire snapshot. The value passed here is the initial setting used. + // This can be tuned during operation using ReloadConfig. + TrailingLogs uint64 + + // SnapshotInterval controls how often we check if we should perform a + // snapshot. We randomly stagger between this value and 2x this value to avoid + // the entire cluster from performing a snapshot at once. The value passed + // here is the initial setting used. This can be tuned during operation using + // ReloadConfig. + SnapshotInterval time.Duration + + // SnapshotThreshold controls how many outstanding logs there must be before + // we perform a snapshot. This is to prevent excessive snapshotting by + // replaying a small set of logs instead. The value passed here is the initial + // setting used. This can be tuned during operation using ReloadConfig. + SnapshotThreshold uint64 + + // LeaderLeaseTimeout is used to control how long the "lease" lasts + // for being the leader without being able to contact a quorum + // of nodes. If we reach this interval without contact, we will + // step down as leader. + LeaderLeaseTimeout time.Duration + + // LocalID is a unique ID for this server across all time. When running with + // ProtocolVersion < 3, you must set this to be the same as the network + // address of your transport. + LocalID ServerID + + // NotifyCh is used to provide a channel that will be notified of leadership + // changes. Raft will block writing to this channel, so it should either be + // buffered or aggressively consumed. + NotifyCh chan<- bool + + // LogOutput is used as a sink for logs, unless Logger is specified. + // Defaults to os.Stderr. + LogOutput io.Writer + + // LogLevel represents a log level. If the value does not match a known + // logging level, hclog.NoLevel is used. + LogLevel string + + // Logger is a user-provided logger. If nil, a logger writing to + // LogOutput with LogLevel is used. + Logger hclog.Logger + + // NoSnapshotRestoreOnStart controls if raft will restore a snapshot to the + // FSM on start. This is useful if your FSM recovers from mechanisms other + // than raft snapshotting. Snapshot metadata will still be used to initialize + // raft's configuration and index values. + NoSnapshotRestoreOnStart bool + + // PreVoteDisabled deactivates the pre-vote feature when set to true + PreVoteDisabled bool + + // skipStartup allows NewRaft() to bypass all background work goroutines + skipStartup bool +} + +func (conf *Config) getOrCreateLogger() hclog.Logger { + if conf.Logger != nil { + return conf.Logger + } + if conf.LogOutput == nil { + conf.LogOutput = os.Stderr + } + + return hclog.New(&hclog.LoggerOptions{ + Name: "raft", + Level: hclog.LevelFromString(conf.LogLevel), + Output: conf.LogOutput, + }) +} + +// ReloadableConfig is the subset of Config that may be reconfigured during +// runtime using raft.ReloadConfig. We choose to duplicate fields over embedding +// or accepting a Config but only using specific fields to keep the API clear. +// Reconfiguring some fields is potentially dangerous so we should only +// selectively enable it for fields where that is allowed. +type ReloadableConfig struct { + // TrailingLogs controls how many logs we leave after a snapshot.
This is used + // so that we can quickly replay logs on a follower instead of being forced to + // send an entire snapshot. The value passed here updates the setting at runtime + // which will take effect as soon as the next snapshot completes and truncation + // occurs. + TrailingLogs uint64 + + // SnapshotInterval controls how often we check if we should perform a snapshot. + // We randomly stagger between this value and 2x this value to avoid the entire + // cluster from performing a snapshot at once. + SnapshotInterval time.Duration + + // SnapshotThreshold controls how many outstanding logs there must be before + // we perform a snapshot. This is to prevent excessive snapshots when we can + // just replay a small set of logs. + SnapshotThreshold uint64 + + // HeartbeatTimeout specifies the time in follower state without + // a leader before we attempt an election. + HeartbeatTimeout time.Duration + + // ElectionTimeout specifies the time in candidate state without + // a leader before we attempt an election. + ElectionTimeout time.Duration +} + +// apply sets the reloadable fields on the passed Config to the values in +// `ReloadableConfig`. It returns a copy of Config with the fields from this +// ReloadableConfig set. +func (rc *ReloadableConfig) apply(to Config) Config { + to.TrailingLogs = rc.TrailingLogs + to.SnapshotInterval = rc.SnapshotInterval + to.SnapshotThreshold = rc.SnapshotThreshold + to.HeartbeatTimeout = rc.HeartbeatTimeout + to.ElectionTimeout = rc.ElectionTimeout + return to +} + +// fromConfig copies the reloadable fields from the passed Config. +func (rc *ReloadableConfig) fromConfig(from Config) { + rc.TrailingLogs = from.TrailingLogs + rc.SnapshotInterval = from.SnapshotInterval + rc.SnapshotThreshold = from.SnapshotThreshold + rc.HeartbeatTimeout = from.HeartbeatTimeout + rc.ElectionTimeout = from.ElectionTimeout +} + +// DefaultConfig returns a Config with usable defaults. +func DefaultConfig() *Config { + return &Config{ + ProtocolVersion: ProtocolVersionMax, + HeartbeatTimeout: 1000 * time.Millisecond, + ElectionTimeout: 1000 * time.Millisecond, + CommitTimeout: 50 * time.Millisecond, + MaxAppendEntries: 64, + ShutdownOnRemove: true, + TrailingLogs: 10240, + SnapshotInterval: 120 * time.Second, + SnapshotThreshold: 8192, + LeaderLeaseTimeout: 500 * time.Millisecond, + LogLevel: "DEBUG", + } +} + +// ValidateConfig is used to validate a sane configuration +func ValidateConfig(config *Config) error { + // We don't actually support running as 0 in the library any more, but + // we do understand it. 
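ReloadableConfig is consumed by Raft.ReloadConfig on a live node in recent versions of this library, and apply above copies every field, so a caller must populate all of them rather than only the one being tuned. A hedged sketch (tuneSnapshots and the values are illustrative, and the ReloadConfig semantics described are an assumption based on apply's copy-all behavior):

```go
package raftutil

import (
	"time"

	"github.com/hashicorp/raft"
)

// tuneSnapshots adjusts snapshot behaviour at runtime. Because apply copies
// every reloadable field, all five must be set, not only the ones changing.
func tuneSnapshots(r *raft.Raft) error {
	return r.ReloadConfig(raft.ReloadableConfig{
		TrailingLogs:      20480,            // keep more logs so slow followers replay instead of snapshotting
		SnapshotInterval:  60 * time.Second, // check for snapshots twice as often as the default
		SnapshotThreshold: 4096,             // snapshot after fewer outstanding logs
		HeartbeatTimeout:  time.Second,
		ElectionTimeout:   time.Second,
	})
}
```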
+ protocolMin := ProtocolVersionMin + if protocolMin == 0 { + protocolMin = 1 + } + if config.ProtocolVersion < protocolMin || + config.ProtocolVersion > ProtocolVersionMax { + return fmt.Errorf("ProtocolVersion %d must be >= %d and <= %d", + config.ProtocolVersion, protocolMin, ProtocolVersionMax) + } + if len(config.LocalID) == 0 { + return fmt.Errorf("LocalID cannot be empty") + } + if config.HeartbeatTimeout < 5*time.Millisecond { + return fmt.Errorf("HeartbeatTimeout is too low") + } + if config.ElectionTimeout < 5*time.Millisecond { + return fmt.Errorf("ElectionTimeout is too low") + } + if config.CommitTimeout < time.Millisecond { + return fmt.Errorf("CommitTimeout is too low") + } + if config.MaxAppendEntries <= 0 { + return fmt.Errorf("MaxAppendEntries must be positive") + } + if config.MaxAppendEntries > 1024 { + return fmt.Errorf("MaxAppendEntries is too large") + } + if config.SnapshotInterval < 5*time.Millisecond { + return fmt.Errorf("SnapshotInterval is too low") + } + if config.LeaderLeaseTimeout < 5*time.Millisecond { + return fmt.Errorf("LeaderLeaseTimeout is too low") + } + if config.LeaderLeaseTimeout > config.HeartbeatTimeout { + return fmt.Errorf("LeaderLeaseTimeout (%s) cannot be larger than heartbeat timeout (%s)", config.LeaderLeaseTimeout, config.HeartbeatTimeout) + } + if config.ElectionTimeout < config.HeartbeatTimeout { + return fmt.Errorf("ElectionTimeout (%s) must be equal or greater than Heartbeat Timeout (%s)", config.ElectionTimeout, config.HeartbeatTimeout) + } + return nil +} diff --git a/vendor/github.com/hashicorp/raft/configuration.go b/vendor/github.com/hashicorp/raft/configuration.go new file mode 100644 index 0000000000000..9bfad14f7601e --- /dev/null +++ b/vendor/github.com/hashicorp/raft/configuration.go @@ -0,0 +1,372 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package raft + +import "fmt" + +// ServerSuffrage determines whether a Server in a Configuration gets a vote. +type ServerSuffrage int + +// Note: Don't renumber these, since the numbers are written into the log. +const ( + // Voter is a server whose vote is counted in elections and whose match index + // is used in advancing the leader's commit index. + Voter ServerSuffrage = iota + // Nonvoter is a server that receives log entries but is not considered for + // elections or commitment purposes. + Nonvoter + // Staging is a server that acts like a Nonvoter. A configuration change + // with a ConfigurationChangeCommand of Promote can change a Staging server + // into a Voter. + // Deprecated: use Nonvoter instead. + Staging +) + +func (s ServerSuffrage) String() string { + switch s { + case Voter: + return "Voter" + case Nonvoter: + return "Nonvoter" + case Staging: + return "Staging" + } + return "ServerSuffrage" +} + +// ConfigurationStore provides an interface that can optionally be implemented by FSMs +// to store configuration updates made in the replicated log. In general this is only +// necessary for FSMs that mutate durable state directly instead of applying changes +// in memory and snapshotting periodically. By storing configuration changes, the +// persistent FSM state can behave as a complete snapshot, and be able to recover +// without an external snapshot just for persisting the raft configuration. +type ConfigurationStore interface { + // ConfigurationStore is a superset of the FSM functionality + FSM + + // StoreConfiguration is invoked once a log entry containing a configuration + // change is committed. 
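Tying DefaultConfig and ValidateConfig together: the defaults pass validation except for LocalID, which has no usable default and must be set by the caller. A minimal sketch ("node-a" is an arbitrary placeholder ID):

```go
package raftutil

import (
	"fmt"

	"github.com/hashicorp/raft"
)

func buildConfig() (*raft.Config, error) {
	conf := raft.DefaultConfig()
	// With ProtocolVersion 3 (the default), LocalID may be any stable,
	// unique string; below version 3 it must equal the transport address.
	conf.LocalID = raft.ServerID("node-a")

	if err := raft.ValidateConfig(conf); err != nil {
		return nil, fmt.Errorf("invalid raft config: %w", err)
	}
	return conf, nil
}
```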
It takes the index at which the configuration was + // written and the configuration value. + StoreConfiguration(index uint64, configuration Configuration) +} + +type nopConfigurationStore struct{} + +func (s nopConfigurationStore) StoreConfiguration(_ uint64, _ Configuration) {} + +// ServerID is a unique string identifying a server for all time. +type ServerID string + +// ServerAddress is a network address for a server that a transport can contact. +type ServerAddress string + +// Server tracks the information about a single server in a configuration. +type Server struct { + // Suffrage determines whether the server gets a vote. + Suffrage ServerSuffrage + // ID is a unique string identifying this server for all time. + ID ServerID + // Address is its network address that a transport can contact. + Address ServerAddress +} + +// Configuration tracks which servers are in the cluster, and whether they have +// votes. This should include the local server, if it's a member of the cluster. +// The servers are listed in no particular order, but each should only appear once. +// These entries are appended to the log during membership changes. +type Configuration struct { + Servers []Server +} + +// Clone makes a deep copy of a Configuration. +func (c *Configuration) Clone() (copy Configuration) { + copy.Servers = append(copy.Servers, c.Servers...) + return +} + +// ConfigurationChangeCommand enumerates the different ways to change the cluster +// configuration. +type ConfigurationChangeCommand uint8 + +const ( + // AddVoter adds a server with Suffrage of Voter. + AddVoter ConfigurationChangeCommand = iota + // AddNonvoter makes a server Nonvoter unless it's Staging or Voter. + AddNonvoter + // DemoteVoter makes a server Nonvoter unless it's absent. + DemoteVoter + // RemoveServer removes a server entirely from the cluster membership. + RemoveServer + // Promote changes a server from Staging to Voter. The command will be a + // no-op if the server is not Staging. + // Deprecated: use AddVoter instead. + Promote + // AddStaging makes a server a Voter. + // Deprecated: AddStaging was actually AddVoter. Use AddVoter instead. + AddStaging = 0 // explicit 0 to preserve the old value. +) + +func (c ConfigurationChangeCommand) String() string { + switch c { + case AddVoter: + return "AddVoter" + case AddNonvoter: + return "AddNonvoter" + case DemoteVoter: + return "DemoteVoter" + case RemoveServer: + return "RemoveServer" + case Promote: + return "Promote" + } + return "ConfigurationChangeCommand" +} + +// configurationChangeRequest describes a change that a leader would like to +// make to its current configuration. It's used only within a single server +// (never serialized into the log), as part of `configurationChangeFuture`. +type configurationChangeRequest struct { + command ConfigurationChangeCommand + serverID ServerID + serverAddress ServerAddress // only present for AddVoter, AddNonvoter + // prevIndex, if nonzero, is the index of the only configuration upon which + // this change may be applied; if another configuration entry has been + // added in the meantime, this request will fail. + prevIndex uint64 +} + +// configurations is state tracked on every server about its Configurations. +// Note that, per Diego's dissertation, there can be at most one uncommitted +// configuration at a time (the next configuration may not be created until the +// prior one has been committed).
+// +// One downside to storing just two configurations is that if you try to take a +// snapshot when your state machine hasn't yet applied the committedIndex, we +// have no record of the configuration that would logically fit into that +// snapshot. We disallow snapshots in that case now. An alternative approach, +// which LogCabin uses, is to track every configuration change in the +// log. +type configurations struct { + // committed is the latest configuration in the log/snapshot that has been + // committed (the one with the largest index). + committed Configuration + // committedIndex is the log index where 'committed' was written. + committedIndex uint64 + // latest is the latest configuration in the log/snapshot (may be committed + // or uncommitted) + latest Configuration + // latestIndex is the log index where 'latest' was written. + latestIndex uint64 +} + +// Clone makes a deep copy of a configurations object. +func (c *configurations) Clone() (copy configurations) { + copy.committed = c.committed.Clone() + copy.committedIndex = c.committedIndex + copy.latest = c.latest.Clone() + copy.latestIndex = c.latestIndex + return +} + +// hasVote returns true if the server identified by 'id' is a Voter in the +// provided Configuration. +func hasVote(configuration Configuration, id ServerID) bool { + for _, server := range configuration.Servers { + if server.ID == id { + return server.Suffrage == Voter + } + } + return false +} + +// inConfiguration returns true if the server identified by 'id' is in the +// provided Configuration. +func inConfiguration(configuration Configuration, id ServerID) bool { + for _, server := range configuration.Servers { + if server.ID == id { + return true + } + } + return false +} + +// checkConfiguration tests a cluster membership configuration for common +// errors. +func checkConfiguration(configuration Configuration) error { + idSet := make(map[ServerID]bool) + addressSet := make(map[ServerAddress]bool) + var voters int + for _, server := range configuration.Servers { + if server.ID == "" { + return fmt.Errorf("empty ID in configuration: %v", configuration) + } + if server.Address == "" { + return fmt.Errorf("empty address in configuration: %v", server) + } + if idSet[server.ID] { + return fmt.Errorf("found duplicate ID in configuration: %v", server.ID) + } + idSet[server.ID] = true + if addressSet[server.Address] { + return fmt.Errorf("found duplicate address in configuration: %v", server.Address) + } + addressSet[server.Address] = true + if server.Suffrage == Voter { + voters++ + } + } + if voters == 0 { + return fmt.Errorf("need at least one voter in configuration: %v", configuration) + } + return nil +} + +// nextConfiguration generates a new Configuration from the current one and a +// configuration change request. It's split from appendConfigurationEntry so +// that it can be unit tested easily.
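Since checkConfiguration is unexported, its rules (non-empty IDs and addresses, no duplicates, at least one voter) are easiest to exercise from an in-package test. A sketch of such a test, hypothetical and not part of the upstream suite:

```go
package raft

import "testing"

func TestCheckConfiguration_NeedsVoter(t *testing.T) {
	config := Configuration{Servers: []Server{
		{Suffrage: Nonvoter, ID: "a", Address: "127.0.0.1:1"},
		{Suffrage: Nonvoter, ID: "b", Address: "127.0.0.1:2"},
	}}
	// No voters: must be rejected.
	if err := checkConfiguration(config); err == nil {
		t.Fatal("expected error for a configuration with no voters")
	}

	// Promoting one server to Voter makes the configuration valid.
	config.Servers[0].Suffrage = Voter
	if err := checkConfiguration(config); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}
```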
+func nextConfiguration(current Configuration, currentIndex uint64, change configurationChangeRequest) (Configuration, error) { + if change.prevIndex > 0 && change.prevIndex != currentIndex { + return Configuration{}, fmt.Errorf("configuration changed since %v (latest is %v)", change.prevIndex, currentIndex) + } + + configuration := current.Clone() + switch change.command { + case AddVoter: + newServer := Server{ + Suffrage: Voter, + ID: change.serverID, + Address: change.serverAddress, + } + found := false + for i, server := range configuration.Servers { + if server.ID == change.serverID { + if server.Suffrage == Voter { + configuration.Servers[i].Address = change.serverAddress + } else { + configuration.Servers[i] = newServer + } + found = true + break + } + } + if !found { + configuration.Servers = append(configuration.Servers, newServer) + } + case AddNonvoter: + newServer := Server{ + Suffrage: Nonvoter, + ID: change.serverID, + Address: change.serverAddress, + } + found := false + for i, server := range configuration.Servers { + if server.ID == change.serverID { + if server.Suffrage != Nonvoter { + configuration.Servers[i].Address = change.serverAddress + } else { + configuration.Servers[i] = newServer + } + found = true + break + } + } + if !found { + configuration.Servers = append(configuration.Servers, newServer) + } + case DemoteVoter: + for i, server := range configuration.Servers { + if server.ID == change.serverID { + configuration.Servers[i].Suffrage = Nonvoter + break + } + } + case RemoveServer: + for i, server := range configuration.Servers { + if server.ID == change.serverID { + configuration.Servers = append(configuration.Servers[:i], configuration.Servers[i+1:]...) + break + } + } + case Promote: + for i, server := range configuration.Servers { + if server.ID == change.serverID && server.Suffrage == Staging { + configuration.Servers[i].Suffrage = Voter + break + } + } + } + + // Make sure we didn't do something bad like remove the last voter + if err := checkConfiguration(configuration); err != nil { + return Configuration{}, err + } + + return configuration, nil +} + +// encodePeers is used to serialize a Configuration into the old peers format. +// This is here for backwards compatibility when operating with a mix of old +// servers and should be removed once we deprecate support for protocol version 1. +func encodePeers(configuration Configuration, trans Transport) []byte { + // Gather up all the voters, other suffrage types are not supported by + // this data format. + var encPeers [][]byte + for _, server := range configuration.Servers { + if server.Suffrage == Voter { + encPeers = append(encPeers, trans.EncodePeer(server.ID, server.Address)) + } + } + + // Encode the entire array. + buf, err := encodeMsgPack(encPeers) + if err != nil { + panic(fmt.Errorf("failed to encode peers: %v", err)) + } + + return buf.Bytes() +} + +// decodePeers is used to deserialize an old list of peers into a Configuration. +// This is here for backwards compatibility with old log entries and snapshots; +// it should be removed eventually. +func decodePeers(buf []byte, trans Transport) (Configuration, error) { + // Decode the buffer first. + var encPeers [][]byte + if err := decodeMsgPack(buf, &encPeers); err != nil { + return Configuration{}, fmt.Errorf("failed to decode peers: %v", err) + } + + // Deserialize each peer. 
+ var servers []Server + for _, enc := range encPeers { + p := trans.DecodePeer(enc) + servers = append(servers, Server{ + Suffrage: Voter, + ID: ServerID(p), + Address: p, + }) + } + + return Configuration{Servers: servers}, nil +} + +// EncodeConfiguration serializes a Configuration using MsgPack, or panics on +// errors. +func EncodeConfiguration(configuration Configuration) []byte { + buf, err := encodeMsgPack(configuration) + if err != nil { + panic(fmt.Errorf("failed to encode configuration: %v", err)) + } + return buf.Bytes() +} + +// DecodeConfiguration deserializes a Configuration using MsgPack, or panics on +// errors. +func DecodeConfiguration(buf []byte) Configuration { + var configuration Configuration + if err := decodeMsgPack(buf, &configuration); err != nil { + panic(fmt.Errorf("failed to decode configuration: %v", err)) + } + return configuration +} diff --git a/vendor/github.com/hashicorp/raft/discard_snapshot.go b/vendor/github.com/hashicorp/raft/discard_snapshot.go new file mode 100644 index 0000000000000..aa148fb78fca2 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/discard_snapshot.go @@ -0,0 +1,67 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package raft + +import ( + "fmt" + "io" +) + +// DiscardSnapshotStore is used to successfully snapshot while +// always discarding the snapshot. This is useful for when the +// log should be truncated but no snapshot should be retained. +// This should never be used in production, and is only +// suitable for testing. +type DiscardSnapshotStore struct{} + +// DiscardSnapshotSink is used to fulfill the SnapshotSink interface +// while always discarding the snapshot. This is useful for when the log +// should be truncated but no snapshot should be retained. This +// should never be used in production, and is only suitable +// for testing. +type DiscardSnapshotSink struct{} + +// NewDiscardSnapshotStore is used to create a new DiscardSnapshotStore. +func NewDiscardSnapshotStore() *DiscardSnapshotStore { + return &DiscardSnapshotStore{} +} + +// Create returns a valid type implementing the SnapshotSink which +// always discards the snapshot. +func (d *DiscardSnapshotStore) Create(version SnapshotVersion, index, term uint64, + configuration Configuration, configurationIndex uint64, trans Transport) (SnapshotSink, error) { + return &DiscardSnapshotSink{}, nil +} + +// List returns successfully with a nil []*SnapshotMeta. +func (d *DiscardSnapshotStore) List() ([]*SnapshotMeta, error) { + return nil, nil +} + +// Open returns an error since the DiscardSnapshotStore does not +// support opening snapshots.
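EncodeConfiguration and DecodeConfiguration above round-trip through MsgPack and panic rather than return errors, so they suit trusted internal data such as log entries. A small demonstration (the values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/raft"
)

func main() {
	in := raft.Configuration{Servers: []raft.Server{
		{Suffrage: raft.Voter, ID: "node-a", Address: "10.0.0.1:8300"},
	}}
	// Both calls panic on codec errors instead of returning them.
	out := raft.DecodeConfiguration(raft.EncodeConfiguration(in))
	fmt.Println(out.Servers[0].ID) // node-a
}
```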
+func (d *DiscardSnapshotStore) Open(id string) (*SnapshotMeta, io.ReadCloser, error) { + return nil, nil, fmt.Errorf("open is not supported") +} + +// Write returns successfully with the length of the input byte slice +// to satisfy the WriteCloser interface +func (d *DiscardSnapshotSink) Write(b []byte) (int, error) { + return len(b), nil +} + +// Close returns a nil error +func (d *DiscardSnapshotSink) Close() error { + return nil +} + +// ID returns "discard" for DiscardSnapshotSink +func (d *DiscardSnapshotSink) ID() string { + return "discard" +} + +// Cancel returns successfully with a nil error +func (d *DiscardSnapshotSink) Cancel() error { + return nil +} diff --git a/vendor/github.com/hashicorp/raft/file_snapshot.go b/vendor/github.com/hashicorp/raft/file_snapshot.go new file mode 100644 index 0000000000000..25ace6c3b6b4d --- /dev/null +++ b/vendor/github.com/hashicorp/raft/file_snapshot.go @@ -0,0 +1,551 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package raft + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "hash" + "hash/crc64" + "io" + "os" + "path/filepath" + "runtime" + "sort" + "strings" + "time" + + hclog "github.com/hashicorp/go-hclog" +) + +const ( + testPath = "permTest" + snapPath = "snapshots" + metaFilePath = "meta.json" + stateFilePath = "state.bin" + tmpSuffix = ".tmp" +) + +// FileSnapshotStore implements the SnapshotStore interface and allows +// snapshots to be made on the local disk. +type FileSnapshotStore struct { + path string + retain int + logger hclog.Logger + + // noSync, if true, skips crash-safe file fsync api calls. + // It's a private field, only used in testing + noSync bool +} + +type snapMetaSlice []*fileSnapshotMeta + +// FileSnapshotSink implements SnapshotSink with a file. +type FileSnapshotSink struct { + store *FileSnapshotStore + logger hclog.Logger + dir string + parentDir string + meta fileSnapshotMeta + + noSync bool + + stateFile *os.File + stateHash hash.Hash64 + buffered *bufio.Writer + + closed bool +} + +// fileSnapshotMeta is stored on disk. We also put a CRC +// on disk so that we can verify the snapshot. +type fileSnapshotMeta struct { + SnapshotMeta + CRC []byte +} + +// bufferedFile is returned when we open a snapshot. This way +// reads are buffered and the file still gets closed. +type bufferedFile struct { + bh *bufio.Reader + fh *os.File +} + +func (b *bufferedFile) Read(p []byte) (n int, err error) { + return b.bh.Read(p) +} + +func (b *bufferedFile) Close() error { + return b.fh.Close() +} + +// NewFileSnapshotStoreWithLogger creates a new FileSnapshotStore based +// on a base directory. The `retain` parameter controls how many +// snapshots are retained. Must be at least 1. 
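Constructing the file-based store is a one-liner via the constructors introduced here; the permissions self-test means failures surface at startup rather than at the first snapshot. A sketch (the path is an arbitrary example):

```go
package main

import (
	"log"
	"os"

	"github.com/hashicorp/raft"
)

func main() {
	// Keep the three most recent snapshots under <base>/snapshots; the
	// constructor creates the directory and runs a write-permission test.
	snaps, err := raft.NewFileSnapshotStore("/var/lib/myapp/raft", 3, os.Stderr)
	if err != nil {
		log.Fatalf("snapshot store: %v", err)
	}
	_ = snaps // typically handed to raft.NewRaft
}
```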
+func NewFileSnapshotStoreWithLogger(base string, retain int, logger hclog.Logger) (*FileSnapshotStore, error) { + if retain < 1 { + return nil, fmt.Errorf("must retain at least one snapshot") + } + if logger == nil { + logger = hclog.New(&hclog.LoggerOptions{ + Name: "snapshot", + Output: hclog.DefaultOutput, + Level: hclog.DefaultLevel, + }) + } + + // Ensure our path exists + path := filepath.Join(base, snapPath) + if err := os.MkdirAll(path, 0o755); err != nil && !os.IsExist(err) { + return nil, fmt.Errorf("snapshot path not accessible: %v", err) + } + + // Setup the store + store := &FileSnapshotStore{ + path: path, + retain: retain, + logger: logger, + } + + // Do a permissions test + if err := store.testPermissions(); err != nil { + return nil, fmt.Errorf("permissions test failed: %v", err) + } + return store, nil +} + +// NewFileSnapshotStore creates a new FileSnapshotStore based +// on a base directory. The `retain` parameter controls how many +// snapshots are retained. Must be at least 1. +func NewFileSnapshotStore(base string, retain int, logOutput io.Writer) (*FileSnapshotStore, error) { + if logOutput == nil { + logOutput = os.Stderr + } + return NewFileSnapshotStoreWithLogger(base, retain, hclog.New(&hclog.LoggerOptions{ + Name: "snapshot", + Output: logOutput, + Level: hclog.DefaultLevel, + })) +} + +// testPermissions tries to touch a file in our path to see if it works. +func (f *FileSnapshotStore) testPermissions() error { + path := filepath.Join(f.path, testPath) + fh, err := os.Create(path) + if err != nil { + return err + } + + if err = fh.Close(); err != nil { + return err + } + + if err = os.Remove(path); err != nil { + return err + } + return nil +} + +// snapshotName generates a name for the snapshot. +func snapshotName(term, index uint64) string { + now := time.Now() + msec := now.UnixNano() / int64(time.Millisecond) + return fmt.Sprintf("%d-%d-%d", term, index, msec) +} + +// Create is used to start a new snapshot +func (f *FileSnapshotStore) Create(version SnapshotVersion, index, term uint64, + configuration Configuration, configurationIndex uint64, trans Transport) (SnapshotSink, error) { + // We only support version 1 snapshots at this time. 
+ if version != 1 { + return nil, fmt.Errorf("unsupported snapshot version %d", version) + } + + // Create a new path + name := snapshotName(term, index) + path := filepath.Join(f.path, name+tmpSuffix) + f.logger.Info("creating new snapshot", "path", path) + + // Make the directory + if err := os.MkdirAll(path, 0o755); err != nil { + f.logger.Error("failed to make snapshot directly", "error", err) + return nil, err + } + + // Create the sink + sink := &FileSnapshotSink{ + store: f, + logger: f.logger, + dir: path, + parentDir: f.path, + noSync: f.noSync, + meta: fileSnapshotMeta{ + SnapshotMeta: SnapshotMeta{ + Version: version, + ID: name, + Index: index, + Term: term, + Peers: encodePeers(configuration, trans), + Configuration: configuration, + ConfigurationIndex: configurationIndex, + }, + CRC: nil, + }, + } + + // Write out the meta data + if err := sink.writeMeta(); err != nil { + f.logger.Error("failed to write metadata", "error", err) + return nil, err + } + + // Open the state file + statePath := filepath.Join(path, stateFilePath) + fh, err := os.Create(statePath) + if err != nil { + f.logger.Error("failed to create state file", "error", err) + return nil, err + } + sink.stateFile = fh + + // Create a CRC64 hash + sink.stateHash = crc64.New(crc64.MakeTable(crc64.ECMA)) + + // Wrap both the hash and file in a MultiWriter with buffering + multi := io.MultiWriter(sink.stateFile, sink.stateHash) + sink.buffered = bufio.NewWriter(multi) + + // Done + return sink, nil +} + +// List returns available snapshots in the store. +func (f *FileSnapshotStore) List() ([]*SnapshotMeta, error) { + // Get the eligible snapshots + snapshots, err := f.getSnapshots() + if err != nil { + f.logger.Error("failed to get snapshots", "error", err) + return nil, err + } + + var snapMeta []*SnapshotMeta + for _, meta := range snapshots { + snapMeta = append(snapMeta, &meta.SnapshotMeta) + if len(snapMeta) == f.retain { + break + } + } + return snapMeta, nil +} + +// getSnapshots returns all the known snapshots. +func (f *FileSnapshotStore) getSnapshots() ([]*fileSnapshotMeta, error) { + // Get the eligible snapshots + snapshots, err := os.ReadDir(f.path) + if err != nil { + f.logger.Error("failed to scan snapshot directory", "error", err) + return nil, err + } + + // Populate the metadata + var snapMeta []*fileSnapshotMeta + for _, snap := range snapshots { + // Ignore any files + if !snap.IsDir() { + continue + } + + // Ignore any temporary snapshots + dirName := snap.Name() + if strings.HasSuffix(dirName, tmpSuffix) { + f.logger.Warn("found temporary snapshot", "name", dirName) + continue + } + + // Try to read the meta data + meta, err := f.readMeta(dirName) + if err != nil { + f.logger.Warn("failed to read metadata", "name", dirName, "error", err) + continue + } + + // Make sure we can understand this version. 
+ if meta.Version < SnapshotVersionMin || meta.Version > SnapshotVersionMax { + f.logger.Warn("snapshot version not supported", "name", dirName, "version", meta.Version) + continue + } + + // Append, but only return up to the retain count + snapMeta = append(snapMeta, meta) + } + + // Sort the snapshot, reverse so we get new -> old + sort.Sort(sort.Reverse(snapMetaSlice(snapMeta))) + + return snapMeta, nil +} + +// readMeta is used to read the meta data for a given named backup +func (f *FileSnapshotStore) readMeta(name string) (*fileSnapshotMeta, error) { + // Open the meta file + metaPath := filepath.Join(f.path, name, metaFilePath) + fh, err := os.Open(metaPath) + if err != nil { + return nil, err + } + defer fh.Close() + + // Buffer the file IO + buffered := bufio.NewReader(fh) + + // Read in the JSON + meta := &fileSnapshotMeta{} + dec := json.NewDecoder(buffered) + if err := dec.Decode(meta); err != nil { + return nil, err + } + return meta, nil +} + +// Open takes a snapshot ID and returns a ReadCloser for that snapshot. +func (f *FileSnapshotStore) Open(id string) (*SnapshotMeta, io.ReadCloser, error) { + // Get the metadata + meta, err := f.readMeta(id) + if err != nil { + f.logger.Error("failed to get meta data to open snapshot", "error", err) + return nil, nil, err + } + + // Open the state file + statePath := filepath.Join(f.path, id, stateFilePath) + fh, err := os.Open(statePath) + if err != nil { + f.logger.Error("failed to open state file", "error", err) + return nil, nil, err + } + + // Create a CRC64 hash + stateHash := crc64.New(crc64.MakeTable(crc64.ECMA)) + + // Compute the hash + _, err = io.Copy(stateHash, fh) + if err != nil { + f.logger.Error("failed to read state file", "error", err) + fh.Close() + return nil, nil, err + } + + // Verify the hash + computed := stateHash.Sum(nil) + if bytes.Compare(meta.CRC, computed) != 0 { + f.logger.Error("CRC checksum failed", "stored", meta.CRC, "computed", computed) + fh.Close() + return nil, nil, fmt.Errorf("CRC mismatch") + } + + // Seek to the start + if _, err := fh.Seek(0, 0); err != nil { + f.logger.Error("state file seek failed", "error", err) + fh.Close() + return nil, nil, err + } + + // Return a buffered file + buffered := &bufferedFile{ + bh: bufio.NewReader(fh), + fh: fh, + } + + return &meta.SnapshotMeta, buffered, nil +} + +// ReapSnapshots reaps any snapshots beyond the retain count. +func (f *FileSnapshotStore) ReapSnapshots() error { + snapshots, err := f.getSnapshots() + if err != nil { + f.logger.Error("failed to get snapshots", "error", err) + return err + } + + for i := f.retain; i < len(snapshots); i++ { + path := filepath.Join(f.path, snapshots[i].ID) + f.logger.Info("reaping snapshot", "path", path) + if err := os.RemoveAll(path); err != nil { + f.logger.Error("failed to reap snapshot", "path", path, "error", err) + return err + } + } + return nil +} + +// ID returns the ID of the snapshot, can be used with Open() +// after the snapshot is finalized. +func (s *FileSnapshotSink) ID() string { + return s.meta.ID +} + +// Write is used to append to the state file. We write to the +// buffered IO object to reduce the amount of context switches. +func (s *FileSnapshotSink) Write(b []byte) (int, error) { + return s.buffered.Write(b) +} + +// Close is used to indicate a successful end. 
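Reading snapshots back goes through List (newest first, capped at the retain count) and Open, which re-verifies the CRC before returning a buffered reader. A consumer sketch (the path and logging are illustrative):

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/hashicorp/raft"
)

func main() {
	store, err := raft.NewFileSnapshotStore("/var/lib/myapp/raft", 3, os.Stderr)
	if err != nil {
		log.Fatal(err)
	}

	metas, err := store.List() // sorted new -> old
	if err != nil || len(metas) == 0 {
		log.Fatalf("no snapshots available: %v", err)
	}

	meta, rc, err := store.Open(metas[0].ID) // CRC is checked inside Open
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	log.Printf("snapshot %s: index=%d size=%dB", meta.ID, meta.Index, meta.Size)
	if _, err := io.Copy(io.Discard, rc); err != nil { // stream the state to a real consumer here
		log.Fatal(err)
	}
}
```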
+func (s *FileSnapshotSink) Close() error { + // Make sure close is idempotent + if s.closed { + return nil + } + s.closed = true + + // Close the open handles + if err := s.finalize(); err != nil { + s.logger.Error("failed to finalize snapshot", "error", err) + if delErr := os.RemoveAll(s.dir); delErr != nil { + s.logger.Error("failed to delete temporary snapshot directory", "path", s.dir, "error", delErr) + return delErr + } + return err + } + + // Write out the meta data + if err := s.writeMeta(); err != nil { + s.logger.Error("failed to write metadata", "error", err) + return err + } + + // Move the directory into place + newPath := strings.TrimSuffix(s.dir, tmpSuffix) + if err := os.Rename(s.dir, newPath); err != nil { + s.logger.Error("failed to move snapshot into place", "error", err) + return err + } + + if !s.noSync && runtime.GOOS != "windows" { // skipping fsync for directory entry edits on Windows, only needed for *nix style file systems + parentFH, err := os.Open(s.parentDir) + if err != nil { + s.logger.Error("failed to open snapshot parent directory", "path", s.parentDir, "error", err) + return err + } + defer parentFH.Close() + + if err = parentFH.Sync(); err != nil { + s.logger.Error("failed syncing parent directory", "path", s.parentDir, "error", err) + return err + } + } + + // Reap any old snapshots + if err := s.store.ReapSnapshots(); err != nil { + return err + } + + return nil +} + +// Cancel is used to indicate an unsuccessful end. +func (s *FileSnapshotSink) Cancel() error { + // Make sure close is idempotent + if s.closed { + return nil + } + s.closed = true + + // Close the open handles + if err := s.finalize(); err != nil { + s.logger.Error("failed to finalize snapshot", "error", err) + return err + } + + // Attempt to remove all artifacts + return os.RemoveAll(s.dir) +} + +// finalize is used to close all of our resources. +func (s *FileSnapshotSink) finalize() error { + // Flush any remaining data + if err := s.buffered.Flush(); err != nil { + return err + } + + // Sync to force fsync to disk + if !s.noSync { + if err := s.stateFile.Sync(); err != nil { + return err + } + } + + // Get the file size + stat, statErr := s.stateFile.Stat() + + // Close the file + if err := s.stateFile.Close(); err != nil { + return err + } + + // Set the file size, check after we close + if statErr != nil { + return statErr + } + s.meta.Size = stat.Size() + + // Set the CRC + s.meta.CRC = s.stateHash.Sum(nil) + return nil +} + +// writeMeta is used to write out the metadata we have. +func (s *FileSnapshotSink) writeMeta() error { + var err error + // Open the meta file + metaPath := filepath.Join(s.dir, metaFilePath) + var fh *os.File + fh, err = os.Create(metaPath) + if err != nil { + return err + } + defer fh.Close() + + // Buffer the file IO + buffered := bufio.NewWriter(fh) + + // Write out as JSON + enc := json.NewEncoder(buffered) + if err = enc.Encode(&s.meta); err != nil { + return err + } + + if err = buffered.Flush(); err != nil { + return err + } + + if !s.noSync { + if err = fh.Sync(); err != nil { + return err + } + } + + return nil +} + +// Implement the sort interface for []*fileSnapshotMeta. 
+func (s snapMetaSlice) Len() int { + return len(s) +} + +func (s snapMetaSlice) Less(i, j int) bool { + if s[i].Term != s[j].Term { + return s[i].Term < s[j].Term + } + if s[i].Index != s[j].Index { + return s[i].Index < s[j].Index + } + return s[i].ID < s[j].ID +} + +func (s snapMetaSlice) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} diff --git a/vendor/github.com/hashicorp/raft/fsm.go b/vendor/github.com/hashicorp/raft/fsm.go new file mode 100644 index 0000000000000..a84bde345332f --- /dev/null +++ b/vendor/github.com/hashicorp/raft/fsm.go @@ -0,0 +1,279 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package raft + +import ( + "fmt" + "io" + "time" + + "github.com/armon/go-metrics" + hclog "github.com/hashicorp/go-hclog" +) + +// FSM is implemented by clients to make use of the replicated log. +type FSM interface { + // Apply is called once a log entry is committed by a majority of the cluster. + // + // Apply should apply the log to the FSM. Apply must be deterministic and + // produce the same result on all peers in the cluster. + // + // The returned value is returned to the client as the ApplyFuture.Response. + Apply(*Log) interface{} + + // Snapshot returns an FSMSnapshot used to: support log compaction, to + // restore the FSM to a previous state, or to bring out-of-date followers up + // to a recent log index. + // + // The Snapshot implementation should return quickly, because Apply can not + // be called while Snapshot is running. Generally this means Snapshot should + // only capture a pointer to the state, and any expensive IO should happen + // as part of FSMSnapshot.Persist. + // + // Apply and Snapshot are always called from the same thread, but Apply will + // be called concurrently with FSMSnapshot.Persist. This means the FSM should + // be implemented to allow for concurrent updates while a snapshot is happening. + Snapshot() (FSMSnapshot, error) + + // Restore is used to restore an FSM from a snapshot. It is not called + // concurrently with any other command. The FSM must discard all previous + // state before restoring the snapshot. + Restore(snapshot io.ReadCloser) error +} + +// BatchingFSM extends the FSM interface to add an ApplyBatch function. This can +// optionally be implemented by clients to enable multiple logs to be applied to +// the FSM in batches. Up to MaxAppendEntries could be sent in a batch. +type BatchingFSM interface { + // ApplyBatch is invoked once a batch of log entries has been committed and + // are ready to be applied to the FSM. ApplyBatch will take in an array of + // log entries. These log entries will be in the order they were committed, + // will not have gaps, and could be of a few log types. Clients should check + // the log type prior to attempting to decode the data attached. Presently + // the LogCommand and LogConfiguration types will be sent. + // + // The returned slice must be the same length as the input and each response + // should correlate to the log at the same index of the input. The returned + // values will be made available in the ApplyFuture returned by Raft.Apply + // method if that method was called on the same Raft node as the FSM. + ApplyBatch([]*Log) []interface{} + + FSM +} + +// FSMSnapshot is returned by an FSM in response to a Snapshot +// It must be safe to invoke FSMSnapshot methods with concurrent +// calls to Apply. 
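The `FSMSnapshot` interface introduced by the comment above is defined next. To make the Apply/Snapshot/Restore contract concrete, here is a hedged sketch of a toy key-value FSM a client might write against these interfaces; the JSON command encoding and all names are invented for illustration.

```go
package raftexample

import (
	"encoding/json"
	"io"
	"sync"

	"github.com/hashicorp/raft"
)

// KVFSM is a toy FSM: each committed log entry is a JSON {key, value} pair.
type KVFSM struct {
	mu   sync.Mutex
	data map[string]string
}

type cmd struct {
	Key   string `json:"key"`
	Value string `json:"value"`
}

// Apply must be deterministic: the same log yields the same result on every peer.
func (f *KVFSM) Apply(l *raft.Log) interface{} {
	var c cmd
	if err := json.Unmarshal(l.Data, &c); err != nil {
		return err // surfaced via ApplyFuture.Response(), not Error()
	}
	f.mu.Lock()
	defer f.mu.Unlock()
	if f.data == nil {
		f.data = map[string]string{}
	}
	f.data[c.Key] = c.Value
	return nil
}

// Snapshot only captures a cheap copy; expensive IO belongs in Persist,
// which may run concurrently with later Apply calls.
func (f *KVFSM) Snapshot() (raft.FSMSnapshot, error) {
	f.mu.Lock()
	defer f.mu.Unlock()
	clone := make(map[string]string, len(f.data))
	for k, v := range f.data {
		clone[k] = v
	}
	return &kvSnapshot{state: clone}, nil
}

// Restore discards all previous state, as the contract requires.
// Raft itself closes the source reader afterwards.
func (f *KVFSM) Restore(rc io.ReadCloser) error {
	fresh := map[string]string{}
	if err := json.NewDecoder(rc).Decode(&fresh); err != nil {
		return err
	}
	f.mu.Lock()
	f.data = fresh
	f.mu.Unlock()
	return nil
}

type kvSnapshot struct{ state map[string]string }

// Persist dumps the captured state to the sink: Close on success, Cancel on error.
func (s *kvSnapshot) Persist(sink raft.SnapshotSink) error {
	if err := json.NewEncoder(sink).Encode(s.state); err != nil {
		sink.Cancel()
		return err
	}
	return sink.Close()
}

func (s *kvSnapshot) Release() {}
```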
+type FSMSnapshot interface { + // Persist should dump all necessary state to the WriteCloser 'sink', + // and call sink.Close() when finished or call sink.Cancel() on error. + Persist(sink SnapshotSink) error + + // Release is invoked when we are finished with the snapshot. + Release() +} + +// runFSM is a long running goroutine responsible for applying logs +// to the FSM. This is done async of other logs since we don't want +// the FSM to block our internal operations. +func (r *Raft) runFSM() { + var lastIndex, lastTerm uint64 + + batchingFSM, batchingEnabled := r.fsm.(BatchingFSM) + configStore, configStoreEnabled := r.fsm.(ConfigurationStore) + + applySingle := func(req *commitTuple) { + // Apply the log if a command or config change + var resp interface{} + // Make sure we send a response + defer func() { + // Invoke the future if given + if req.future != nil { + req.future.response = resp + req.future.respond(nil) + } + }() + + switch req.log.Type { + case LogCommand: + start := time.Now() + resp = r.fsm.Apply(req.log) + metrics.MeasureSince([]string{"raft", "fsm", "apply"}, start) + + case LogConfiguration: + if !configStoreEnabled { + // Return early to avoid incrementing the index and term for + // an unimplemented operation. + return + } + + start := time.Now() + configStore.StoreConfiguration(req.log.Index, DecodeConfiguration(req.log.Data)) + metrics.MeasureSince([]string{"raft", "fsm", "store_config"}, start) + } + + // Update the indexes + lastIndex = req.log.Index + lastTerm = req.log.Term + } + + applyBatch := func(reqs []*commitTuple) { + if !batchingEnabled { + for _, ct := range reqs { + applySingle(ct) + } + return + } + + // Only send LogCommand and LogConfiguration log types. LogBarrier types + // will not be sent to the FSM. + shouldSend := func(l *Log) bool { + switch l.Type { + case LogCommand, LogConfiguration: + return true + } + return false + } + + var lastBatchIndex, lastBatchTerm uint64 + sendLogs := make([]*Log, 0, len(reqs)) + for _, req := range reqs { + if shouldSend(req.log) { + sendLogs = append(sendLogs, req.log) + } + lastBatchIndex = req.log.Index + lastBatchTerm = req.log.Term + } + + var responses []interface{} + if len(sendLogs) > 0 { + start := time.Now() + responses = batchingFSM.ApplyBatch(sendLogs) + metrics.MeasureSince([]string{"raft", "fsm", "applyBatch"}, start) + metrics.AddSample([]string{"raft", "fsm", "applyBatchNum"}, float32(len(reqs))) + + // Ensure we get the expected responses + if len(sendLogs) != len(responses) { + panic("invalid number of responses") + } + } + + // Update the indexes + lastIndex = lastBatchIndex + lastTerm = lastBatchTerm + + var i int + for _, req := range reqs { + var resp interface{} + // If the log was sent to the FSM, retrieve the response. 
+			if shouldSend(req.log) {
+				resp = responses[i]
+				i++
+			}
+
+			if req.future != nil {
+				req.future.response = resp
+				req.future.respond(nil)
+			}
+		}
+	}
+
+	restore := func(req *restoreFuture) {
+		// Open the snapshot
+		meta, source, err := r.snapshots.Open(req.ID)
+		if err != nil {
+			req.respond(fmt.Errorf("failed to open snapshot %v: %v", req.ID, err))
+			return
+		}
+		defer source.Close()
+
+		snapLogger := r.logger.With(
+			"id", req.ID,
+			"last-index", meta.Index,
+			"last-term", meta.Term,
+			"size-in-bytes", meta.Size,
+		)
+
+		// Attempt to restore
+		if err := fsmRestoreAndMeasure(snapLogger, r.fsm, source, meta.Size); err != nil {
+			req.respond(fmt.Errorf("failed to restore snapshot %v: %v", req.ID, err))
+			return
+		}
+
+		// Update the last index and term
+		lastIndex = meta.Index
+		lastTerm = meta.Term
+		req.respond(nil)
+	}
+
+	snapshot := func(req *reqSnapshotFuture) {
+		// Is there something to snapshot?
+		if lastIndex == 0 {
+			req.respond(ErrNothingNewToSnapshot)
+			return
+		}
+
+		// Start a snapshot
+		start := time.Now()
+		snap, err := r.fsm.Snapshot()
+		metrics.MeasureSince([]string{"raft", "fsm", "snapshot"}, start)
+
+		// Respond to the request
+		req.index = lastIndex
+		req.term = lastTerm
+		req.snapshot = snap
+		req.respond(err)
+	}
+
+	saturation := newSaturationMetric([]string{"raft", "thread", "fsm", "saturation"}, 1*time.Second)
+
+	for {
+		saturation.sleeping()
+
+		select {
+		case ptr := <-r.fsmMutateCh:
+			saturation.working()
+
+			switch req := ptr.(type) {
+			case []*commitTuple:
+				applyBatch(req)
+
+			case *restoreFuture:
+				restore(req)
+
+			default:
+				panic(fmt.Errorf("bad type passed to fsmMutateCh: %#v", ptr))
+			}
+
+		case req := <-r.fsmSnapshotCh:
+			saturation.working()
+
+			snapshot(req)
+
+		case <-r.shutdownCh:
+			return
+		}
+	}
+}
+
+// fsmRestoreAndMeasure wraps the Restore call on an FSM to consistently measure
+// and report timing metrics. The caller is still responsible for calling Close
+// on the source in all cases.
+func fsmRestoreAndMeasure(logger hclog.Logger, fsm FSM, source io.ReadCloser, snapshotSize int64) error {
+	start := time.Now()
+
+	crc := newCountingReadCloser(source)
+
+	monitor := startSnapshotRestoreMonitor(logger, crc, snapshotSize, false)
+	defer monitor.StopAndWait()
+
+	if err := fsm.Restore(crc); err != nil {
+		return err
+	}
+	metrics.MeasureSince([]string{"raft", "fsm", "restore"}, start)
+	metrics.SetGauge([]string{"raft", "fsm", "lastRestoreDuration"},
+		float32(time.Since(start).Milliseconds()))
+
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/raft/future.go b/vendor/github.com/hashicorp/raft/future.go
new file mode 100644
index 0000000000000..303da448742b4
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/future.go
@@ -0,0 +1,314 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package raft
+
+import (
+	"fmt"
+	"io"
+	"sync"
+	"time"
+)
+
+// Future is used to represent an action that may occur in the future.
+type Future interface {
+	// Error blocks until the future arrives and then returns the error status
+	// of the future. This may be called any number of times - all calls will
+	// return the same value - however it is not OK to call this method twice
+	// concurrently on the same Future instance.
+	// Error will only return generic errors related to raft, such
+	// as ErrLeadershipLost, or ErrRaftShutdown. Some operations, such as
+	// ApplyLog, may also return errors from other methods.
+ Error() error +} + +// IndexFuture is used for future actions that can result in a raft log entry +// being created. +type IndexFuture interface { + Future + + // Index holds the index of the newly applied log entry. + // This must not be called until after the Error method has returned. + Index() uint64 +} + +// ApplyFuture is used for Apply and can return the FSM response. +type ApplyFuture interface { + IndexFuture + + // Response returns the FSM response as returned by the FSM.Apply method. This + // must not be called until after the Error method has returned. + // Note that if FSM.Apply returns an error, it will be returned by Response, + // and not by the Error method, so it is always important to check Response + // for errors from the FSM. + Response() interface{} +} + +// ConfigurationFuture is used for GetConfiguration and can return the +// latest configuration in use by Raft. +type ConfigurationFuture interface { + IndexFuture + + // Configuration contains the latest configuration. This must + // not be called until after the Error method has returned. + Configuration() Configuration +} + +// SnapshotFuture is used for waiting on a user-triggered snapshot to complete. +type SnapshotFuture interface { + Future + + // Open is a function you can call to access the underlying snapshot and + // its metadata. This must not be called until after the Error method + // has returned. + Open() (*SnapshotMeta, io.ReadCloser, error) +} + +// LeadershipTransferFuture is used for waiting on a user-triggered leadership +// transfer to complete. +type LeadershipTransferFuture interface { + Future +} + +// errorFuture is used to return a static error. +type errorFuture struct { + err error +} + +func (e errorFuture) Error() error { + return e.err +} + +func (e errorFuture) Response() interface{} { + return nil +} + +func (e errorFuture) Index() uint64 { + return 0 +} + +// deferError can be embedded to allow a future +// to provide an error in the future. +type deferError struct { + err error + errCh chan error + responded bool + ShutdownCh chan struct{} +} + +func (d *deferError) init() { + d.errCh = make(chan error, 1) +} + +func (d *deferError) Error() error { + if d.err != nil { + // Note that when we've received a nil error, this + // won't trigger, but the channel is closed after + // send so we'll still return nil below. + return d.err + } + if d.errCh == nil { + panic("waiting for response on nil channel") + } + select { + case d.err = <-d.errCh: + case <-d.ShutdownCh: + d.err = ErrRaftShutdown + } + return d.err +} + +func (d *deferError) respond(err error) { + if d.errCh == nil { + return + } + if d.responded { + return + } + d.errCh <- err + close(d.errCh) + d.responded = true +} + +// There are several types of requests that cause a configuration entry to +// be appended to the log. These are encoded here for leaderLoop() to process. +// This is internal to a single server. +type configurationChangeFuture struct { + logFuture + req configurationChangeRequest +} + +// bootstrapFuture is used to attempt a live bootstrap of the cluster. See the +// Raft object's BootstrapCluster member function for more details. +type bootstrapFuture struct { + deferError + + // configuration is the proposed bootstrap configuration to apply. + configuration Configuration +} + +// logFuture is used to apply a log entry and waits until +// the log is considered committed. 
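The `logFuture` type below is what backs `ApplyFuture`. From a client's point of view, the contract documented above boils down to checking `Error` for raft-level failures and then `Response` for FSM-level ones. A minimal sketch, assuming `r` is an already-started `*raft.Raft` and the command bytes are illustrative:

```go
package raftexample

import (
	"log"
	"time"

	"github.com/hashicorp/raft"
)

// proposeKV demonstrates the ApplyFuture contract: Error reports raft-level
// failures, while anything the FSM returned (including its errors) comes
// back via Response.
func proposeKV(r *raft.Raft, cmd []byte) {
	f := r.Apply(cmd, 5*time.Second) // leader-only; returns an ApplyFuture

	// Blocks until the entry is committed and applied, or a raft error occurs.
	if err := f.Error(); err != nil {
		log.Fatalf("apply failed: %v", err)
	}

	// FSM-level failures travel through Response, not Error, so check it too.
	if err, ok := f.Response().(error); ok {
		log.Fatalf("fsm rejected command: %v", err)
	}
	log.Println("committed at index", f.Index())
}
```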
+type logFuture struct { + deferError + log Log + response interface{} + dispatch time.Time +} + +func (l *logFuture) Response() interface{} { + return l.response +} + +func (l *logFuture) Index() uint64 { + return l.log.Index +} + +type shutdownFuture struct { + raft *Raft +} + +func (s *shutdownFuture) Error() error { + if s.raft == nil { + return nil + } + s.raft.waitShutdown() + if closeable, ok := s.raft.trans.(WithClose); ok { + closeable.Close() + } + return nil +} + +// userSnapshotFuture is used for waiting on a user-triggered snapshot to +// complete. +type userSnapshotFuture struct { + deferError + + // opener is a function used to open the snapshot. This is filled in + // once the future returns with no error. + opener func() (*SnapshotMeta, io.ReadCloser, error) +} + +// Open is a function you can call to access the underlying snapshot and its +// metadata. +func (u *userSnapshotFuture) Open() (*SnapshotMeta, io.ReadCloser, error) { + if u.opener == nil { + return nil, nil, fmt.Errorf("no snapshot available") + } + // Invalidate the opener so it can't get called multiple times, + // which isn't generally safe. + defer func() { + u.opener = nil + }() + return u.opener() +} + +// userRestoreFuture is used for waiting on a user-triggered restore of an +// external snapshot to complete. +type userRestoreFuture struct { + deferError + + // meta is the metadata that belongs with the snapshot. + meta *SnapshotMeta + + // reader is the interface to read the snapshot contents from. + reader io.Reader +} + +// reqSnapshotFuture is used for requesting a snapshot start. +// It is only used internally. +type reqSnapshotFuture struct { + deferError + + // snapshot details provided by the FSM runner before responding + index uint64 + term uint64 + snapshot FSMSnapshot +} + +// restoreFuture is used for requesting an FSM to perform a +// snapshot restore. Used internally only. +type restoreFuture struct { + deferError + ID string +} + +// verifyFuture is used to verify the current node is still +// the leader. This is to prevent a stale read. +type verifyFuture struct { + deferError + notifyCh chan *verifyFuture + quorumSize int + votes int + voteLock sync.Mutex +} + +// leadershipTransferFuture is used to track the progress of a leadership +// transfer internally. +type leadershipTransferFuture struct { + deferError + + ID *ServerID + Address *ServerAddress +} + +// configurationsFuture is used to retrieve the current configurations. This is +// used to allow safe access to this information outside of the main thread. +type configurationsFuture struct { + deferError + configurations configurations +} + +// Configuration returns the latest configuration in use by Raft. +func (c *configurationsFuture) Configuration() Configuration { + return c.configurations.latest +} + +// Index returns the index of the latest configuration in use by Raft. +func (c *configurationsFuture) Index() uint64 { + return c.configurations.latestIndex +} + +// vote is used to respond to a verifyFuture. +// This may block when responding on the notifyCh. +func (v *verifyFuture) vote(leader bool) { + v.voteLock.Lock() + defer v.voteLock.Unlock() + + // Guard against having notified already + if v.notifyCh == nil { + return + } + + if leader { + v.votes++ + if v.votes >= v.quorumSize { + v.notifyCh <- v + v.notifyCh = nil + } + } else { + v.notifyCh <- v + v.notifyCh = nil + } +} + +// appendFuture is used for waiting on a pipelined append +// entries RPC. 
+type appendFuture struct { + deferError + start time.Time + args *AppendEntriesRequest + resp *AppendEntriesResponse +} + +func (a *appendFuture) Start() time.Time { + return a.start +} + +func (a *appendFuture) Request() *AppendEntriesRequest { + return a.args +} + +func (a *appendFuture) Response() *AppendEntriesResponse { + return a.resp +} diff --git a/vendor/github.com/hashicorp/raft/inmem_snapshot.go b/vendor/github.com/hashicorp/raft/inmem_snapshot.go new file mode 100644 index 0000000000000..d23bc20999036 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/inmem_snapshot.go @@ -0,0 +1,113 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package raft + +import ( + "bytes" + "fmt" + "io" + "sync" +) + +// InmemSnapshotStore implements the SnapshotStore interface and +// retains only the most recent snapshot +type InmemSnapshotStore struct { + latest *InmemSnapshotSink + hasSnapshot bool + sync.RWMutex +} + +// InmemSnapshotSink implements SnapshotSink in memory +type InmemSnapshotSink struct { + meta SnapshotMeta + contents *bytes.Buffer +} + +// NewInmemSnapshotStore creates a blank new InmemSnapshotStore +func NewInmemSnapshotStore() *InmemSnapshotStore { + return &InmemSnapshotStore{ + latest: &InmemSnapshotSink{ + contents: &bytes.Buffer{}, + }, + } +} + +// Create replaces the stored snapshot with a new one using the given args +func (m *InmemSnapshotStore) Create(version SnapshotVersion, index, term uint64, + configuration Configuration, configurationIndex uint64, trans Transport) (SnapshotSink, error) { + // We only support version 1 snapshots at this time. + if version != 1 { + return nil, fmt.Errorf("unsupported snapshot version %d", version) + } + + name := snapshotName(term, index) + + m.Lock() + defer m.Unlock() + + sink := &InmemSnapshotSink{ + meta: SnapshotMeta{ + Version: version, + ID: name, + Index: index, + Term: term, + Peers: encodePeers(configuration, trans), + Configuration: configuration, + ConfigurationIndex: configurationIndex, + }, + contents: &bytes.Buffer{}, + } + m.hasSnapshot = true + m.latest = sink + + return sink, nil +} + +// List returns the latest snapshot taken +func (m *InmemSnapshotStore) List() ([]*SnapshotMeta, error) { + m.RLock() + defer m.RUnlock() + + if !m.hasSnapshot { + return []*SnapshotMeta{}, nil + } + return []*SnapshotMeta{&m.latest.meta}, nil +} + +// Open wraps an io.ReadCloser around the snapshot contents +func (m *InmemSnapshotStore) Open(id string) (*SnapshotMeta, io.ReadCloser, error) { + m.RLock() + defer m.RUnlock() + + if m.latest.meta.ID != id { + return nil, nil, fmt.Errorf("[ERR] snapshot: failed to open snapshot id: %s", id) + } + + // Make a copy of the contents, since a bytes.Buffer can only be read + // once. 
+ contents := bytes.NewBuffer(m.latest.contents.Bytes()) + return &m.latest.meta, io.NopCloser(contents), nil +} + +// Write appends the given bytes to the snapshot contents +func (s *InmemSnapshotSink) Write(p []byte) (n int, err error) { + written, err := s.contents.Write(p) + s.meta.Size += int64(written) + return written, err +} + +// Close updates the Size and is otherwise a no-op +func (s *InmemSnapshotSink) Close() error { + return nil +} + +// ID returns the ID of the SnapshotMeta +func (s *InmemSnapshotSink) ID() string { + return s.meta.ID +} + +// Cancel returns successfully with a nil error +func (s *InmemSnapshotSink) Cancel() error { + return nil +} diff --git a/vendor/github.com/hashicorp/raft/inmem_store.go b/vendor/github.com/hashicorp/raft/inmem_store.go new file mode 100644 index 0000000000000..730d03f288e3b --- /dev/null +++ b/vendor/github.com/hashicorp/raft/inmem_store.go @@ -0,0 +1,133 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package raft + +import ( + "errors" + "sync" +) + +// InmemStore implements the LogStore and StableStore interface. +// It should NOT EVER be used for production. It is used only for +// unit tests. Use the MDBStore implementation instead. +type InmemStore struct { + l sync.RWMutex + lowIndex uint64 + highIndex uint64 + logs map[uint64]*Log + kv map[string][]byte + kvInt map[string]uint64 +} + +// NewInmemStore returns a new in-memory backend. Do not ever +// use for production. Only for testing. +func NewInmemStore() *InmemStore { + i := &InmemStore{ + logs: make(map[uint64]*Log), + kv: make(map[string][]byte), + kvInt: make(map[string]uint64), + } + return i +} + +// FirstIndex implements the LogStore interface. +func (i *InmemStore) FirstIndex() (uint64, error) { + i.l.RLock() + defer i.l.RUnlock() + return i.lowIndex, nil +} + +// LastIndex implements the LogStore interface. +func (i *InmemStore) LastIndex() (uint64, error) { + i.l.RLock() + defer i.l.RUnlock() + return i.highIndex, nil +} + +// GetLog implements the LogStore interface. +func (i *InmemStore) GetLog(index uint64, log *Log) error { + i.l.RLock() + defer i.l.RUnlock() + l, ok := i.logs[index] + if !ok { + return ErrLogNotFound + } + *log = *l + return nil +} + +// StoreLog implements the LogStore interface. +func (i *InmemStore) StoreLog(log *Log) error { + return i.StoreLogs([]*Log{log}) +} + +// StoreLogs implements the LogStore interface. +func (i *InmemStore) StoreLogs(logs []*Log) error { + i.l.Lock() + defer i.l.Unlock() + for _, l := range logs { + i.logs[l.Index] = l + if i.lowIndex == 0 { + i.lowIndex = l.Index + } + if l.Index > i.highIndex { + i.highIndex = l.Index + } + } + return nil +} + +// DeleteRange implements the LogStore interface. +func (i *InmemStore) DeleteRange(min, max uint64) error { + i.l.Lock() + defer i.l.Unlock() + for j := min; j <= max; j++ { + delete(i.logs, j) + } + if min <= i.lowIndex { + i.lowIndex = max + 1 + } + if max >= i.highIndex { + i.highIndex = min - 1 + } + if i.lowIndex > i.highIndex { + i.lowIndex = 0 + i.highIndex = 0 + } + return nil +} + +// Set implements the StableStore interface. +func (i *InmemStore) Set(key []byte, val []byte) error { + i.l.Lock() + defer i.l.Unlock() + i.kv[string(key)] = val + return nil +} + +// Get implements the StableStore interface. 
+func (i *InmemStore) Get(key []byte) ([]byte, error) {
+	i.l.RLock()
+	defer i.l.RUnlock()
+	val := i.kv[string(key)]
+	if val == nil {
+		return nil, errors.New("not found")
+	}
+	return val, nil
+}
+
+// SetUint64 implements the StableStore interface.
+func (i *InmemStore) SetUint64(key []byte, val uint64) error {
+	i.l.Lock()
+	defer i.l.Unlock()
+	i.kvInt[string(key)] = val
+	return nil
+}
+
+// GetUint64 implements the StableStore interface.
+func (i *InmemStore) GetUint64(key []byte) (uint64, error) {
+	i.l.RLock()
+	defer i.l.RUnlock()
+	return i.kvInt[string(key)], nil
+}
diff --git a/vendor/github.com/hashicorp/raft/inmem_transport.go b/vendor/github.com/hashicorp/raft/inmem_transport.go
new file mode 100644
index 0000000000000..561ba73d70a3e
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/inmem_transport.go
@@ -0,0 +1,374 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package raft
+
+import (
+	"fmt"
+	"io"
+	"sync"
+	"time"
+)
+
+// NewInmemAddr returns a new in-memory addr with
+// a randomly generated UUID as the ID.
+func NewInmemAddr() ServerAddress {
+	return ServerAddress(generateUUID())
+}
+
+// inmemPipeline is used to pipeline requests for the in-mem transport.
+type inmemPipeline struct {
+	trans    *InmemTransport
+	peer     *InmemTransport
+	peerAddr ServerAddress
+
+	doneCh       chan AppendFuture
+	inprogressCh chan *inmemPipelineInflight
+
+	shutdown     bool
+	shutdownCh   chan struct{}
+	shutdownLock sync.RWMutex
+}
+
+type inmemPipelineInflight struct {
+	future *appendFuture
+	respCh <-chan RPCResponse
+}
+
+// InmemTransport implements the Transport interface, to allow Raft to be
+// tested in-memory without going over a network.
+type InmemTransport struct {
+	sync.RWMutex
+	consumerCh chan RPC
+	localAddr  ServerAddress
+	peers      map[ServerAddress]*InmemTransport
+	pipelines  []*inmemPipeline
+	timeout    time.Duration
+}
+
+// NewInmemTransportWithTimeout is used to initialize a new transport and
+// generates a random local address if none is specified. The given timeout
+// will be used to decide how long to wait for a connected peer to process the
+// RPCs that we're sending it. See also Connect() and Consumer().
+func NewInmemTransportWithTimeout(addr ServerAddress, timeout time.Duration) (ServerAddress, *InmemTransport) {
+	if string(addr) == "" {
+		addr = NewInmemAddr()
+	}
+	trans := &InmemTransport{
+		consumerCh: make(chan RPC, 16),
+		localAddr:  addr,
+		peers:      make(map[ServerAddress]*InmemTransport),
+		timeout:    timeout,
+	}
+	return addr, trans
+}
+
+// NewInmemTransport is used to initialize a new transport
+// and generates a random local address if none is specified.
+func NewInmemTransport(addr ServerAddress) (ServerAddress, *InmemTransport) {
+	return NewInmemTransportWithTimeout(addr, 500*time.Millisecond)
+}
+
+// SetHeartbeatHandler is used to set optional fast-path for
+// heartbeats, not supported for this transport.
+func (i *InmemTransport) SetHeartbeatHandler(cb func(RPC)) {
+}
+
+// Consumer implements the Transport interface.
+func (i *InmemTransport) Consumer() <-chan RPC {
+	return i.consumerCh
+}
+
+// LocalAddr implements the Transport interface.
+func (i *InmemTransport) LocalAddr() ServerAddress {
+	return i.localAddr
+}
+
+// AppendEntriesPipeline returns an interface that can be used to pipeline
+// AppendEntries requests.
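The pipelined `AppendEntries` path announced above follows. As an aside on the in-memory pieces themselves: `InmemStore` doubles as both `LogStore` and `StableStore` in tests. A hedged sketch using only APIs from these hunks (the key name is arbitrary for the demo):

```go
package raftexample

import (
	"fmt"

	"github.com/hashicorp/raft"
)

// inmemStoreDemo shows InmemStore serving both store interfaces in a test.
func inmemStoreDemo() error {
	store := raft.NewInmemStore()

	// LogStore side: low/high indexes are tracked as logs are stored.
	if err := store.StoreLog(&raft.Log{Index: 1, Term: 1, Type: raft.LogCommand, Data: []byte("x")}); err != nil {
		return err
	}
	first, _ := store.FirstIndex()
	last, _ := store.LastIndex()
	fmt.Println(first, last) // 1 1

	// StableStore side: raft persists small values like terms through these.
	if err := store.SetUint64([]byte("demo-term"), 3); err != nil {
		return err
	}
	term, err := store.GetUint64([]byte("demo-term"))
	fmt.Println(term) // 3
	return err
}
```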
+func (i *InmemTransport) AppendEntriesPipeline(id ServerID, target ServerAddress) (AppendPipeline, error) { + i.Lock() + defer i.Unlock() + + peer, ok := i.peers[target] + if !ok { + return nil, fmt.Errorf("failed to connect to peer: %v", target) + } + pipeline := newInmemPipeline(i, peer, target) + i.pipelines = append(i.pipelines, pipeline) + return pipeline, nil +} + +// AppendEntries implements the Transport interface. +func (i *InmemTransport) AppendEntries(id ServerID, target ServerAddress, args *AppendEntriesRequest, resp *AppendEntriesResponse) error { + rpcResp, err := i.makeRPC(target, args, nil, i.timeout) + if err != nil { + return err + } + + // Copy the result back + out := rpcResp.Response.(*AppendEntriesResponse) + *resp = *out + return nil +} + +// RequestVote implements the Transport interface. +func (i *InmemTransport) RequestVote(id ServerID, target ServerAddress, args *RequestVoteRequest, resp *RequestVoteResponse) error { + rpcResp, err := i.makeRPC(target, args, nil, i.timeout) + if err != nil { + return err + } + + // Copy the result back + out := rpcResp.Response.(*RequestVoteResponse) + *resp = *out + return nil +} + +func (i *InmemTransport) RequestPreVote(id ServerID, target ServerAddress, args *RequestPreVoteRequest, resp *RequestPreVoteResponse) error { + rpcResp, err := i.makeRPC(target, args, nil, i.timeout) + if err != nil { + return err + } + + // Copy the result back + out := rpcResp.Response.(*RequestPreVoteResponse) + *resp = *out + return nil +} + +// InstallSnapshot implements the Transport interface. +func (i *InmemTransport) InstallSnapshot(id ServerID, target ServerAddress, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error { + rpcResp, err := i.makeRPC(target, args, data, 10*i.timeout) + if err != nil { + return err + } + + // Copy the result back + out := rpcResp.Response.(*InstallSnapshotResponse) + *resp = *out + return nil +} + +// TimeoutNow implements the Transport interface. +func (i *InmemTransport) TimeoutNow(id ServerID, target ServerAddress, args *TimeoutNowRequest, resp *TimeoutNowResponse) error { + rpcResp, err := i.makeRPC(target, args, nil, 10*i.timeout) + if err != nil { + return err + } + + // Copy the result back + out := rpcResp.Response.(*TimeoutNowResponse) + *resp = *out + return nil +} + +func (i *InmemTransport) makeRPC(target ServerAddress, args interface{}, r io.Reader, timeout time.Duration) (rpcResp RPCResponse, err error) { + i.RLock() + peer, ok := i.peers[target] + i.RUnlock() + + if !ok { + err = fmt.Errorf("failed to connect to peer: %v", target) + return + } + + // Send the RPC over + respCh := make(chan RPCResponse, 1) + req := RPC{ + Command: args, + Reader: r, + RespChan: respCh, + } + select { + case peer.consumerCh <- req: + case <-time.After(timeout): + err = fmt.Errorf("send timed out") + return + } + + // Wait for a response + select { + case rpcResp = <-respCh: + if rpcResp.Error != nil { + err = rpcResp.Error + } + case <-time.After(timeout): + err = fmt.Errorf("command timed out") + } + return +} + +// EncodePeer implements the Transport interface. +func (i *InmemTransport) EncodePeer(id ServerID, p ServerAddress) []byte { + return []byte(p) +} + +// DecodePeer implements the Transport interface. +func (i *InmemTransport) DecodePeer(buf []byte) ServerAddress { + return ServerAddress(buf) +} + +// Connect is used to connect this transport to another transport for +// a given peer name. This allows for local routing. 
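`Connect`, implemented next, is the explicit routing step. A hedged sketch of wiring two in-memory transports together and answering a single RPC from the consumer channel (IDs are placeholders):

```go
package raftexample

import "github.com/hashicorp/raft"

// wireTransports routes two in-memory transports to each other and
// answers one RequestVote RPC by hand.
func wireTransports() {
	addrA, transA := raft.NewInmemTransport("")
	addrB, transB := raft.NewInmemTransport("")

	// Routing is explicit; registration in each direction is manual.
	transA.Connect(addrB, transB)
	transB.Connect(addrA, transA)

	// A minimal responder: every RPC arrives on the Consumer channel and
	// is answered through its RespChan via Respond.
	go func() {
		rpc := <-transB.Consumer()
		rpc.Respond(&raft.RequestVoteResponse{}, nil)
	}()

	// Blocks until the responder above replies (or the transport times out).
	var resp raft.RequestVoteResponse
	_ = transA.RequestVote("serverB", addrB, &raft.RequestVoteRequest{}, &resp)
}
```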
+func (i *InmemTransport) Connect(peer ServerAddress, t Transport) {
+	trans := t.(*InmemTransport)
+	i.Lock()
+	defer i.Unlock()
+	i.peers[peer] = trans
+}
+
+// Disconnect is used to remove the ability to route to a given peer.
+func (i *InmemTransport) Disconnect(peer ServerAddress) {
+	i.Lock()
+	defer i.Unlock()
+	delete(i.peers, peer)
+
+	// Disconnect any pipelines
+	n := len(i.pipelines)
+	for idx := 0; idx < n; idx++ {
+		if i.pipelines[idx].peerAddr == peer {
+			i.pipelines[idx].Close()
+			i.pipelines[idx], i.pipelines[n-1] = i.pipelines[n-1], nil
+			idx--
+			n--
+		}
+	}
+	i.pipelines = i.pipelines[:n]
+}
+
+// DisconnectAll is used to remove all routes to peers.
+func (i *InmemTransport) DisconnectAll() {
+	i.Lock()
+	defer i.Unlock()
+	i.peers = make(map[ServerAddress]*InmemTransport)
+
+	// Handle pipelines
+	for _, pipeline := range i.pipelines {
+		pipeline.Close()
+	}
+	i.pipelines = nil
+}
+
+// Close is used to permanently disable the transport
+func (i *InmemTransport) Close() error {
+	i.DisconnectAll()
+	return nil
+}
+
+func newInmemPipeline(trans *InmemTransport, peer *InmemTransport, addr ServerAddress) *inmemPipeline {
+	i := &inmemPipeline{
+		trans:        trans,
+		peer:         peer,
+		peerAddr:     addr,
+		doneCh:       make(chan AppendFuture, 16),
+		inprogressCh: make(chan *inmemPipelineInflight, 16),
+		shutdownCh:   make(chan struct{}),
+	}
+	go i.decodeResponses()
+	return i
+}
+
+func (i *inmemPipeline) decodeResponses() {
+	timeout := i.trans.timeout
+	for {
+		select {
+		case inp := <-i.inprogressCh:
+			var timeoutCh <-chan time.Time
+			if timeout > 0 {
+				timeoutCh = time.After(timeout)
+			}
+
+			select {
+			case rpcResp := <-inp.respCh:
+				// Copy the result back
+				*inp.future.resp = *rpcResp.Response.(*AppendEntriesResponse)
+				inp.future.respond(rpcResp.Error)
+
+				select {
+				case i.doneCh <- inp.future:
+				case <-i.shutdownCh:
+					return
+				}
+
+			case <-timeoutCh:
+				inp.future.respond(fmt.Errorf("command timed out"))
+				select {
+				case i.doneCh <- inp.future:
+				case <-i.shutdownCh:
+					return
+				}
+
+			case <-i.shutdownCh:
+				return
+			}
+		case <-i.shutdownCh:
+			return
+		}
+	}
+}
+
+func (i *inmemPipeline) AppendEntries(args *AppendEntriesRequest, resp *AppendEntriesResponse) (AppendFuture, error) {
+	// Create a new future
+	future := &appendFuture{
+		start: time.Now(),
+		args:  args,
+		resp:  resp,
+	}
+	future.init()
+
+	// Handle a timeout
+	var timeout <-chan time.Time
+	if i.trans.timeout > 0 {
+		timeout = time.After(i.trans.timeout)
+	}
+
+	// Send the RPC over
+	respCh := make(chan RPCResponse, 1)
+	rpc := RPC{
+		Command:  args,
+		RespChan: respCh,
+	}
+
+	// Check if we have already been shut down; otherwise the random choice
+	// made by the select statement below might pick consumerCh even if
+	// shutdownCh was closed.
+ i.shutdownLock.RLock() + shutdown := i.shutdown + i.shutdownLock.RUnlock() + if shutdown { + return nil, ErrPipelineShutdown + } + + select { + case i.peer.consumerCh <- rpc: + case <-timeout: + return nil, fmt.Errorf("command enqueue timeout") + case <-i.shutdownCh: + return nil, ErrPipelineShutdown + } + + // Send to be decoded + select { + case i.inprogressCh <- &inmemPipelineInflight{future, respCh}: + return future, nil + case <-i.shutdownCh: + return nil, ErrPipelineShutdown + } +} + +func (i *inmemPipeline) Consumer() <-chan AppendFuture { + return i.doneCh +} + +func (i *inmemPipeline) Close() error { + i.shutdownLock.Lock() + defer i.shutdownLock.Unlock() + if i.shutdown { + return nil + } + + i.shutdown = true + close(i.shutdownCh) + return nil +} diff --git a/vendor/github.com/hashicorp/raft/log.go b/vendor/github.com/hashicorp/raft/log.go new file mode 100644 index 0000000000000..4ae219327d918 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/log.go @@ -0,0 +1,192 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package raft + +import ( + "fmt" + "time" + + metrics "github.com/armon/go-metrics" +) + +// LogType describes various types of log entries. +type LogType uint8 + +const ( + // LogCommand is applied to a user FSM. + LogCommand LogType = iota + + // LogNoop is used to assert leadership. + LogNoop + + // LogAddPeerDeprecated is used to add a new peer. This should only be used with + // older protocol versions designed to be compatible with unversioned + // Raft servers. See comments in config.go for details. + LogAddPeerDeprecated + + // LogRemovePeerDeprecated is used to remove an existing peer. This should only be + // used with older protocol versions designed to be compatible with + // unversioned Raft servers. See comments in config.go for details. + LogRemovePeerDeprecated + + // LogBarrier is used to ensure all preceding operations have been + // applied to the FSM. It is similar to LogNoop, but instead of returning + // once committed, it only returns once the FSM manager acks it. Otherwise, + // it is possible there are operations committed but not yet applied to + // the FSM. + LogBarrier + + // LogConfiguration establishes a membership change configuration. It is + // created when a server is added, removed, promoted, etc. Only used + // when protocol version 1 or greater is in use. + LogConfiguration +) + +// String returns LogType as a human readable string. +func (lt LogType) String() string { + switch lt { + case LogCommand: + return "LogCommand" + case LogNoop: + return "LogNoop" + case LogAddPeerDeprecated: + return "LogAddPeerDeprecated" + case LogRemovePeerDeprecated: + return "LogRemovePeerDeprecated" + case LogBarrier: + return "LogBarrier" + case LogConfiguration: + return "LogConfiguration" + default: + return fmt.Sprintf("%d", lt) + } +} + +// Log entries are replicated to all members of the Raft cluster +// and form the heart of the replicated state machine. +type Log struct { + // Index holds the index of the log entry. + Index uint64 + + // Term holds the election term of the log entry. + Term uint64 + + // Type holds the type of the log entry. + Type LogType + + // Data holds the log entry's type-specific data. + Data []byte + + // Extensions holds an opaque byte slice of information for middleware. It + // is up to the client of the library to properly modify this as it adds + // layers and remove those layers when appropriate. 
This value is a part of
+	// the log, so very large values could cause timing issues.
+	//
+	// N.B. It is _up to the client_ to handle upgrade paths. For instance if
+	// using this with go-raftchunking, the client should ensure that all Raft
+	// peers are using a version that can handle that extension before ever
+	// actually triggering chunking behavior. It is sometimes sufficient to
+	// ensure that non-leaders are upgraded first, then the current leader is
+	// upgraded, but a leader changeover during this process could lead to
+	// trouble, so gating extension behavior via some flag in the client
+	// program is also a good idea.
+	Extensions []byte
+
+	// AppendedAt stores the time the leader first appended this log to its
+	// LogStore. Followers will observe the leader's time. It is not used for
+	// coordination or as part of the replication protocol at all. It exists only
+	// to provide operational information, for example how many seconds' worth of
+	// logs are present on the leader, which might impact a follower's ability to
+	// catch up after restoring a large snapshot. We should never rely on this
+	// being in the past when appending on a follower or reading a log back,
+	// since clock skew can mean a follower could see a log with a future
+	// timestamp. In general, the leader is also not required to persist the log
+	// before delivering it to followers, although the current implementation
+	// happens to do this.
+	AppendedAt time.Time
+}
+
+// LogStore is used to provide an interface for storing
+// and retrieving logs in a durable fashion.
+type LogStore interface {
+	// FirstIndex returns the first index written. 0 for no entries.
+	FirstIndex() (uint64, error)
+
+	// LastIndex returns the last index written. 0 for no entries.
+	LastIndex() (uint64, error)
+
+	// GetLog gets a log entry at a given index.
+	GetLog(index uint64, log *Log) error
+
+	// StoreLog stores a log entry.
+	StoreLog(log *Log) error
+
+	// StoreLogs stores multiple log entries. By default the logs stored may not
+	// be contiguous with previous logs (i.e. may have a gap in Index since the
+	// last log written). If an implementation can't tolerate this it may
+	// optionally implement `MonotonicLogStore` to indicate that this is not
+	// allowed. This changes Raft's behaviour after restoring a user snapshot to
+	// remove all previous logs instead of relying on a "gap" to signal the
+	// discontinuity between logs before the snapshot and logs after.
+	StoreLogs(logs []*Log) error
+
+	// DeleteRange deletes a range of log entries. The range is inclusive.
+	DeleteRange(min, max uint64) error
+}
+
+// MonotonicLogStore is an optional interface for LogStore implementations that
+// cannot tolerate gaps between the Index values of consecutive log entries. For
+// example, this may allow more efficient indexing because the Index values are
+// densely populated. If true is returned, Raft will avoid relying on gaps to
+// trigger re-syncing logs on followers after a snapshot is restored. The
+// LogStore must have an efficient implementation of DeleteRange for the case
+// where all logs are removed, as this must be called after snapshot restore
+// when gaps are not allowed. We avoid deleting all records for LogStores that
+// do not implement MonotonicLogStore because, although it's always correct to
+// do so, it has a major negative performance impact on the BoltDB store that
+// is currently the most widely used.
+type MonotonicLogStore interface {
+	IsMonotonic() bool
+}
+
+func oldestLog(s LogStore) (Log, error) {
+	var l Log
+
+	// We might get unlucky and have a truncate right between getting the first
+	// log index and fetching it, so keep trying until we succeed or hard fail.
+	var lastFailIdx uint64
+	var lastErr error
+	for {
+		firstIdx, err := s.FirstIndex()
+		if err != nil {
+			return l, err
+		}
+		if firstIdx == 0 {
+			return l, ErrLogNotFound
+		}
+		if firstIdx == lastFailIdx {
+			// Got the same index as last time around, which errored; don't
+			// bother trying to fetch it again, just return the error.
+			return l, lastErr
+		}
+		err = s.GetLog(firstIdx, &l)
+		if err == nil {
+			// We found the oldest log, break the loop
+			break
+		}
+		// We failed, keep trying to see if there is a new firstIndex
+		lastFailIdx = firstIdx
+		lastErr = err
+	}
+	return l, nil
+}
+
+func emitLogStoreMetrics(s LogStore, prefix []string, interval time.Duration, stopCh <-chan struct{}) {
+	for {
+		select {
+		case <-time.After(interval):
+			// In error case emit 0 as the age
+			ageMs := float32(0.0)
+			l, err := oldestLog(s)
+			if err == nil && !l.AppendedAt.IsZero() {
+				ageMs = float32(time.Since(l.AppendedAt).Milliseconds())
+			}
+			metrics.SetGauge(append(prefix, "oldestLogAge"), ageMs)
+		case <-stopCh:
+			return
+		}
+	}
+}
diff --git a/vendor/github.com/hashicorp/raft/log_cache.go b/vendor/github.com/hashicorp/raft/log_cache.go
new file mode 100644
index 0000000000000..2cc3885aa616c
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/log_cache.go
@@ -0,0 +1,95 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package raft
+
+import (
+	"fmt"
+	"sync"
+)
+
+// LogCache wraps any LogStore implementation to provide an
+// in-memory ring buffer. This is used to cache access to
+// the recently written entries. For implementations that do not
+// cache themselves, this can provide a substantial boost by
+// avoiding disk I/O on recent entries.
+type LogCache struct {
+	store LogStore
+
+	cache []*Log
+	l     sync.RWMutex
+}
+
+// NewLogCache is used to create a new LogCache with the
+// given capacity and backend store.
+func NewLogCache(capacity int, store LogStore) (*LogCache, error) {
+	if capacity <= 0 {
+		return nil, fmt.Errorf("capacity must be positive")
+	}
+	c := &LogCache{
+		store: store,
+		cache: make([]*Log, capacity),
+	}
+	return c, nil
+}
+
+// IsMonotonic implements the MonotonicLogStore interface. This is a shim to
+// expose the underlying store as monotonically indexed or not.
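`IsMonotonic`, shown below, simply forwards to the wrapped store. Construction is a one-liner; a minimal sketch of layering `LogCache` over the in-memory store:

```go
package raftexample

import (
	"log"

	"github.com/hashicorp/raft"
)

// newCachedStore caches the 512 most recent entries in front of any LogStore.
func newCachedStore() raft.LogStore {
	base := raft.NewInmemStore()
	cached, err := raft.NewLogCache(512, base)
	if err != nil {
		log.Fatal(err) // only fails for capacity <= 0
	}
	// cached satisfies LogStore; reads of recent indexes skip the backend.
	return cached
}
```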
+func (c *LogCache) IsMonotonic() bool {
+	if store, ok := c.store.(MonotonicLogStore); ok {
+		return store.IsMonotonic()
+	}
+
+	return false
+}
+
+func (c *LogCache) GetLog(idx uint64, log *Log) error {
+	// Check the buffer for an entry
+	c.l.RLock()
+	cached := c.cache[idx%uint64(len(c.cache))]
+	c.l.RUnlock()
+
+	// Check if entry is valid
+	if cached != nil && cached.Index == idx {
+		*log = *cached
+		return nil
+	}
+
+	// Forward request on cache miss
+	return c.store.GetLog(idx, log)
+}
+
+func (c *LogCache) StoreLog(log *Log) error {
+	return c.StoreLogs([]*Log{log})
+}
+
+func (c *LogCache) StoreLogs(logs []*Log) error {
+	err := c.store.StoreLogs(logs)
+	// Insert the logs into the ring buffer, but only on success
+	if err != nil {
+		return fmt.Errorf("unable to store logs within log store, err: %q", err)
+	}
+	c.l.Lock()
+	for _, l := range logs {
+		c.cache[l.Index%uint64(len(c.cache))] = l
+	}
+	c.l.Unlock()
+	return nil
+}
+
+func (c *LogCache) FirstIndex() (uint64, error) {
+	return c.store.FirstIndex()
+}
+
+func (c *LogCache) LastIndex() (uint64, error) {
+	return c.store.LastIndex()
+}
+
+func (c *LogCache) DeleteRange(min, max uint64) error {
+	// Invalidate the cache on deletes
+	c.l.Lock()
+	c.cache = make([]*Log, len(c.cache))
+	c.l.Unlock()
+
+	return c.store.DeleteRange(min, max)
+}
diff --git a/vendor/github.com/hashicorp/raft/membership.md b/vendor/github.com/hashicorp/raft/membership.md
new file mode 100644
index 0000000000000..df1f83e27f695
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/membership.md
@@ -0,0 +1,83 @@
+Simon (@superfell) and I (@ongardie) talked through reworking this library's cluster membership changes last Friday. We don't see a way to split this into independent patches, so we're taking the next best approach: submitting the plan here for review, then working on an enormous PR. Your feedback would be appreciated. (@superfell is out this week, however, so don't expect him to respond quickly.)
+
+These are the main goals:
+ - Bringing things in line with the description in my PhD dissertation;
+ - Catching up new servers prior to granting them a vote, as well as allowing permanent non-voting members; and
+ - Eliminating the `peers.json` file, to avoid issues of consistency between that and the log/snapshot.
+
+## Data-centric view
+
+We propose to re-define a *configuration* as a set of servers, where each server includes an address (as it does today) and a mode that is either:
+ - *Voter*: a server whose vote is counted in elections and whose match index is used in advancing the leader's commit index.
+ - *Nonvoter*: a server that receives log entries but is not considered for elections or commitment purposes.
+ - *Staging*: a server that acts like a nonvoter with one exception: once a staging server receives enough log entries to catch up sufficiently to the leader's log, the leader will invoke a membership change to change the staging server to a voter.
+
+All changes to the configuration will be done by writing a new configuration to the log. The new configuration will be in effect as soon as it is appended to the log (not when it is committed like a normal state machine command). Note that, per my dissertation, there can be at most one uncommitted configuration at a time (the next configuration may not be created until the prior one has been committed). It's not strictly necessary to follow these same rules for the nonvoter/staging servers, but we think it's best to treat all changes uniformly.
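For orientation, this model is essentially what the library ships today as `Configuration`, `Server`, and the `Voter`/`Nonvoter`/`Staging` suffrage values. A hedged sketch of a bootstrap configuration under that model (IDs and addresses are placeholders):

```go
package raftexample

import "github.com/hashicorp/raft"

// bootstrapConfig is one possible initial configuration under the model
// described above: two voters and one permanent nonvoter.
var bootstrapConfig = raft.Configuration{
	Servers: []raft.Server{
		{Suffrage: raft.Voter, ID: "node1", Address: "10.0.0.1:8300"},
		{Suffrage: raft.Voter, ID: "node2", Address: "10.0.0.2:8300"},
		{Suffrage: raft.Nonvoter, ID: "observer", Address: "10.0.0.3:8300"},
	},
}
```

A configuration of this shape is what the library's `BootstrapCluster` entry point ultimately consumes, replacing the `peers.json` bootstrap path this document proposes eliminating.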
+ +Each server will track two configurations: + 1. its *committed configuration*: the latest configuration in the log/snapshot that has been committed, along with its index. + 2. its *latest configuration*: the latest configuration in the log/snapshot (may be committed or uncommitted), along with its index. + +When there's no membership change happening, these two will be the same. The latest configuration is almost always the one used, except: + - When followers truncate the suffix of their logs, they may need to fall back to the committed configuration. + - When snapshotting, the committed configuration is written, to correspond with the committed log prefix that is being snapshotted. + + +## Application API + +We propose the following operations for clients to manipulate the cluster configuration: + - AddVoter: server becomes staging unless voter, + - AddNonvoter: server becomes nonvoter unless staging or voter, + - DemoteVoter: server becomes nonvoter unless absent, + - RemovePeer: server removed from configuration, + - GetConfiguration: waits for latest config to commit, returns committed config. + +This diagram, of which I'm quite proud, shows the possible transitions: +``` ++-----------------------------------------------------------------------------+ +| | +| Start -> +--------+ | +| ,------<------------| | | +| / | absent | | +| / RemovePeer--> | | <---RemovePeer | +| / | +--------+ \ | +| / | | \ | +| AddNonvoter | AddVoter \ | +| | ,->---' `--<-. | \ | +| v / \ v \ | +| +----------+ +----------+ +----------+ | +| | | ---AddVoter--> | | -log caught up --> | | | +| | nonvoter | | staging | | voter | | +| | | <-DemoteVoter- | | ,- | | | +| +----------+ \ +----------+ / +----------+ | +| \ / | +| `--------------<---------------' | +| | ++-----------------------------------------------------------------------------+ +``` + +While these operations aren't quite symmetric, we think they're a good set to capture +the possible intent of the user. For example, if I want to make sure a server doesn't have a vote, but the server isn't part of the configuration at all, it probably shouldn't be added as a nonvoting server. + +Each of these application-level operations will be interpreted by the leader and, if it has an effect, will cause the leader to write a new configuration entry to its log. Which particular application-level operation caused the log entry to be written need not be part of the log entry. + +## Code implications + +This is a non-exhaustive list, but we came up with a few things: +- Remove the PeerStore: the `peers.json` file introduces the possibility of getting out of sync with the log and snapshot, and it's hard to maintain this atomically as the log changes. It's not clear whether it's meant to track the committed or latest configuration, either. +- Servers will have to search their snapshot and log to find the committed configuration and the latest configuration on startup. +- Bootstrap will no longer use `peers.json` but should initialize the log or snapshot with an application-provided configuration entry. +- Snapshots should store the index of their configuration along with the configuration itself. In my experience with LogCabin, the original log index of the configuration is very useful to include in debug log messages. +- As noted in hashicorp/raft#84, configuration change requests should come in via a separate channel, and one may not proceed until the last has been committed. 
+- As to deciding when a log is sufficiently caught up, implementing a sophisticated algorithm *is* something that can be done in a separate PR. An easy and decent placeholder is: once the staging server has reached 95% of the leader's commit index, promote it. + +## Feedback + +Again, we're looking for feedback here before we start working on this. Here are some questions to think about: + - Does this seem like where we want things to go? + - Is there anything here that should be left out? + - Is there anything else we're forgetting about? + - Is there a good way to break this up? + - What do we need to worry about in terms of backwards compatibility? + - What implication will this have on current tests? + - What's the best way to test this code, in particular the small changes that will be sprinkled all over the library? diff --git a/vendor/github.com/hashicorp/raft/net_transport.go b/vendor/github.com/hashicorp/raft/net_transport.go new file mode 100644 index 0000000000000..1bac17d66462f --- /dev/null +++ b/vendor/github.com/hashicorp/raft/net_transport.go @@ -0,0 +1,912 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package raft + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "net" + "os" + "sync" + "time" + + "github.com/armon/go-metrics" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-msgpack/v2/codec" +) + +const ( + rpcAppendEntries uint8 = iota + rpcRequestVote + rpcInstallSnapshot + rpcTimeoutNow + rpcRequestPreVote + + // DefaultTimeoutScale is the default TimeoutScale in a NetworkTransport. + DefaultTimeoutScale = 256 * 1024 // 256KB + + // DefaultMaxRPCsInFlight is the default value used for pipelining configuration + // if a zero value is passed. See https://github.com/hashicorp/raft/pull/541 + // for rationale. Note, if this is changed we should update the doc comments + // below for NetworkTransportConfig.MaxRPCsInFlight. + DefaultMaxRPCsInFlight = 2 + + // connReceiveBufferSize is the size of the buffer we will use for reading RPC requests into + // on followers + connReceiveBufferSize = 256 * 1024 // 256KB + + // connSendBufferSize is the size of the buffer we will use for sending RPC request data from + // the leader to followers. + connSendBufferSize = 256 * 1024 // 256KB + + // minInFlightForPipelining is a property of our current pipelining + // implementation and must not be changed unless we change the invariants of + // that implementation. Roughly speaking even with a zero-length in-flight + // buffer we still allow 2 requests to be in-flight before we block because we + // only block after sending and the receiving go-routine always unblocks the + // chan right after first send. This is a constant just to provide context + // rather than a magic number in a few places we have to check invariants to + // avoid panics etc. + minInFlightForPipelining = 2 +) + +var ( + // ErrTransportShutdown is returned when operations on a transport are + // invoked after it's been terminated. + ErrTransportShutdown = errors.New("transport shutdown") + + // ErrPipelineShutdown is returned when the pipeline is closed. + ErrPipelineShutdown = errors.New("append pipeline closed") +) + +// NetworkTransport provides a network based transport that can be +// used to communicate with Raft on remote machines. It requires +// an underlying stream layer to provide a stream abstraction, which can +// be simple TCP, TLS, etc. +// +// This transport is very simple and lightweight. 
Each RPC request is +// framed by sending a byte that indicates the message type, followed +// by the MsgPack encoded request. +// +// The response is an error string followed by the response object, +// both are encoded using MsgPack. +// +// InstallSnapshot is special, in that after the RPC request we stream +// the entire state. That socket is not re-used as the connection state +// is not known if there is an error. +type NetworkTransport struct { + connPool map[ServerAddress][]*netConn + connPoolLock sync.Mutex + + consumeCh chan RPC + + heartbeatFn func(RPC) + heartbeatFnLock sync.Mutex + + logger hclog.Logger + + maxPool int + maxInFlight int + + serverAddressLock sync.RWMutex + serverAddressProvider ServerAddressProvider + + shutdown bool + shutdownCh chan struct{} + shutdownLock sync.Mutex + + stream StreamLayer + + // streamCtx is used to cancel existing connection handlers. + streamCtx context.Context + streamCancel context.CancelFunc + streamCtxLock sync.RWMutex + + timeout time.Duration + TimeoutScale int + + msgpackUseNewTimeFormat bool +} + +// NetworkTransportConfig encapsulates configuration for the network transport layer. +type NetworkTransportConfig struct { + // ServerAddressProvider is used to override the target address when establishing a connection to invoke an RPC + ServerAddressProvider ServerAddressProvider + + Logger hclog.Logger + + // Dialer + Stream StreamLayer + + // MaxPool controls how many connections we will pool + MaxPool int + + // MaxRPCsInFlight controls the pipelining "optimization" when replicating + // entries to followers. + // + // Setting this to 1 explicitly disables pipelining since no overlapping of + // request processing is allowed. If set to 1 the pipelining code path is + // skipped entirely and every request is entirely synchronous. + // + // If zero is set (or left as default), DefaultMaxRPCsInFlight is used which + // is currently 2. A value of 2 overlaps the preparation and sending of the + // next request while waiting for the previous response, but avoids additional + // queuing. + // + // Historically this was internally fixed at (effectively) 130 however + // performance testing has shown that in practice the pipelining optimization + // combines badly with batching and actually has a very large negative impact + // on commit latency when throughput is high, whilst having very little + // benefit on latency or throughput in any other case! See + // [#541](https://github.com/hashicorp/raft/pull/541) for more analysis of the + // performance impacts. + // + // Increasing this beyond 2 is likely to be beneficial only in very + // high-latency network conditions. HashiCorp doesn't recommend using our own + // products this way. + // + // To maintain the behavior from before version 1.4.1 exactly, set this to + // 130. The old internal constant was 128 but was used directly as a channel + // buffer size. Since we send before blocking on the channel and unblock the + // channel as soon as the receiver is done with the earliest outstanding + // request, even an unbuffered channel (buffer=0) allows one request to be + // sent while waiting for the previous one (i.e. 2 inflight). so the old + // buffer actually allowed 130 RPCs to be inflight at once. + MaxRPCsInFlight int + + // Timeout is used to apply I/O deadlines. For InstallSnapshot, we multiply + // the timeout by (SnapshotSize / TimeoutScale). 
+	Timeout time.Duration
+
+	// MsgpackUseNewTimeFormat, when set to true, forces the underlying msgpack
+	// codec to use the new format of time.Time when encoding (used in
+	// go-msgpack v1.1.5 by default). Decoding is not affected, as all
+	// go-msgpack v2.1.0+ decoders know how to decode both formats.
+	MsgpackUseNewTimeFormat bool
+}
+
+// ServerAddressProvider resolves the target address used when establishing a
+// connection to invoke an RPC on a server with a known ID.
+type ServerAddressProvider interface {
+	ServerAddr(id ServerID) (ServerAddress, error)
+}
+
+// StreamLayer is used with the NetworkTransport to provide
+// the low level stream abstraction.
+type StreamLayer interface {
+	net.Listener
+
+	// Dial is used to create a new outgoing connection
+	Dial(address ServerAddress, timeout time.Duration) (net.Conn, error)
+}
+
+type netConn struct {
+	target ServerAddress
+	conn   net.Conn
+	w      *bufio.Writer
+	dec    *codec.Decoder
+	enc    *codec.Encoder
+}
+
+func (n *netConn) Release() error {
+	return n.conn.Close()
+}
+
+type netPipeline struct {
+	conn  *netConn
+	trans *NetworkTransport
+
+	doneCh       chan AppendFuture
+	inprogressCh chan *appendFuture
+
+	shutdown     bool
+	shutdownCh   chan struct{}
+	shutdownLock sync.Mutex
+}
+
+// NewNetworkTransportWithConfig creates a new network transport with the given config struct
+func NewNetworkTransportWithConfig(
+	config *NetworkTransportConfig,
+) *NetworkTransport {
+	if config.Logger == nil {
+		config.Logger = hclog.New(&hclog.LoggerOptions{
+			Name:   "raft-net",
+			Output: hclog.DefaultOutput,
+			Level:  hclog.DefaultLevel,
+		})
+	}
+	maxInFlight := config.MaxRPCsInFlight
+	if maxInFlight == 0 {
+		// Default zero value
+		maxInFlight = DefaultMaxRPCsInFlight
+	}
+	trans := &NetworkTransport{
+		connPool:                make(map[ServerAddress][]*netConn),
+		consumeCh:               make(chan RPC),
+		logger:                  config.Logger,
+		maxPool:                 config.MaxPool,
+		maxInFlight:             maxInFlight,
+		shutdownCh:              make(chan struct{}),
+		stream:                  config.Stream,
+		timeout:                 config.Timeout,
+		TimeoutScale:            DefaultTimeoutScale,
+		serverAddressProvider:   config.ServerAddressProvider,
+		msgpackUseNewTimeFormat: config.MsgpackUseNewTimeFormat,
+	}
+
+	// Create the connection context and then start our listener.
+	trans.setupStreamContext()
+	go trans.listen()
+
+	return trans
+}
+
+// NewNetworkTransport creates a new network transport with the given dialer
+// and listener. The maxPool controls how many connections we will pool. The
+// timeout is used to apply I/O deadlines. For InstallSnapshot, we multiply
+// the timeout by (SnapshotSize / TimeoutScale).
+func NewNetworkTransport(
+	stream StreamLayer,
+	maxPool int,
+	timeout time.Duration,
+	logOutput io.Writer,
+) *NetworkTransport {
+	if logOutput == nil {
+		logOutput = os.Stderr
+	}
+	logger := hclog.New(&hclog.LoggerOptions{
+		Name:   "raft-net",
+		Output: logOutput,
+		Level:  hclog.DefaultLevel,
+	})
+	config := &NetworkTransportConfig{Stream: stream, MaxPool: maxPool, Timeout: timeout, Logger: logger}
+	return NewNetworkTransportWithConfig(config)
+}
+
+// NewNetworkTransportWithLogger creates a new network transport with the given logger, dialer
+// and listener. The maxPool controls how many connections we will pool. The
+// timeout is used to apply I/O deadlines. For InstallSnapshot, we multiply
+// the timeout by (SnapshotSize / TimeoutScale).
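`NewNetworkTransportWithLogger` follows. For the config-struct path, here is a hedged sketch that also disables pipelining per the `MaxRPCsInFlight` docs above; it assumes the `NewTCPTransportWithConfig` helper from this package's tcp_transport.go (outside this hunk), so treat that constructor and its exact signature as an assumption:

```go
package raftexample

import (
	"time"

	"github.com/hashicorp/raft"
)

// newTransport builds a transport from NetworkTransportConfig. Setting
// MaxRPCsInFlight to 1 skips the pipelining path entirely, per the
// field docs above.
func newTransport() (*raft.NetworkTransport, error) {
	config := &raft.NetworkTransportConfig{
		MaxPool:         3,
		Timeout:         10 * time.Second,
		MaxRPCsInFlight: 1, // fully synchronous replication RPCs
	}
	// Assumed helper: fills config.Stream with a TCP StreamLayer bound to
	// the given address (nil advertise address means use the bind address).
	return raft.NewTCPTransportWithConfig("127.0.0.1:8300", nil, config)
}
```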
+func NewNetworkTransportWithLogger(
+	stream StreamLayer,
+	maxPool int,
+	timeout time.Duration,
+	logger hclog.Logger,
+) *NetworkTransport {
+	config := &NetworkTransportConfig{Stream: stream, MaxPool: maxPool, Timeout: timeout, Logger: logger}
+	return NewNetworkTransportWithConfig(config)
+}
+
+// setupStreamContext is used to create a new stream context. This should be
+// called with the stream lock held.
+func (n *NetworkTransport) setupStreamContext() {
+	ctx, cancel := context.WithCancel(context.Background())
+	n.streamCtx = ctx
+	n.streamCancel = cancel
+}
+
+// getStreamContext is used to retrieve the current stream context.
+func (n *NetworkTransport) getStreamContext() context.Context {
+	n.streamCtxLock.RLock()
+	defer n.streamCtxLock.RUnlock()
+	return n.streamCtx
+}
+
+// SetHeartbeatHandler is used to set up a heartbeat handler
+// as a fast path. This is to avoid head-of-line blocking from
+// disk IO.
+func (n *NetworkTransport) SetHeartbeatHandler(cb func(rpc RPC)) {
+	n.heartbeatFnLock.Lock()
+	defer n.heartbeatFnLock.Unlock()
+	n.heartbeatFn = cb
+}
+
+// CloseStreams closes the current streams.
+func (n *NetworkTransport) CloseStreams() {
+	n.connPoolLock.Lock()
+	defer n.connPoolLock.Unlock()
+
+	// Close all the connections in the connection pool and then remove their
+	// entry.
+	for k, e := range n.connPool {
+		for _, conn := range e {
+			conn.Release()
+		}
+
+		delete(n.connPool, k)
+	}
+
+	// Cancel the existing connections and create a new context. Both these
+	// operations must always be done with the lock held otherwise we can create
+	// connection handlers that are holding a context that will never be
+	// cancelable.
+	n.streamCtxLock.Lock()
+	n.streamCancel()
+	n.setupStreamContext()
+	n.streamCtxLock.Unlock()
+}
+
+// Close is used to stop the network transport.
+func (n *NetworkTransport) Close() error {
+	n.shutdownLock.Lock()
+	defer n.shutdownLock.Unlock()
+
+	if !n.shutdown {
+		close(n.shutdownCh)
+		n.stream.Close()
+		n.shutdown = true
+	}
+	return nil
+}
+
+// Consumer implements the Transport interface.
+func (n *NetworkTransport) Consumer() <-chan RPC {
+	return n.consumeCh
+}
+
+// LocalAddr implements the Transport interface.
+func (n *NetworkTransport) LocalAddr() ServerAddress {
+	return ServerAddress(n.stream.Addr().String())
+}
+
+// IsShutdown is used to check if the transport is shut down.
+func (n *NetworkTransport) IsShutdown() bool {
+	select {
+	case <-n.shutdownCh:
+		return true
+	default:
+		return false
+	}
+}
+
+// getPooledConn is used to grab a pooled connection.
+func (n *NetworkTransport) getPooledConn(target ServerAddress) *netConn { + n.connPoolLock.Lock() + defer n.connPoolLock.Unlock() + + conns, ok := n.connPool[target] + if !ok || len(conns) == 0 { + return nil + } + + var conn *netConn + num := len(conns) + conn, conns[num-1] = conns[num-1], nil + n.connPool[target] = conns[:num-1] + return conn +} + +// getConnFromAddressProvider returns a connection from the server address provider if available, or defaults to a connection using the target server address +func (n *NetworkTransport) getConnFromAddressProvider(id ServerID, target ServerAddress) (*netConn, error) { + address := n.getProviderAddressOrFallback(id, target) + return n.getConn(address) +} + +func (n *NetworkTransport) getProviderAddressOrFallback(id ServerID, target ServerAddress) ServerAddress { + n.serverAddressLock.RLock() + defer n.serverAddressLock.RUnlock() + if n.serverAddressProvider != nil { + serverAddressOverride, err := n.serverAddressProvider.ServerAddr(id) + if err != nil { + n.logger.Warn("unable to get address for server, using fallback address", "id", id, "fallback", target, "error", err) + } else { + return serverAddressOverride + } + } + return target +} + +// getConn is used to get a connection from the pool. +func (n *NetworkTransport) getConn(target ServerAddress) (*netConn, error) { + // Check for a pooled conn + if conn := n.getPooledConn(target); conn != nil { + return conn, nil + } + + // Dial a new connection + conn, err := n.stream.Dial(target, n.timeout) + if err != nil { + return nil, err + } + + // Wrap the conn + netConn := &netConn{ + target: target, + conn: conn, + dec: codec.NewDecoder(bufio.NewReader(conn), &codec.MsgpackHandle{}), + w: bufio.NewWriterSize(conn, connSendBufferSize), + } + + netConn.enc = codec.NewEncoder(netConn.w, &codec.MsgpackHandle{ + BasicHandle: codec.BasicHandle{ + TimeNotBuiltin: !n.msgpackUseNewTimeFormat, + }, + }) + + // Done + return netConn, nil +} + +// returnConn returns a connection back to the pool. +func (n *NetworkTransport) returnConn(conn *netConn) { + n.connPoolLock.Lock() + defer n.connPoolLock.Unlock() + + key := conn.target + conns := n.connPool[key] + + if !n.IsShutdown() && len(conns) < n.maxPool { + n.connPool[key] = append(conns, conn) + } else { + conn.Release() + } +} + +// AppendEntriesPipeline returns an interface that can be used to pipeline +// AppendEntries requests. +func (n *NetworkTransport) AppendEntriesPipeline(id ServerID, target ServerAddress) (AppendPipeline, error) { + if n.maxInFlight < minInFlightForPipelining { + // Pipelining is disabled since no more than one request can be outstanding + // at once. Skip the whole code path and use synchronous requests. + return nil, ErrPipelineReplicationNotSupported + } + + // Get a connection + conn, err := n.getConnFromAddressProvider(id, target) + if err != nil { + return nil, err + } + + // Create the pipeline + return newNetPipeline(n, conn, n.maxInFlight), nil +} + +// AppendEntries implements the Transport interface. +func (n *NetworkTransport) AppendEntries(id ServerID, target ServerAddress, args *AppendEntriesRequest, resp *AppendEntriesResponse) error { + return n.genericRPC(id, target, rpcAppendEntries, args, resp) +} + +// RequestVote implements the Transport interface. +func (n *NetworkTransport) RequestVote(id ServerID, target ServerAddress, args *RequestVoteRequest, resp *RequestVoteResponse) error { + return n.genericRPC(id, target, rpcRequestVote, args, resp) +} + +// RequestPreVote implements the Transport interface. 
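//
// Illustrative sketch (editor's addition, not part of the vendored file): a
// minimal TCP-backed StreamLayer, only to make the interface contract above
// concrete; the library ships its own TCP transport for real use.
//
//	type exampleTCPStreamLayer struct {
//		net.Listener // Accept, Close, and Addr come from the embedded listener
//	}
//
//	// Dial opens an outgoing connection to the peer within the timeout.
//	func (t exampleTCPStreamLayer) Dial(address ServerAddress, timeout time.Duration) (net.Conn, error) {
//		return net.DialTimeout("tcp", string(address), timeout)
//	}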
+func (n *NetworkTransport) RequestPreVote(id ServerID, target ServerAddress, args *RequestPreVoteRequest, resp *RequestPreVoteResponse) error {
+	return n.genericRPC(id, target, rpcRequestPreVote, args, resp)
+}
+
+// genericRPC handles a simple request/response RPC.
+func (n *NetworkTransport) genericRPC(id ServerID, target ServerAddress, rpcType uint8, args interface{}, resp interface{}) error {
+	// Get a conn
+	conn, err := n.getConnFromAddressProvider(id, target)
+	if err != nil {
+		return err
+	}
+
+	// Set a deadline
+	if n.timeout > 0 {
+		conn.conn.SetDeadline(time.Now().Add(n.timeout))
+	}
+
+	// Send the RPC
+	if err = sendRPC(conn, rpcType, args); err != nil {
+		return err
+	}
+
+	// Decode the response
+	canReturn, err := decodeResponse(conn, resp)
+	if canReturn {
+		n.returnConn(conn)
+	}
+	return err
+}
+
+// InstallSnapshot implements the Transport interface.
+func (n *NetworkTransport) InstallSnapshot(id ServerID, target ServerAddress, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error {
+	// Get a conn, always close for InstallSnapshot
+	conn, err := n.getConnFromAddressProvider(id, target)
+	if err != nil {
+		return err
+	}
+	defer conn.Release()
+
+	// Set a deadline, scaled by request size
+	if n.timeout > 0 {
+		timeout := n.timeout * time.Duration(args.Size/int64(n.TimeoutScale))
+		if timeout < n.timeout {
+			timeout = n.timeout
+		}
+		conn.conn.SetDeadline(time.Now().Add(timeout))
+	}
+
+	// Send the RPC
+	if err = sendRPC(conn, rpcInstallSnapshot, args); err != nil {
+		return err
+	}
+
+	// Stream the state
+	if _, err = io.Copy(conn.w, data); err != nil {
+		return err
+	}
+
+	// Flush
+	if err = conn.w.Flush(); err != nil {
+		return err
+	}
+
+	// Decode the response, do not return conn
+	_, err = decodeResponse(conn, resp)
+	return err
+}
+
+// EncodePeer implements the Transport interface.
+func (n *NetworkTransport) EncodePeer(id ServerID, p ServerAddress) []byte {
+	address := n.getProviderAddressOrFallback(id, p)
+	return []byte(address)
+}
+
+// DecodePeer implements the Transport interface.
+func (n *NetworkTransport) DecodePeer(buf []byte) ServerAddress {
+	return ServerAddress(buf)
+}
+
+// TimeoutNow implements the Transport interface.
+func (n *NetworkTransport) TimeoutNow(id ServerID, target ServerAddress, args *TimeoutNowRequest, resp *TimeoutNowResponse) error {
+	return n.genericRPC(id, target, rpcTimeoutNow, args, resp)
+}
+
+// listen is used to handle incoming connections.
+func (n *NetworkTransport) listen() {
+	const baseDelay = 5 * time.Millisecond
+	const maxDelay = 1 * time.Second
+
+	var loopDelay time.Duration
+	for {
+		// Accept incoming connections
+		conn, err := n.stream.Accept()
+		if err != nil {
+			if loopDelay == 0 {
+				loopDelay = baseDelay
+			} else {
+				loopDelay *= 2
+			}
+
+			if loopDelay > maxDelay {
+				loopDelay = maxDelay
+			}
+
+			if !n.IsShutdown() {
+				n.logger.Error("failed to accept connection", "error", err)
+			}
+
+			select {
+			case <-n.shutdownCh:
+				return
+			case <-time.After(loopDelay):
+				continue
+			}
+		}
+		// No error, reset loop delay
+		loopDelay = 0
+
+		n.logger.Debug("accepted connection", "local-address", n.LocalAddr(), "remote-address", conn.RemoteAddr().String())
+
+		// Handle the connection in a dedicated routine
+		go n.handleConn(n.getStreamContext(), conn)
+	}
+}
+
+// handleConn is used to handle an inbound connection for its lifespan. The
+// handler will exit when the passed context is cancelled or the connection is
+// closed.
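//
// Illustrative sketch (editor's addition, not part of the vendored file):
// the exchange implemented by genericRPC, spelled out over an in-memory
// pipe. A request is one type byte plus the MsgPack-encoded args; a
// response is a MsgPack error string (empty on success) followed by the
// MsgPack response object.
//
//	func exampleFraming() error {
//		client, server := net.Pipe()
//		defer client.Close()
//		defer server.Close()
//
//		go func() {
//			// Server side: type byte, then request; reply with "" + response.
//			r := bufio.NewReader(server)
//			_, _ = r.ReadByte() // rpcAppendEntries
//			var req AppendEntriesRequest
//			_ = codec.NewDecoder(r, &codec.MsgpackHandle{}).Decode(&req)
//			enc := codec.NewEncoder(server, &codec.MsgpackHandle{})
//			_ = enc.Encode("")                       // empty error => success
//			_ = enc.Encode(&AppendEntriesResponse{}) // then the response object
//		}()
//
//		// Client side mirrors sendRPC followed by decodeResponse.
//		w := bufio.NewWriter(client)
//		if err := w.WriteByte(rpcAppendEntries); err != nil {
//			return err
//		}
//		if err := codec.NewEncoder(w, &codec.MsgpackHandle{}).Encode(&AppendEntriesRequest{}); err != nil {
//			return err
//		}
//		if err := w.Flush(); err != nil {
//			return err
//		}
//		dec := codec.NewDecoder(client, &codec.MsgpackHandle{})
//		var rpcError string
//		if err := dec.Decode(&rpcError); err != nil {
//			return err
//		}
//		var resp AppendEntriesResponse
//		return dec.Decode(&resp)
//	}
//
// InstallSnapshot above reuses this framing and then streams the raw
// snapshot bytes, scaling the deadline by size: with Timeout = 10s and the
// default TimeoutScale of 256 KiB, a 1 GiB snapshot gets 10s * 4096, and the
// computed deadline is clamped so it never falls below the base Timeout.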
+func (n *NetworkTransport) handleConn(connCtx context.Context, conn net.Conn) { + defer conn.Close() + r := bufio.NewReaderSize(conn, connReceiveBufferSize) + w := bufio.NewWriter(conn) + dec := codec.NewDecoder(r, &codec.MsgpackHandle{}) + enc := codec.NewEncoder(w, &codec.MsgpackHandle{ + BasicHandle: codec.BasicHandle{ + TimeNotBuiltin: !n.msgpackUseNewTimeFormat, + }, + }) + + for { + select { + case <-connCtx.Done(): + n.logger.Debug("stream layer is closed") + return + default: + } + + if err := n.handleCommand(r, dec, enc); err != nil { + if err != io.EOF { + n.logger.Error("failed to decode incoming command", "error", err) + } + return + } + if err := w.Flush(); err != nil { + n.logger.Error("failed to flush response", "error", err) + return + } + } +} + +// handleCommand is used to decode and dispatch a single command. +func (n *NetworkTransport) handleCommand(r *bufio.Reader, dec *codec.Decoder, enc *codec.Encoder) error { + getTypeStart := time.Now() + + // Get the rpc type + rpcType, err := r.ReadByte() + if err != nil { + return err + } + + // measuring the time to get the first byte separately because the heartbeat conn will hang out here + // for a good while waiting for a heartbeat whereas the append entries/rpc conn should not. + metrics.MeasureSince([]string{"raft", "net", "getRPCType"}, getTypeStart) + decodeStart := time.Now() + + // Create the RPC object + respCh := make(chan RPCResponse, 1) + rpc := RPC{ + RespChan: respCh, + } + + // Decode the command + isHeartbeat := false + var labels []metrics.Label + switch rpcType { + case rpcAppendEntries: + var req AppendEntriesRequest + if err := dec.Decode(&req); err != nil { + return err + } + rpc.Command = &req + + leaderAddr := req.RPCHeader.Addr + if len(leaderAddr) == 0 { + leaderAddr = req.Leader + } + + // Check if this is a heartbeat + if req.Term != 0 && leaderAddr != nil && + req.PrevLogEntry == 0 && req.PrevLogTerm == 0 && + len(req.Entries) == 0 && req.LeaderCommitIndex == 0 { + isHeartbeat = true + } + + if isHeartbeat { + labels = []metrics.Label{{Name: "rpcType", Value: "Heartbeat"}} + } else { + labels = []metrics.Label{{Name: "rpcType", Value: "AppendEntries"}} + } + case rpcRequestVote: + var req RequestVoteRequest + if err := dec.Decode(&req); err != nil { + return err + } + rpc.Command = &req + labels = []metrics.Label{{Name: "rpcType", Value: "RequestVote"}} + case rpcRequestPreVote: + var req RequestPreVoteRequest + if err := dec.Decode(&req); err != nil { + return err + } + rpc.Command = &req + labels = []metrics.Label{{Name: "rpcType", Value: "RequestPreVote"}} + case rpcInstallSnapshot: + var req InstallSnapshotRequest + if err := dec.Decode(&req); err != nil { + return err + } + rpc.Command = &req + rpc.Reader = io.LimitReader(r, req.Size) + labels = []metrics.Label{{Name: "rpcType", Value: "InstallSnapshot"}} + case rpcTimeoutNow: + var req TimeoutNowRequest + if err := dec.Decode(&req); err != nil { + return err + } + rpc.Command = &req + labels = []metrics.Label{{Name: "rpcType", Value: "TimeoutNow"}} + default: + return fmt.Errorf("unknown rpc type %d", rpcType) + } + + metrics.MeasureSinceWithLabels([]string{"raft", "net", "rpcDecode"}, decodeStart, labels) + + processStart := time.Now() + + // Check for heartbeat fast-path + if isHeartbeat { + n.heartbeatFnLock.Lock() + fn := n.heartbeatFn + n.heartbeatFnLock.Unlock() + if fn != nil { + fn(rpc) + goto RESP + } + } + + // Dispatch the RPC + select { + case n.consumeCh <- rpc: + case <-n.shutdownCh: + return ErrTransportShutdown + } + + // 
Wait for response +RESP: + // we will differentiate the heartbeat fast path from normal RPCs with labels + metrics.MeasureSinceWithLabels([]string{"raft", "net", "rpcEnqueue"}, processStart, labels) + respWaitStart := time.Now() + select { + case resp := <-respCh: + defer metrics.MeasureSinceWithLabels([]string{"raft", "net", "rpcRespond"}, respWaitStart, labels) + // Send the error first + respErr := "" + if resp.Error != nil { + respErr = resp.Error.Error() + } + if err := enc.Encode(respErr); err != nil { + return err + } + + // Send the response + if err := enc.Encode(resp.Response); err != nil { + return err + } + case <-n.shutdownCh: + return ErrTransportShutdown + } + return nil +} + +// decodeResponse is used to decode an RPC response and reports whether +// the connection can be reused. +func decodeResponse(conn *netConn, resp interface{}) (bool, error) { + // Decode the error if any + var rpcError string + if err := conn.dec.Decode(&rpcError); err != nil { + conn.Release() + return false, err + } + + // Decode the response + if err := conn.dec.Decode(resp); err != nil { + conn.Release() + return false, err + } + + // Format an error if any + if rpcError != "" { + return true, fmt.Errorf(rpcError) + } + return true, nil +} + +// sendRPC is used to encode and send the RPC. +func sendRPC(conn *netConn, rpcType uint8, args interface{}) error { + // Write the request type + if err := conn.w.WriteByte(rpcType); err != nil { + conn.Release() + return err + } + + // Send the request + if err := conn.enc.Encode(args); err != nil { + conn.Release() + return err + } + + // Flush + if err := conn.w.Flush(); err != nil { + conn.Release() + return err + } + return nil +} + +// newNetPipeline is used to construct a netPipeline from a given transport and +// connection. It is a bug to ever call this with maxInFlight less than 2 +// (minInFlightForPipelining) and will cause a panic. +func newNetPipeline(trans *NetworkTransport, conn *netConn, maxInFlight int) *netPipeline { + if maxInFlight < minInFlightForPipelining { + // Shouldn't happen (tm) since we validate this in the one call site and + // skip pipelining if it's lower. + panic("pipelining makes no sense if maxInFlight < 2") + } + n := &netPipeline{ + conn: conn, + trans: trans, + // The buffer size is 2 less than the configured max because we send before + // waiting on the channel and the decode routine unblocks the channel as + // soon as it's waiting on the first request. So a zero-buffered channel + // still allows 1 request to be sent even while decode is still waiting for + // a response from the previous one. i.e. two are inflight at the same time. + inprogressCh: make(chan *appendFuture, maxInFlight-2), + doneCh: make(chan AppendFuture, maxInFlight-2), + shutdownCh: make(chan struct{}), + } + go n.decodeResponses() + return n +} + +// decodeResponses is a long running routine that decodes the responses +// sent on the connection. +func (n *netPipeline) decodeResponses() { + timeout := n.trans.timeout + for { + select { + case future := <-n.inprogressCh: + if timeout > 0 { + n.conn.conn.SetReadDeadline(time.Now().Add(timeout)) + } + + _, err := decodeResponse(n.conn, future.resp) + future.respond(err) + select { + case n.doneCh <- future: + case <-n.shutdownCh: + return + } + case <-n.shutdownCh: + return + } + } +} + +// AppendEntries is used to pipeline a new append entries request. 
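//
// Editor's note (not part of the vendored file): a worked example of the
// inprogressCh sizing in newNetPipeline above. One request can be in flight
// because AppendEntries sends before blocking on the channel, and one more
// because decodeResponses drains the channel as soon as it starts waiting on
// the earliest response, so:
//
//	maxInFlight = 2   -> buffer 0  (unbuffered; 2 requests in flight)
//	maxInFlight = 64  -> buffer 62 (64 requests in flight)
//	maxInFlight = 130 -> buffer 128, the pre-1.4.1 internal constant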
+func (n *netPipeline) AppendEntries(args *AppendEntriesRequest, resp *AppendEntriesResponse) (AppendFuture, error) {
+	// Create a new future
+	future := &appendFuture{
+		start: time.Now(),
+		args:  args,
+		resp:  resp,
+	}
+	future.init()
+
+	// Add a send timeout
+	if timeout := n.trans.timeout; timeout > 0 {
+		n.conn.conn.SetWriteDeadline(time.Now().Add(timeout))
+	}
+
+	// Send the RPC
+	if err := sendRPC(n.conn, rpcAppendEntries, future.args); err != nil {
+		return nil, err
+	}
+
+	// Hand off for decoding; this can also cause back-pressure
+	// to prevent too many inflight requests
+	select {
+	case n.inprogressCh <- future:
+		return future, nil
+	case <-n.shutdownCh:
+		return nil, ErrPipelineShutdown
+	}
+}
+
+// Consumer returns a channel that can be used to consume complete futures.
+func (n *netPipeline) Consumer() <-chan AppendFuture {
+	return n.doneCh
+}
+
+// Close is used to shut down the pipeline connection.
+func (n *netPipeline) Close() error {
+	n.shutdownLock.Lock()
+	defer n.shutdownLock.Unlock()
+	if n.shutdown {
+		return nil
+	}
+
+	// Release the connection
+	n.conn.Release()
+
+	n.shutdown = true
+	close(n.shutdownCh)
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/raft/observer.go b/vendor/github.com/hashicorp/raft/observer.go
new file mode 100644
index 0000000000000..400a381ede4c2
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/observer.go
@@ -0,0 +1,149 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package raft
+
+import (
+	"sync/atomic"
+	"time"
+)
+
+// Observation is sent along the given channel to observers when an event occurs.
+type Observation struct {
+	// Raft holds the Raft instance generating the observation.
+	Raft *Raft
+	// Data holds observation-specific data. Possible types are
+	// RequestVoteRequest
+	// RaftState
+	// PeerObservation
+	// LeaderObservation
+	Data interface{}
+}
+
+// LeaderObservation is used for the data when leadership changes.
+type LeaderObservation struct {
+	// Deprecated: use LeaderAddr instead.
+	Leader     ServerAddress
+	LeaderAddr ServerAddress
+	LeaderID   ServerID
+}
+
+// PeerObservation is sent to observers when peers change.
+type PeerObservation struct {
+	Removed bool
+	Peer    Server
+}
+
+// FailedHeartbeatObservation is sent when a node fails to heartbeat with the leader
+type FailedHeartbeatObservation struct {
+	PeerID      ServerID
+	LastContact time.Time
+}
+
+// ResumedHeartbeatObservation is sent when a node resumes heartbeating with the leader following failures
+type ResumedHeartbeatObservation struct {
+	PeerID ServerID
+}
+
+// nextObserverID is used to provide a unique ID for each observer to aid in
+// deregistration.
+var nextObserverID uint64
+
+// FilterFn is a function that can be registered in order to filter observations.
+// The function reports whether the observation should be included - if
+// it returns false, the observation will be filtered out.
+type FilterFn func(o *Observation) bool
+
+// Observer describes what to do with a given observation.
+type Observer struct {
+	// numObserved and numDropped are performance counters for this observer.
+	// 64 bit types must be 64 bit aligned to use with atomic operations on
+	// 32 bit platforms, so keep them at the top of the struct.
+	numObserved uint64
+	numDropped  uint64
+
+	// channel receives observations.
+	channel chan Observation
+
+	// blocking, if true, will cause Raft to block when sending an observation
+	// to this observer. This should generally be set to false.
+ blocking bool + + // filter will be called to determine if an observation should be sent to + // the channel. + filter FilterFn + + // id is the ID of this observer in the Raft map. + id uint64 +} + +// NewObserver creates a new observer that can be registered +// to make observations on a Raft instance. Observations +// will be sent on the given channel if they satisfy the +// given filter. +// +// If blocking is true, the observer will block when it can't +// send on the channel, otherwise it may discard events. +func NewObserver(channel chan Observation, blocking bool, filter FilterFn) *Observer { + return &Observer{ + channel: channel, + blocking: blocking, + filter: filter, + id: atomic.AddUint64(&nextObserverID, 1), + } +} + +// GetNumObserved returns the number of observations. +func (or *Observer) GetNumObserved() uint64 { + return atomic.LoadUint64(&or.numObserved) +} + +// GetNumDropped returns the number of dropped observations due to blocking. +func (or *Observer) GetNumDropped() uint64 { + return atomic.LoadUint64(&or.numDropped) +} + +// RegisterObserver registers a new observer. +func (r *Raft) RegisterObserver(or *Observer) { + r.observersLock.Lock() + defer r.observersLock.Unlock() + r.observers[or.id] = or +} + +// DeregisterObserver deregisters an observer. +func (r *Raft) DeregisterObserver(or *Observer) { + r.observersLock.Lock() + defer r.observersLock.Unlock() + delete(r.observers, or.id) +} + +// observe sends an observation to every observer. +func (r *Raft) observe(o interface{}) { + // In general observers should not block. But in any case this isn't + // disastrous as we only hold a read lock, which merely prevents + // registration / deregistration of observers. + r.observersLock.RLock() + defer r.observersLock.RUnlock() + for _, or := range r.observers { + // It's wasteful to do this in the loop, but for the common case + // where there are no observers we won't create any objects. + ob := Observation{Raft: r, Data: o} + if or.filter != nil && !or.filter(&ob) { + continue + } + if or.channel == nil { + continue + } + if or.blocking { + or.channel <- ob + atomic.AddUint64(&or.numObserved, 1) + } else { + select { + case or.channel <- ob: + atomic.AddUint64(&or.numObserved, 1) + default: + atomic.AddUint64(&or.numDropped, 1) + } + } + } +} diff --git a/vendor/github.com/hashicorp/raft/peersjson.go b/vendor/github.com/hashicorp/raft/peersjson.go new file mode 100644 index 0000000000000..d81d5ec4c59a2 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/peersjson.go @@ -0,0 +1,101 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package raft + +import ( + "bytes" + "encoding/json" + "os" +) + +// ReadPeersJSON consumes a legacy peers.json file in the format of the old JSON +// peer store and creates a new-style configuration structure. This can be used +// to migrate this data or perform manual recovery when running protocol versions +// that can interoperate with older, unversioned Raft servers. This should not be +// used once server IDs are in use, because the old peers.json file didn't have +// support for these, nor non-voter suffrage types. +func ReadPeersJSON(path string) (Configuration, error) { + // Read in the file. + buf, err := os.ReadFile(path) + if err != nil { + return Configuration{}, err + } + + // Parse it as JSON. + var peers []string + dec := json.NewDecoder(bytes.NewReader(buf)) + if err := dec.Decode(&peers); err != nil { + return Configuration{}, err + } + + // Map it into the new-style configuration structure. 
We can only specify + // voter roles here, and the ID has to be the same as the address. + var configuration Configuration + for _, peer := range peers { + server := Server{ + Suffrage: Voter, + ID: ServerID(peer), + Address: ServerAddress(peer), + } + configuration.Servers = append(configuration.Servers, server) + } + + // We should only ingest valid configurations. + if err := checkConfiguration(configuration); err != nil { + return Configuration{}, err + } + return configuration, nil +} + +// configEntry is used when decoding a new-style peers.json. +type configEntry struct { + // ID is the ID of the server (a UUID, usually). + ID ServerID `json:"id"` + + // Address is the host:port of the server. + Address ServerAddress `json:"address"` + + // NonVoter controls the suffrage. We choose this sense so people + // can leave this out and get a Voter by default. + NonVoter bool `json:"non_voter"` +} + +// ReadConfigJSON reads a new-style peers.json and returns a configuration +// structure. This can be used to perform manual recovery when running protocol +// versions that use server IDs. +func ReadConfigJSON(path string) (Configuration, error) { + // Read in the file. + buf, err := os.ReadFile(path) + if err != nil { + return Configuration{}, err + } + + // Parse it as JSON. + var peers []configEntry + dec := json.NewDecoder(bytes.NewReader(buf)) + if err := dec.Decode(&peers); err != nil { + return Configuration{}, err + } + + // Map it into the new-style configuration structure. + var configuration Configuration + for _, peer := range peers { + suffrage := Voter + if peer.NonVoter { + suffrage = Nonvoter + } + server := Server{ + Suffrage: suffrage, + ID: peer.ID, + Address: peer.Address, + } + configuration.Servers = append(configuration.Servers, server) + } + + // We should only ingest valid configurations. + if err := checkConfiguration(configuration); err != nil { + return Configuration{}, err + } + return configuration, nil +} diff --git a/vendor/github.com/hashicorp/raft/progress.go b/vendor/github.com/hashicorp/raft/progress.go new file mode 100644 index 0000000000000..6b4df53f508d3 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/progress.go @@ -0,0 +1,149 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package raft + +import ( + "context" + "io" + "sync" + "time" + + hclog "github.com/hashicorp/go-hclog" +) + +const ( + snapshotRestoreMonitorInterval = 10 * time.Second +) + +type snapshotRestoreMonitor struct { + logger hclog.Logger + cr CountingReader + size int64 + networkTransfer bool + + once sync.Once + cancel func() + doneCh chan struct{} +} + +func startSnapshotRestoreMonitor( + logger hclog.Logger, + cr CountingReader, + size int64, + networkTransfer bool, +) *snapshotRestoreMonitor { + ctx, cancel := context.WithCancel(context.Background()) + + m := &snapshotRestoreMonitor{ + logger: logger, + cr: cr, + size: size, + networkTransfer: networkTransfer, + cancel: cancel, + doneCh: make(chan struct{}), + } + go m.run(ctx) + return m +} + +func (m *snapshotRestoreMonitor) run(ctx context.Context) { + defer close(m.doneCh) + + ticker := time.NewTicker(snapshotRestoreMonitorInterval) + defer ticker.Stop() + + ranOnce := false + for { + select { + case <-ctx.Done(): + if !ranOnce { + m.runOnce() + } + return + case <-ticker.C: + m.runOnce() + ranOnce = true + } + } +} + +func (m *snapshotRestoreMonitor) runOnce() { + readBytes := m.cr.Count() + pct := float64(100*readBytes) / float64(m.size) + + message := "snapshot restore progress" + if m.networkTransfer { + message = "snapshot network transfer progress" + } + + m.logger.Info(message, + "read-bytes", readBytes, + "percent-complete", hclog.Fmt("%0.2f%%", pct), + ) +} + +func (m *snapshotRestoreMonitor) StopAndWait() { + m.once.Do(func() { + m.cancel() + <-m.doneCh + }) +} + +type CountingReader interface { + io.Reader + Count() int64 +} + +type countingReader struct { + reader io.Reader + + mu sync.Mutex + bytes int64 +} + +func (r *countingReader) Read(p []byte) (n int, err error) { + n, err = r.reader.Read(p) + r.mu.Lock() + r.bytes += int64(n) + r.mu.Unlock() + return n, err +} + +func (r *countingReader) Count() int64 { + r.mu.Lock() + defer r.mu.Unlock() + return r.bytes +} + +func newCountingReader(r io.Reader) *countingReader { + return &countingReader{reader: r} +} + +type countingReadCloser struct { + *countingReader + readCloser io.ReadCloser +} + +func newCountingReadCloser(rc io.ReadCloser) *countingReadCloser { + return &countingReadCloser{ + countingReader: newCountingReader(rc), + readCloser: rc, + } +} + +func (c countingReadCloser) Close() error { + return c.readCloser.Close() +} + +func (c countingReadCloser) WrappedReadCloser() io.ReadCloser { + return c.readCloser +} + +// ReadCloserWrapper allows access to an underlying ReadCloser from a wrapper. +type ReadCloserWrapper interface { + io.ReadCloser + WrappedReadCloser() io.ReadCloser +} + +var _ ReadCloserWrapper = &countingReadCloser{} diff --git a/vendor/github.com/hashicorp/raft/raft.go b/vendor/github.com/hashicorp/raft/raft.go new file mode 100644 index 0000000000000..183f041a42254 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/raft.go @@ -0,0 +1,2242 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package raft + +import ( + "bytes" + "container/list" + "fmt" + "io" + "strings" + "sync/atomic" + "time" + + "github.com/hashicorp/go-hclog" + + "github.com/armon/go-metrics" +) + +const ( + minCheckInterval = 10 * time.Millisecond + oldestLogGaugeInterval = 10 * time.Second + rpcUnexpectedCommandError = "unexpected command" +) + +var ( + keyCurrentTerm = []byte("CurrentTerm") + keyLastVoteTerm = []byte("LastVoteTerm") + keyLastVoteCand = []byte("LastVoteCand") +) + +// getRPCHeader returns an initialized RPCHeader struct for the given +// Raft instance. This structure is sent along with RPC requests and +// responses. +func (r *Raft) getRPCHeader() RPCHeader { + return RPCHeader{ + ProtocolVersion: r.config().ProtocolVersion, + ID: []byte(r.config().LocalID), + Addr: r.trans.EncodePeer(r.config().LocalID, r.localAddr), + } +} + +// checkRPCHeader houses logic about whether this instance of Raft can process +// the given RPC message. +func (r *Raft) checkRPCHeader(rpc RPC) error { + // Get the header off the RPC message. + wh, ok := rpc.Command.(WithRPCHeader) + if !ok { + return fmt.Errorf("RPC does not have a header") + } + header := wh.GetRPCHeader() + + // First check is to just make sure the code can understand the + // protocol at all. + if header.ProtocolVersion < ProtocolVersionMin || + header.ProtocolVersion > ProtocolVersionMax { + return ErrUnsupportedProtocol + } + + // Second check is whether we should support this message, given the + // current protocol we are configured to run. This will drop support + // for protocol version 0 starting at protocol version 2, which is + // currently what we want, and in general support one version back. We + // may need to revisit this policy depending on how future protocol + // changes evolve. + if header.ProtocolVersion < r.config().ProtocolVersion-1 { + return ErrUnsupportedProtocol + } + + return nil +} + +// getSnapshotVersion returns the snapshot version that should be used when +// creating snapshots, given the protocol version in use. +func getSnapshotVersion(protocolVersion ProtocolVersion) SnapshotVersion { + // Right now we only have two versions and they are backwards compatible + // so we don't need to look at the protocol version. + return 1 +} + +// commitTuple is used to send an index that was committed, +// with an optional associated future that should be invoked. +type commitTuple struct { + log *Log + future *logFuture +} + +// leaderState is state that is used while we are a leader. +type leaderState struct { + leadershipTransferInProgress int32 // indicates that a leadership transfer is in progress. + commitCh chan struct{} + commitment *commitment + inflight *list.List // list of logFuture in log index order + replState map[ServerID]*followerReplication + notify map[*verifyFuture]struct{} + stepDown chan struct{} +} + +// setLeader is used to modify the current leader Address and ID of the cluster +func (r *Raft) setLeader(leaderAddr ServerAddress, leaderID ServerID) { + r.leaderLock.Lock() + oldLeaderAddr := r.leaderAddr + r.leaderAddr = leaderAddr + oldLeaderID := r.leaderID + r.leaderID = leaderID + r.leaderLock.Unlock() + if oldLeaderAddr != leaderAddr || oldLeaderID != leaderID { + r.observe(LeaderObservation{Leader: leaderAddr, LeaderAddr: leaderAddr, LeaderID: leaderID}) + } +} + +// requestConfigChange is a helper for the above functions that make +// configuration change requests. 'req' describes the change. For timeout, +// see AddVoter. 
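//
// Editor's note (not part of the vendored file): making the version window
// enforced by checkRPCHeader above concrete. With this node at
// ProtocolVersion 3 and the library's current bounds (ProtocolVersionMin 0,
// ProtocolVersionMax 3):
//
//	header version 1 -> ErrUnsupportedProtocol (more than one version back)
//	header version 2 -> accepted (one version back)
//	header version 3 -> accepted (current)
//	header version 4 -> ErrUnsupportedProtocol (above ProtocolVersionMax)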
+func (r *Raft) requestConfigChange(req configurationChangeRequest, timeout time.Duration) IndexFuture { + var timer <-chan time.Time + if timeout > 0 { + timer = time.After(timeout) + } + future := &configurationChangeFuture{ + req: req, + } + future.init() + select { + case <-timer: + return errorFuture{ErrEnqueueTimeout} + case r.configurationChangeCh <- future: + return future + case <-r.shutdownCh: + return errorFuture{ErrRaftShutdown} + } +} + +// run the main thread that handles leadership and RPC requests. +func (r *Raft) run() { + for { + // Check if we are doing a shutdown + select { + case <-r.shutdownCh: + // Clear the leader to prevent forwarding + r.setLeader("", "") + return + default: + } + + switch r.getState() { + case Follower: + r.runFollower() + case Candidate: + r.runCandidate() + case Leader: + r.runLeader() + } + } +} + +// runFollower runs the main loop while in the follower state. +func (r *Raft) runFollower() { + didWarn := false + leaderAddr, leaderID := r.LeaderWithID() + r.logger.Info("entering follower state", "follower", r, "leader-address", leaderAddr, "leader-id", leaderID) + metrics.IncrCounter([]string{"raft", "state", "follower"}, 1) + heartbeatTimer := randomTimeout(r.config().HeartbeatTimeout) + + for r.getState() == Follower { + r.mainThreadSaturation.sleeping() + + select { + case rpc := <-r.rpcCh: + r.mainThreadSaturation.working() + r.processRPC(rpc) + + case c := <-r.configurationChangeCh: + r.mainThreadSaturation.working() + // Reject any operations since we are not the leader + c.respond(ErrNotLeader) + + case a := <-r.applyCh: + r.mainThreadSaturation.working() + // Reject any operations since we are not the leader + a.respond(ErrNotLeader) + + case v := <-r.verifyCh: + r.mainThreadSaturation.working() + // Reject any operations since we are not the leader + v.respond(ErrNotLeader) + + case ur := <-r.userRestoreCh: + r.mainThreadSaturation.working() + // Reject any restores since we are not the leader + ur.respond(ErrNotLeader) + + case l := <-r.leadershipTransferCh: + r.mainThreadSaturation.working() + // Reject any operations since we are not the leader + l.respond(ErrNotLeader) + + case c := <-r.configurationsCh: + r.mainThreadSaturation.working() + c.configurations = r.configurations.Clone() + c.respond(nil) + + case b := <-r.bootstrapCh: + r.mainThreadSaturation.working() + b.respond(r.liveBootstrap(b.configuration)) + + case <-r.leaderNotifyCh: + // Ignore since we are not the leader + + case <-r.followerNotifyCh: + heartbeatTimer = time.After(0) + + case <-heartbeatTimer: + r.mainThreadSaturation.working() + // Restart the heartbeat timer + hbTimeout := r.config().HeartbeatTimeout + heartbeatTimer = randomTimeout(hbTimeout) + + // Check if we have had a successful contact + lastContact := r.LastContact() + if time.Since(lastContact) < hbTimeout { + continue + } + + // Heartbeat failed! 
Transition to the candidate state
+			lastLeaderAddr, lastLeaderID := r.LeaderWithID()
+			r.setLeader("", "")
+
+			if r.configurations.latestIndex == 0 {
+				if !didWarn {
+					r.logger.Warn("no known peers, aborting election")
+					didWarn = true
+				}
+			} else if r.configurations.latestIndex == r.configurations.committedIndex &&
+				!hasVote(r.configurations.latest, r.localID) {
+				if !didWarn {
+					r.logger.Warn("not part of stable configuration, aborting election")
+					didWarn = true
+				}
+			} else {
+				metrics.IncrCounter([]string{"raft", "transition", "heartbeat_timeout"}, 1)
+				if hasVote(r.configurations.latest, r.localID) {
+					r.logger.Warn("heartbeat timeout reached, starting election", "last-leader-addr", lastLeaderAddr, "last-leader-id", lastLeaderID)
+					r.setState(Candidate)
+					return
+				} else if !didWarn {
+					r.logger.Warn("heartbeat timeout reached, not part of a stable configuration or a non-voter, not triggering a leader election")
+					didWarn = true
+				}
+			}
+
+		case <-r.shutdownCh:
+			return
+		}
+	}
+}
+
+// liveBootstrap attempts to seed an initial configuration for the cluster. See
+// the Raft object's member BootstrapCluster for more details. This must only be
+// called on the main thread, and only makes sense in the follower state.
+func (r *Raft) liveBootstrap(configuration Configuration) error {
+	if !hasVote(configuration, r.localID) {
+		// Reject this operation since we are not a voter
+		return ErrNotVoter
+	}
+
+	// Use the pre-init API to make the static updates.
+	cfg := r.config()
+	err := BootstrapCluster(&cfg, r.logs, r.stable, r.snapshots, r.trans, configuration)
+	if err != nil {
+		return err
+	}
+
+	// Make the configuration live.
+	var entry Log
+	if err := r.logs.GetLog(1, &entry); err != nil {
+		panic(err)
+	}
+	r.setCurrentTerm(1)
+	r.setLastLog(entry.Index, entry.Term)
+	return r.processConfigurationLogEntry(&entry)
+}
+
+// runCandidate runs the main loop while in the candidate state.
+func (r *Raft) runCandidate() {
+	term := r.getCurrentTerm() + 1
+	r.logger.Info("entering candidate state", "node", r, "term", term)
+	metrics.IncrCounter([]string{"raft", "state", "candidate"}, 1)
+
+	// Start vote for us, and set a timeout
+	var voteCh <-chan *voteResult
+	var prevoteCh <-chan *preVoteResult
+
+	// Check if pre-vote is active and that this is not a leader transfer.
+	// Leader transfers do not perform prevote by design.
+	if !r.preVoteDisabled && !r.candidateFromLeadershipTransfer.Load() {
+		prevoteCh = r.preElectSelf()
+	} else {
+		voteCh = r.electSelf()
+	}
+
+	// Make sure the leadership transfer flag is reset after each run. Having this
+	// flag will set the field LeadershipTransfer in a RequestVoteRequest to true,
+	// which will make other servers vote even though they have a leader already.
+	// It is important to reset that flag, because this privilege could be abused
+	// otherwise.
+	defer func() { r.candidateFromLeadershipTransfer.Store(false) }()
+
+	electionTimeout := r.config().ElectionTimeout
+	electionTimer := randomTimeout(electionTimeout)
+
+	// Tally the votes, need a simple majority
+	preVoteGrantedVotes := 0
+	preVoteRefusedVotes := 0
+	grantedVotes := 0
+	votesNeeded := r.quorumSize()
+	r.logger.Debug("calculated votes needed", "needed", votesNeeded, "term", term)
+
+	for r.getState() == Candidate {
+		r.mainThreadSaturation.sleeping()
+
+		select {
+		case rpc := <-r.rpcCh:
+			r.mainThreadSaturation.working()
+			r.processRPC(rpc)
+		case preVote := <-prevoteCh:
+			// This is a pre-vote case; it should trigger a "real" election if the pre-vote is won.
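			// Editor's note (not part of the vendored file): votesNeeded above
			// comes from quorumSize(), defined later in this file as
			// voters/2 + 1 with integer division, so for example:
			//
			//	3 voters -> 2 votes needed
			//	4 voters -> 3 votes needed
			//	5 voters -> 3 votes needed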
+ r.mainThreadSaturation.working() + r.logger.Debug("pre-vote received", "from", preVote.voterID, "term", preVote.Term, "tally", preVoteGrantedVotes) + // Check if the term is greater than ours, bail + if preVote.Term > term { + r.logger.Debug("pre-vote denied: found newer term, falling back to follower", "term", preVote.Term) + r.setState(Follower) + r.setCurrentTerm(preVote.Term) + return + } + + // Check if the preVote is granted + if preVote.Granted { + preVoteGrantedVotes++ + r.logger.Debug("pre-vote granted", "from", preVote.voterID, "term", preVote.Term, "tally", preVoteGrantedVotes) + } else { + preVoteRefusedVotes++ + r.logger.Debug("pre-vote denied", "from", preVote.voterID, "term", preVote.Term, "tally", preVoteGrantedVotes) + } + + // Check if we've won the pre-vote and proceed to election if so + if preVoteGrantedVotes >= votesNeeded { + r.logger.Info("pre-vote successful, starting election", "term", preVote.Term, + "tally", preVoteGrantedVotes, "refused", preVoteRefusedVotes, "votesNeeded", votesNeeded) + preVoteGrantedVotes = 0 + preVoteRefusedVotes = 0 + electionTimer = randomTimeout(electionTimeout) + prevoteCh = nil + voteCh = r.electSelf() + } + // Check if we've lost the pre-vote and wait for the election to timeout so we can do another time of + // prevote. + if preVoteRefusedVotes >= votesNeeded { + r.logger.Info("pre-vote campaign failed, waiting for election timeout", "term", preVote.Term, + "tally", preVoteGrantedVotes, "refused", preVoteRefusedVotes, "votesNeeded", votesNeeded) + } + case vote := <-voteCh: + r.mainThreadSaturation.working() + // Check if the term is greater than ours, bail + if vote.Term > r.getCurrentTerm() { + r.logger.Debug("newer term discovered, fallback to follower", "term", vote.Term) + r.setState(Follower) + r.setCurrentTerm(vote.Term) + return + } + + // Check if the vote is granted + if vote.Granted { + grantedVotes++ + r.logger.Debug("vote granted", "from", vote.voterID, "term", vote.Term, "tally", grantedVotes) + } + + // Check if we've become the leader + if grantedVotes >= votesNeeded { + r.logger.Info("election won", "term", vote.Term, "tally", grantedVotes) + r.setState(Leader) + r.setLeader(r.localAddr, r.localID) + return + } + case c := <-r.configurationChangeCh: + r.mainThreadSaturation.working() + // Reject any operations since we are not the leader + c.respond(ErrNotLeader) + + case a := <-r.applyCh: + r.mainThreadSaturation.working() + // Reject any operations since we are not the leader + a.respond(ErrNotLeader) + + case v := <-r.verifyCh: + r.mainThreadSaturation.working() + // Reject any operations since we are not the leader + v.respond(ErrNotLeader) + + case ur := <-r.userRestoreCh: + r.mainThreadSaturation.working() + // Reject any restores since we are not the leader + ur.respond(ErrNotLeader) + + case l := <-r.leadershipTransferCh: + r.mainThreadSaturation.working() + // Reject any operations since we are not the leader + l.respond(ErrNotLeader) + + case c := <-r.configurationsCh: + r.mainThreadSaturation.working() + c.configurations = r.configurations.Clone() + c.respond(nil) + + case b := <-r.bootstrapCh: + r.mainThreadSaturation.working() + b.respond(ErrCantBootstrap) + + case <-r.leaderNotifyCh: + // Ignore since we are not the leader + + case <-r.followerNotifyCh: + if electionTimeout != r.config().ElectionTimeout { + electionTimeout = r.config().ElectionTimeout + electionTimer = randomTimeout(electionTimeout) + } + + case <-electionTimer: + r.mainThreadSaturation.working() + // Election failed! 
Restart the election. We simply return,
+			// which will kick us back into runCandidate
+			r.logger.Warn("Election timeout reached, restarting election")
+			return
+
+		case <-r.shutdownCh:
+			return
+		}
+	}
+}
+
+func (r *Raft) setLeadershipTransferInProgress(v bool) {
+	if v {
+		atomic.StoreInt32(&r.leaderState.leadershipTransferInProgress, 1)
+	} else {
+		atomic.StoreInt32(&r.leaderState.leadershipTransferInProgress, 0)
+	}
+}
+
+func (r *Raft) getLeadershipTransferInProgress() bool {
+	v := atomic.LoadInt32(&r.leaderState.leadershipTransferInProgress)
+	return v == 1
+}
+
+func (r *Raft) setupLeaderState() {
+	r.leaderState.commitCh = make(chan struct{}, 1)
+	r.leaderState.commitment = newCommitment(r.leaderState.commitCh,
+		r.configurations.latest,
+		r.getLastIndex()+1 /* first index that may be committed in this term */)
+	r.leaderState.inflight = list.New()
+	r.leaderState.replState = make(map[ServerID]*followerReplication)
+	r.leaderState.notify = make(map[*verifyFuture]struct{})
+	r.leaderState.stepDown = make(chan struct{}, 1)
+}
+
+// runLeader runs the main loop while in leader state. Do the setup here and drop into
+// the leaderLoop for the hot loop.
+func (r *Raft) runLeader() {
+	r.logger.Info("entering leader state", "leader", r)
+	metrics.IncrCounter([]string{"raft", "state", "leader"}, 1)
+
+	// Notify that we are the leader
+	overrideNotifyBool(r.leaderCh, true)
+
+	// Store the notify chan. It's not reloadable so shouldn't change before the
+	// defer below runs, but this makes sure we always notify the same chan if
+	// ever for both gaining and losing leadership.
+	notify := r.config().NotifyCh
+
+	// Push to the notify channel if given
+	if notify != nil {
+		select {
+		case notify <- true:
+		case <-r.shutdownCh:
+			// make sure to push to the notify channel (if given)
+			select {
+			case notify <- true:
+			default:
+			}
+		}
+	}
+
+	// Set up leader state. This is only supposed to be accessed within the
+	// leader loop.
+	r.setupLeaderState()
+
+	// Run a background go-routine to emit metrics on log age
+	stopCh := make(chan struct{})
+	go emitLogStoreMetrics(r.logs, []string{"raft", "leader"}, oldestLogGaugeInterval, stopCh)
+
+	// Cleanup state on step down
+	defer func() {
+		close(stopCh)
+
+		// Since we were the leader previously, we update our
+		// last contact time when we step down, so that we are not
+		// reporting a last contact time from before we were the
+		// leader. Otherwise, to a client it would seem our data
+		// is extremely stale.
+		r.setLastContact()
+
+		// Stop replication
+		for _, p := range r.leaderState.replState {
+			close(p.stopCh)
+		}
+
+		// Respond to all inflight operations
+		for e := r.leaderState.inflight.Front(); e != nil; e = e.Next() {
+			e.Value.(*logFuture).respond(ErrLeadershipLost)
+		}
+
+		// Respond to any pending verify requests
+		for future := range r.leaderState.notify {
+			future.respond(ErrLeadershipLost)
+		}
+
+		// Clear all the state
+		r.leaderState.commitCh = nil
+		r.leaderState.commitment = nil
+		r.leaderState.inflight = nil
+		r.leaderState.replState = nil
+		r.leaderState.notify = nil
+		r.leaderState.stepDown = nil
+
+		// If we are stepping down for some reason, no known leader.
+		// We may have stepped down due to an RPC call, which would
+		// provide the leader, so we cannot always blank this out.
+ r.leaderLock.Lock() + if r.leaderAddr == r.localAddr && r.leaderID == r.localID { + r.leaderAddr = "" + r.leaderID = "" + } + r.leaderLock.Unlock() + + // Notify that we are not the leader + overrideNotifyBool(r.leaderCh, false) + + // Push to the notify channel if given + if notify != nil { + select { + case notify <- false: + case <-r.shutdownCh: + // On shutdown, make a best effort but do not block + select { + case notify <- false: + default: + } + } + } + }() + + // Start a replication routine for each peer + r.startStopReplication() + + // Dispatch a no-op log entry first. This gets this leader up to the latest + // possible commit index, even in the absence of client commands. This used + // to append a configuration entry instead of a noop. However, that permits + // an unbounded number of uncommitted configurations in the log. We now + // maintain that there exists at most one uncommitted configuration entry in + // any log, so we have to do proper no-ops here. + noop := &logFuture{log: Log{Type: LogNoop}} + r.dispatchLogs([]*logFuture{noop}) + + // Sit in the leader loop until we step down + r.leaderLoop() +} + +// startStopReplication will set up state and start asynchronous replication to +// new peers, and stop replication to removed peers. Before removing a peer, +// it'll instruct the replication routines to try to replicate to the current +// index. This must only be called from the main thread. +func (r *Raft) startStopReplication() { + inConfig := make(map[ServerID]bool, len(r.configurations.latest.Servers)) + lastIdx := r.getLastIndex() + + // Start replication goroutines that need starting + for _, server := range r.configurations.latest.Servers { + if server.ID == r.localID { + continue + } + + inConfig[server.ID] = true + + s, ok := r.leaderState.replState[server.ID] + if !ok { + r.logger.Info("added peer, starting replication", "peer", server.ID) + s = &followerReplication{ + peer: server, + commitment: r.leaderState.commitment, + stopCh: make(chan uint64, 1), + triggerCh: make(chan struct{}, 1), + triggerDeferErrorCh: make(chan *deferError, 1), + currentTerm: r.getCurrentTerm(), + nextIndex: lastIdx + 1, + lastContact: time.Now(), + notify: make(map[*verifyFuture]struct{}), + notifyCh: make(chan struct{}, 1), + stepDown: r.leaderState.stepDown, + } + + r.leaderState.replState[server.ID] = s + r.goFunc(func() { r.replicate(s) }) + asyncNotifyCh(s.triggerCh) + r.observe(PeerObservation{Peer: server, Removed: false}) + } else if ok { + + s.peerLock.RLock() + peer := s.peer + s.peerLock.RUnlock() + + if peer.Address != server.Address { + r.logger.Info("updating peer", "peer", server.ID) + s.peerLock.Lock() + s.peer = server + s.peerLock.Unlock() + } + } + } + + // Stop replication goroutines that need stopping + for serverID, repl := range r.leaderState.replState { + if inConfig[serverID] { + continue + } + // Replicate up to lastIdx and stop + r.logger.Info("removed peer, stopping replication", "peer", serverID, "last-index", lastIdx) + repl.stopCh <- lastIdx + close(repl.stopCh) + delete(r.leaderState.replState, serverID) + r.observe(PeerObservation{Peer: repl.peer, Removed: true}) + } + + // Update peers metric + metrics.SetGauge([]string{"raft", "peers"}, float32(len(r.configurations.latest.Servers))) +} + +// configurationChangeChIfStable returns r.configurationChangeCh if it's safe +// to process requests from it, or nil otherwise. This must only be called +// from the main thread. 
+// +// Note that if the conditions here were to change outside of leaderLoop to take +// this from nil to non-nil, we would need leaderLoop to be kicked. +func (r *Raft) configurationChangeChIfStable() chan *configurationChangeFuture { + // Have to wait until: + // 1. The latest configuration is committed, and + // 2. This leader has committed some entry (the noop) in this term + // https://groups.google.com/forum/#!msg/raft-dev/t4xj6dJTP6E/d2D9LrWRza8J + if r.configurations.latestIndex == r.configurations.committedIndex && + r.getCommitIndex() >= r.leaderState.commitment.startIndex { + return r.configurationChangeCh + } + return nil +} + +// leaderLoop is the hot loop for a leader. It is invoked +// after all the various leader setup is done. +func (r *Raft) leaderLoop() { + // stepDown is used to track if there is an inflight log that + // would cause us to lose leadership (specifically a RemovePeer of + // ourselves). If this is the case, we must not allow any logs to + // be processed in parallel, otherwise we are basing commit on + // only a single peer (ourself) and replicating to an undefined set + // of peers. + stepDown := false + // This is only used for the first lease check, we reload lease below + // based on the current config value. + lease := time.After(r.config().LeaderLeaseTimeout) + + for r.getState() == Leader { + r.mainThreadSaturation.sleeping() + + select { + case rpc := <-r.rpcCh: + r.mainThreadSaturation.working() + r.processRPC(rpc) + + case <-r.leaderState.stepDown: + r.mainThreadSaturation.working() + r.setState(Follower) + + case future := <-r.leadershipTransferCh: + r.mainThreadSaturation.working() + if r.getLeadershipTransferInProgress() { + r.logger.Debug(ErrLeadershipTransferInProgress.Error()) + future.respond(ErrLeadershipTransferInProgress) + continue + } + + r.logger.Debug("starting leadership transfer", "id", future.ID, "address", future.Address) + + // When we are leaving leaderLoop, we are no longer + // leader, so we should stop transferring. + leftLeaderLoop := make(chan struct{}) + defer func() { close(leftLeaderLoop) }() + + stopCh := make(chan struct{}) + doneCh := make(chan error, 1) + + // This is intentionally being setup outside of the + // leadershipTransfer function. Because the TimeoutNow + // call is blocking and there is no way to abort that + // in case eg the timer expires. + // The leadershipTransfer function is controlled with + // the stopCh and doneCh. + // No matter how this exits, have this function set + // leadership transfer to false before we return + // + // Note that this leaves a window where callers of + // LeadershipTransfer() and LeadershipTransferToServer() + // may start executing after they get their future but before + // this routine has set leadershipTransferInProgress back to false. + // It may be safe to modify things such that setLeadershipTransferInProgress + // is set to false before calling future.Respond, but that still needs + // to be tested and this situation mirrors what callers already had to deal with. 
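			// Editor's note (not part of the vendored file): from the
			// caller's side this machinery is driven through the public
			// API, roughly:
			//
			//	f := r.LeadershipTransfer() // or r.LeadershipTransferToServer(id, addr)
			//	if err := f.Error(); err != nil {
			//		// the transfer failed or timed out; this node may still be leader
			//	}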
+ go func() { + defer r.setLeadershipTransferInProgress(false) + select { + case <-time.After(r.config().ElectionTimeout): + close(stopCh) + err := fmt.Errorf("leadership transfer timeout") + r.logger.Debug(err.Error()) + future.respond(err) + <-doneCh + case <-leftLeaderLoop: + close(stopCh) + err := fmt.Errorf("lost leadership during transfer (expected)") + r.logger.Debug(err.Error()) + future.respond(nil) + <-doneCh + case err := <-doneCh: + if err != nil { + r.logger.Debug(err.Error()) + future.respond(err) + } else { + // Wait for up to ElectionTimeout before flagging the + // leadership transfer as done and unblocking applies in + // the leaderLoop. + select { + case <-time.After(r.config().ElectionTimeout): + err := fmt.Errorf("leadership transfer timeout") + r.logger.Debug(err.Error()) + future.respond(err) + case <-leftLeaderLoop: + r.logger.Debug("lost leadership during transfer (expected)") + future.respond(nil) + } + } + } + }() + + // leaderState.replState is accessed here before + // starting leadership transfer asynchronously because + // leaderState is only supposed to be accessed in the + // leaderloop. + id := future.ID + address := future.Address + if id == nil { + s := r.pickServer() + if s != nil { + id = &s.ID + address = &s.Address + } else { + doneCh <- fmt.Errorf("cannot find peer") + continue + } + } + state, ok := r.leaderState.replState[*id] + if !ok { + doneCh <- fmt.Errorf("cannot find replication state for %v", id) + continue + } + r.setLeadershipTransferInProgress(true) + go r.leadershipTransfer(*id, *address, state, stopCh, doneCh) + + case <-r.leaderState.commitCh: + r.mainThreadSaturation.working() + // Process the newly committed entries + oldCommitIndex := r.getCommitIndex() + commitIndex := r.leaderState.commitment.getCommitIndex() + r.setCommitIndex(commitIndex) + + // New configuration has been committed, set it as the committed + // value. + if r.configurations.latestIndex > oldCommitIndex && + r.configurations.latestIndex <= commitIndex { + r.setCommittedConfiguration(r.configurations.latest, r.configurations.latestIndex) + if !hasVote(r.configurations.committed, r.localID) { + stepDown = true + } + } + + start := time.Now() + var groupReady []*list.Element + groupFutures := make(map[uint64]*logFuture) + var lastIdxInGroup uint64 + + // Pull all inflight logs that are committed off the queue. 
+ for e := r.leaderState.inflight.Front(); e != nil; e = e.Next() { + commitLog := e.Value.(*logFuture) + idx := commitLog.log.Index + if idx > commitIndex { + // Don't go past the committed index + break + } + + // Measure the commit time + metrics.MeasureSince([]string{"raft", "commitTime"}, commitLog.dispatch) + groupReady = append(groupReady, e) + groupFutures[idx] = commitLog + lastIdxInGroup = idx + } + + // Process the group + if len(groupReady) != 0 { + r.processLogs(lastIdxInGroup, groupFutures) + + for _, e := range groupReady { + r.leaderState.inflight.Remove(e) + } + } + + // Measure the time to enqueue batch of logs for FSM to apply + metrics.MeasureSince([]string{"raft", "fsm", "enqueue"}, start) + + // Count the number of logs enqueued + metrics.SetGauge([]string{"raft", "commitNumLogs"}, float32(len(groupReady))) + + if stepDown { + if r.config().ShutdownOnRemove { + r.logger.Info("removed ourself, shutting down") + r.Shutdown() + } else { + r.logger.Info("removed ourself, transitioning to follower") + r.setState(Follower) + } + } + + case v := <-r.verifyCh: + r.mainThreadSaturation.working() + if v.quorumSize == 0 { + // Just dispatched, start the verification + r.verifyLeader(v) + } else if v.votes < v.quorumSize { + // Early return, means there must be a new leader + r.logger.Warn("new leader elected, stepping down") + r.setState(Follower) + delete(r.leaderState.notify, v) + for _, repl := range r.leaderState.replState { + repl.cleanNotify(v) + } + v.respond(ErrNotLeader) + + } else { + // Quorum of members agree, we are still leader + delete(r.leaderState.notify, v) + for _, repl := range r.leaderState.replState { + repl.cleanNotify(v) + } + v.respond(nil) + } + + case future := <-r.userRestoreCh: + r.mainThreadSaturation.working() + if r.getLeadershipTransferInProgress() { + r.logger.Debug(ErrLeadershipTransferInProgress.Error()) + future.respond(ErrLeadershipTransferInProgress) + continue + } + err := r.restoreUserSnapshot(future.meta, future.reader) + future.respond(err) + + case future := <-r.configurationsCh: + r.mainThreadSaturation.working() + if r.getLeadershipTransferInProgress() { + r.logger.Debug(ErrLeadershipTransferInProgress.Error()) + future.respond(ErrLeadershipTransferInProgress) + continue + } + future.configurations = r.configurations.Clone() + future.respond(nil) + + case future := <-r.configurationChangeChIfStable(): + r.mainThreadSaturation.working() + if r.getLeadershipTransferInProgress() { + r.logger.Debug(ErrLeadershipTransferInProgress.Error()) + future.respond(ErrLeadershipTransferInProgress) + continue + } + r.appendConfigurationEntry(future) + + case b := <-r.bootstrapCh: + r.mainThreadSaturation.working() + b.respond(ErrCantBootstrap) + + case newLog := <-r.applyCh: + r.mainThreadSaturation.working() + if r.getLeadershipTransferInProgress() { + r.logger.Debug(ErrLeadershipTransferInProgress.Error()) + newLog.respond(ErrLeadershipTransferInProgress) + continue + } + // Group commit, gather all the ready commits + ready := []*logFuture{newLog} + GROUP_COMMIT_LOOP: + for i := 0; i < r.config().MaxAppendEntries; i++ { + select { + case newLog := <-r.applyCh: + ready = append(ready, newLog) + default: + break GROUP_COMMIT_LOOP + } + } + + // Dispatch the logs + if stepDown { + // we're in the process of stepping down as leader, don't process anything new + for i := range ready { + ready[i].respond(ErrNotLeader) + } + } else { + r.dispatchLogs(ready) + } + + case <-lease: + r.mainThreadSaturation.working() + // Check if we've exceeded the lease, 
potentially stepping down
+			maxDiff := r.checkLeaderLease()
+
+			// Next check interval should adjust for the last node we've
+			// contacted, without going negative
+			checkInterval := r.config().LeaderLeaseTimeout - maxDiff
+			if checkInterval < minCheckInterval {
+				checkInterval = minCheckInterval
+			}
+
+			// Renew the lease timer
+			lease = time.After(checkInterval)
+
+		case <-r.leaderNotifyCh:
+			for _, repl := range r.leaderState.replState {
+				asyncNotifyCh(repl.notifyCh)
+			}
+
+		case <-r.followerNotifyCh:
+			// Ignore since we are not a follower
+
+		case <-r.shutdownCh:
+			return
+		}
+	}
+}
+
+// verifyLeader must be called from the main thread for safety.
+// Causes the followers to attempt an immediate heartbeat.
+func (r *Raft) verifyLeader(v *verifyFuture) {
+	// Current leader always votes for self
+	v.votes = 1
+
+	// Set the quorum size, hot-path for single node
+	v.quorumSize = r.quorumSize()
+	if v.quorumSize == 1 {
+		v.respond(nil)
+		return
+	}
+
+	// Track this request
+	v.notifyCh = r.verifyCh
+	r.leaderState.notify[v] = struct{}{}
+
+	// Trigger immediate heartbeats
+	for _, repl := range r.leaderState.replState {
+		repl.notifyLock.Lock()
+		repl.notify[v] = struct{}{}
+		repl.notifyLock.Unlock()
+		asyncNotifyCh(repl.notifyCh)
+	}
+}
+
+// leadershipTransfer performs the heavy lifting of a leadership transfer.
+func (r *Raft) leadershipTransfer(id ServerID, address ServerAddress, repl *followerReplication, stopCh chan struct{}, doneCh chan error) {
+	// Make sure we are not already stopped
+	select {
+	case <-stopCh:
+		doneCh <- nil
+		return
+	default:
+	}
+
+	for atomic.LoadUint64(&repl.nextIndex) <= r.getLastIndex() {
+		err := &deferError{}
+		err.init()
+		repl.triggerDeferErrorCh <- err
+		select {
+		case err := <-err.errCh:
+			if err != nil {
+				doneCh <- err
+				return
+			}
+		case <-stopCh:
+			doneCh <- nil
+			return
+		}
+	}
+
+	// Step ?: the thesis describes in chapter 6.4.1 how to use clocks to
+	// reduce messaging for read-only queries. If that were implemented, the
+	// lease would have to be reset here as well, in case leadership is
+	// transferred. This implementation also has a lease, but it serves
+	// another purpose and doesn't need to be reset. The lease mechanism in
+	// our Raft library is set up in a similar way to the one in the thesis,
+	// but in practice it's a timer that just tells the leader how often to
+	// check that heartbeats are still coming in.
+
+	// Step 3: send TimeoutNow message to target server.
+	err := r.trans.TimeoutNow(id, address, &TimeoutNowRequest{RPCHeader: r.getRPCHeader()}, &TimeoutNowResponse{})
+	if err != nil {
+		err = fmt.Errorf("failed to make TimeoutNow RPC to %v: %v", id, err)
+	}
+	doneCh <- err
+}
+
+// checkLeaderLease is used to check if we can contact a quorum of nodes
+// within the last leader lease interval. If not, we need to step down,
+// as we may have lost connectivity. Returns the maximum duration without
+// contact. This must only be called from the main thread.
+func (r *Raft) checkLeaderLease() time.Duration {
+	// Track contacted nodes; we can always contact ourselves
+	contacted := 0
+
+	// Store the lease timeout for this one check invocation: we need to refer
+	// to it in the loop, and it would be confusing if it ever became
+	// reloadable and changed between iterations below.
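+	// For illustration (assuming the library's default LeaderLeaseTimeout of
+	// 500ms): a follower whose LastContact() is 200ms old counts toward the
+	// quorum and contributes 200ms to maxDiff, while one last heard from
+	// 700ms ago does not count; losing quorum this way forces the step-down
+	// below.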
+ leaseTimeout := r.config().LeaderLeaseTimeout + + // Check each follower + var maxDiff time.Duration + now := time.Now() + for _, server := range r.configurations.latest.Servers { + if server.Suffrage == Voter { + if server.ID == r.localID { + contacted++ + continue + } + f := r.leaderState.replState[server.ID] + diff := now.Sub(f.LastContact()) + if diff <= leaseTimeout { + contacted++ + if diff > maxDiff { + maxDiff = diff + } + } else { + // Log at least once at high value, then debug. Otherwise it gets very verbose. + if diff <= 3*leaseTimeout { + r.logger.Warn("failed to contact", "server-id", server.ID, "time", diff) + } else { + r.logger.Debug("failed to contact", "server-id", server.ID, "time", diff) + } + } + metrics.AddSample([]string{"raft", "leader", "lastContact"}, float32(diff/time.Millisecond)) + } + } + + // Verify we can contact a quorum + quorum := r.quorumSize() + if contacted < quorum { + r.logger.Warn("failed to contact quorum of nodes, stepping down") + r.setState(Follower) + metrics.IncrCounter([]string{"raft", "transition", "leader_lease_timeout"}, 1) + } + return maxDiff +} + +// quorumSize is used to return the quorum size. This must only be called on +// the main thread. +// TODO: revisit usage +func (r *Raft) quorumSize() int { + voters := 0 + for _, server := range r.configurations.latest.Servers { + if server.Suffrage == Voter { + voters++ + } + } + return voters/2 + 1 +} + +// restoreUserSnapshot is used to manually consume an external snapshot, such +// as if restoring from a backup. We will use the current Raft configuration, +// not the one from the snapshot, so that we can restore into a new cluster. We +// will also use the higher of the index of the snapshot, or the current index, +// and then add 1 to that, so we force a new state with a hole in the Raft log, +// so that the snapshot will be sent to followers and used for any new joiners. +// This can only be run on the leader, and returns a future that can be used to +// block until complete. +func (r *Raft) restoreUserSnapshot(meta *SnapshotMeta, reader io.Reader) error { + defer metrics.MeasureSince([]string{"raft", "restoreUserSnapshot"}, time.Now()) + + // Sanity check the version. + version := meta.Version + if version < SnapshotVersionMin || version > SnapshotVersionMax { + return fmt.Errorf("unsupported snapshot version %d", version) + } + + // We don't support snapshots while there's a config change + // outstanding since the snapshot doesn't have a means to + // represent this state. + committedIndex := r.configurations.committedIndex + latestIndex := r.configurations.latestIndex + if committedIndex != latestIndex { + return fmt.Errorf("cannot restore snapshot now, wait until the configuration entry at %v has been applied (have applied %v)", + latestIndex, committedIndex) + } + + // Cancel any inflight requests. + for { + e := r.leaderState.inflight.Front() + if e == nil { + break + } + e.Value.(*logFuture).respond(ErrAbortedByRestore) + r.leaderState.inflight.Remove(e) + } + + // We will overwrite the snapshot metadata with the current term, + // an index that's greater than the current index, or the last + // index in the snapshot. It's important that we leave a hole in + // the index so we know there's nothing in the Raft log there and + // replication will fault and send the snapshot. + term := r.getCurrentTerm() + lastIndex := r.getLastIndex() + if meta.Index > lastIndex { + lastIndex = meta.Index + } + lastIndex++ + + // Dump the snapshot. 
Note that we use the latest configuration, + // not the one that came with the snapshot. + sink, err := r.snapshots.Create(version, lastIndex, term, + r.configurations.latest, r.configurations.latestIndex, r.trans) + if err != nil { + return fmt.Errorf("failed to create snapshot: %v", err) + } + n, err := io.Copy(sink, reader) + if err != nil { + sink.Cancel() + return fmt.Errorf("failed to write snapshot: %v", err) + } + if n != meta.Size { + sink.Cancel() + return fmt.Errorf("failed to write snapshot, size didn't match (%d != %d)", n, meta.Size) + } + if err := sink.Close(); err != nil { + return fmt.Errorf("failed to close snapshot: %v", err) + } + r.logger.Info("copied to local snapshot", "bytes", n) + + // Restore the snapshot into the FSM. If this fails we are in a + // bad state so we panic to take ourselves out. + fsm := &restoreFuture{ID: sink.ID()} + fsm.ShutdownCh = r.shutdownCh + fsm.init() + select { + case r.fsmMutateCh <- fsm: + case <-r.shutdownCh: + return ErrRaftShutdown + } + if err := fsm.Error(); err != nil { + panic(fmt.Errorf("failed to restore snapshot: %v", err)) + } + + // We set the last log so it looks like we've stored the empty + // index we burned. The last applied is set because we made the + // FSM take the snapshot state, and we store the last snapshot + // in the stable store since we created a snapshot as part of + // this process. + r.setLastLog(lastIndex, term) + r.setLastApplied(lastIndex) + r.setLastSnapshot(lastIndex, term) + + // Remove old logs if r.logs is a MonotonicLogStore. Log any errors and continue. + if logs, ok := r.logs.(MonotonicLogStore); ok && logs.IsMonotonic() { + if err := r.removeOldLogs(); err != nil { + r.logger.Error("failed to remove old logs", "error", err) + } + } + + r.logger.Info("restored user snapshot", "index", lastIndex) + return nil +} + +// appendConfigurationEntry changes the configuration and adds a new +// configuration entry to the log. This must only be called from the +// main thread. +func (r *Raft) appendConfigurationEntry(future *configurationChangeFuture) { + configuration, err := nextConfiguration(r.configurations.latest, r.configurations.latestIndex, future.req) + if err != nil { + future.respond(err) + return + } + + r.logger.Info("updating configuration", + "command", future.req.command, + "server-id", future.req.serverID, + "server-addr", future.req.serverAddress, + "servers", hclog.Fmt("%+v", configuration.Servers)) + + // In pre-ID compatibility mode we translate all configuration changes + // in to an old remove peer message, which can handle all supported + // cases for peer changes in the pre-ID world (adding and removing + // voters). Both add peer and remove peer log entries are handled + // similarly on old Raft servers, but remove peer does extra checks to + // see if a leader needs to step down. Since they both assert the full + // configuration, then we can safely call remove peer for everything. + if r.protocolVersion < 2 { + future.log = Log{ + Type: LogRemovePeerDeprecated, + Data: encodePeers(configuration, r.trans), + } + } else { + future.log = Log{ + Type: LogConfiguration, + Data: EncodeConfiguration(configuration), + } + } + + r.dispatchLogs([]*logFuture{&future.logFuture}) + index := future.Index() + r.setLatestConfiguration(configuration, index) + r.leaderState.commitment.setConfiguration(configuration) + r.startStopReplication() +} + +// dispatchLog is called on the leader to push a log to disk, mark it +// as inflight and begin replication of it. 
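+// Each future in applyLogs is assigned the next consecutive index and the
+// current term; if the local StoreLogs write fails, every future is failed
+// with the error and the node steps down to Follower, since it can no
+// longer assume its log is authoritative.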
+func (r *Raft) dispatchLogs(applyLogs []*logFuture) { + now := time.Now() + defer metrics.MeasureSince([]string{"raft", "leader", "dispatchLog"}, now) + + term := r.getCurrentTerm() + lastIndex := r.getLastIndex() + + n := len(applyLogs) + logs := make([]*Log, n) + metrics.SetGauge([]string{"raft", "leader", "dispatchNumLogs"}, float32(n)) + + for idx, applyLog := range applyLogs { + applyLog.dispatch = now + lastIndex++ + applyLog.log.Index = lastIndex + applyLog.log.Term = term + applyLog.log.AppendedAt = now + logs[idx] = &applyLog.log + r.leaderState.inflight.PushBack(applyLog) + } + + // Write the log entry locally + if err := r.logs.StoreLogs(logs); err != nil { + r.logger.Error("failed to commit logs", "error", err) + for _, applyLog := range applyLogs { + applyLog.respond(err) + } + r.setState(Follower) + return + } + r.leaderState.commitment.match(r.localID, lastIndex) + + // Update the last log since it's on disk now + r.setLastLog(lastIndex, term) + + // Notify the replicators of the new log + for _, f := range r.leaderState.replState { + asyncNotifyCh(f.triggerCh) + } +} + +// processLogs is used to apply all the committed entries that haven't been +// applied up to the given index limit. +// This can be called from both leaders and followers. +// Followers call this from AppendEntries, for n entries at a time, and always +// pass futures=nil. +// Leaders call this when entries are committed. They pass the futures from any +// inflight logs. +func (r *Raft) processLogs(index uint64, futures map[uint64]*logFuture) { + // Reject logs we've applied already + lastApplied := r.getLastApplied() + if index <= lastApplied { + r.logger.Warn("skipping application of old log", "index", index) + return + } + + applyBatch := func(batch []*commitTuple) { + select { + case r.fsmMutateCh <- batch: + case <-r.shutdownCh: + for _, cl := range batch { + if cl.future != nil { + cl.future.respond(ErrRaftShutdown) + } + } + } + } + + // Store maxAppendEntries for this call in case it ever becomes reloadable. We + // need to use the same value for all lines here to get the expected result. + maxAppendEntries := r.config().MaxAppendEntries + + batch := make([]*commitTuple, 0, maxAppendEntries) + + // Apply all the preceding logs + for idx := lastApplied + 1; idx <= index; idx++ { + var preparedLog *commitTuple + // Get the log, either from the future or from our log store + future, futureOk := futures[idx] + if futureOk { + preparedLog = r.prepareLog(&future.log, future) + } else { + l := new(Log) + if err := r.logs.GetLog(idx, l); err != nil { + r.logger.Error("failed to get log", "index", idx, "error", err) + panic(err) + } + preparedLog = r.prepareLog(l, nil) + } + + switch { + case preparedLog != nil: + // If we have a log ready to send to the FSM add it to the batch. + // The FSM thread will respond to the future. + batch = append(batch, preparedLog) + + // If we have filled up a batch, send it to the FSM + if len(batch) >= maxAppendEntries { + applyBatch(batch) + batch = make([]*commitTuple, 0, maxAppendEntries) + } + + case futureOk: + // Invoke the future if given. + future.respond(nil) + } + } + + // If there are any remaining logs in the batch apply them + if len(batch) != 0 { + applyBatch(batch) + } + + // Update the lastApplied index and term + r.setLastApplied(index) +} + +// processLog is invoked to process the application of a single committed log entry. 
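+// It returns a commitTuple for entries the FSM must apply (commands and
+// barriers, plus configuration entries on protocol version 3) and nil for
+// entries that Raft handles internally, such as no-ops.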
+func (r *Raft) prepareLog(l *Log, future *logFuture) *commitTuple { + switch l.Type { + case LogBarrier: + // Barrier is handled by the FSM + fallthrough + + case LogCommand: + return &commitTuple{l, future} + + case LogConfiguration: + // Only support this with the v2 configuration format + if r.protocolVersion > 2 { + return &commitTuple{l, future} + } + case LogAddPeerDeprecated: + case LogRemovePeerDeprecated: + case LogNoop: + // Ignore the no-op + + default: + panic(fmt.Errorf("unrecognized log type: %#v", l)) + } + + return nil +} + +// processRPC is called to handle an incoming RPC request. This must only be +// called from the main thread. +func (r *Raft) processRPC(rpc RPC) { + if err := r.checkRPCHeader(rpc); err != nil { + rpc.Respond(nil, err) + return + } + + switch cmd := rpc.Command.(type) { + case *AppendEntriesRequest: + r.appendEntries(rpc, cmd) + case *RequestVoteRequest: + r.requestVote(rpc, cmd) + case *RequestPreVoteRequest: + r.requestPreVote(rpc, cmd) + case *InstallSnapshotRequest: + r.installSnapshot(rpc, cmd) + case *TimeoutNowRequest: + r.timeoutNow(rpc, cmd) + default: + r.logger.Error("got unexpected command", + "command", hclog.Fmt("%#v", rpc.Command)) + + rpc.Respond(nil, fmt.Errorf(rpcUnexpectedCommandError)) + } +} + +// processHeartbeat is a special handler used just for heartbeat requests +// so that they can be fast-pathed if a transport supports it. This must only +// be called from the main thread. +func (r *Raft) processHeartbeat(rpc RPC) { + defer metrics.MeasureSince([]string{"raft", "rpc", "processHeartbeat"}, time.Now()) + + // Check if we are shutdown, just ignore the RPC + select { + case <-r.shutdownCh: + return + default: + } + + // Ensure we are only handling a heartbeat + switch cmd := rpc.Command.(type) { + case *AppendEntriesRequest: + r.appendEntries(rpc, cmd) + default: + r.logger.Error("expected heartbeat, got", "command", hclog.Fmt("%#v", rpc.Command)) + rpc.Respond(nil, fmt.Errorf("unexpected command")) + } +} + +// appendEntries is invoked when we get an append entries RPC call. This must +// only be called from the main thread. 
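+// The response is delivered by the deferred rpc.Respond below, so every
+// early return still replies to the leader; Success remains false unless the
+// term check, the previous-log consistency check, and any entry conflicts
+// have all been resolved.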
+func (r *Raft) appendEntries(rpc RPC, a *AppendEntriesRequest) { + defer metrics.MeasureSince([]string{"raft", "rpc", "appendEntries"}, time.Now()) + // Setup a response + resp := &AppendEntriesResponse{ + RPCHeader: r.getRPCHeader(), + Term: r.getCurrentTerm(), + LastLog: r.getLastIndex(), + Success: false, + NoRetryBackoff: false, + } + var rpcErr error + defer func() { + rpc.Respond(resp, rpcErr) + }() + + // Ignore an older term + if a.Term < r.getCurrentTerm() { + return + } + + // Increase the term if we see a newer one, also transition to follower + // if we ever get an appendEntries call + if a.Term > r.getCurrentTerm() || (r.getState() != Follower && !r.candidateFromLeadershipTransfer.Load()) { + // Ensure transition to follower + r.setState(Follower) + r.setCurrentTerm(a.Term) + resp.Term = a.Term + } + + // Save the current leader + if len(a.Addr) > 0 { + r.setLeader(r.trans.DecodePeer(a.Addr), ServerID(a.ID)) + } else { + r.setLeader(r.trans.DecodePeer(a.Leader), ServerID(a.ID)) + } + // Verify the last log entry + if a.PrevLogEntry > 0 { + lastIdx, lastTerm := r.getLastEntry() + + var prevLogTerm uint64 + if a.PrevLogEntry == lastIdx { + prevLogTerm = lastTerm + } else { + var prevLog Log + if err := r.logs.GetLog(a.PrevLogEntry, &prevLog); err != nil { + r.logger.Warn("failed to get previous log", + "previous-index", a.PrevLogEntry, + "last-index", lastIdx, + "error", err) + resp.NoRetryBackoff = true + return + } + prevLogTerm = prevLog.Term + } + + if a.PrevLogTerm != prevLogTerm { + r.logger.Warn("previous log term mis-match", + "ours", prevLogTerm, + "remote", a.PrevLogTerm) + resp.NoRetryBackoff = true + return + } + } + + // Process any new entries + if len(a.Entries) > 0 { + start := time.Now() + + // Delete any conflicting entries, skip any duplicates + lastLogIdx, _ := r.getLastLog() + var newEntries []*Log + for i, entry := range a.Entries { + if entry.Index > lastLogIdx { + newEntries = a.Entries[i:] + break + } + var storeEntry Log + if err := r.logs.GetLog(entry.Index, &storeEntry); err != nil { + r.logger.Warn("failed to get log entry", + "index", entry.Index, + "error", err) + return + } + if entry.Term != storeEntry.Term { + r.logger.Warn("clearing log suffix", "from", entry.Index, "to", lastLogIdx) + if err := r.logs.DeleteRange(entry.Index, lastLogIdx); err != nil { + r.logger.Error("failed to clear log suffix", "error", err) + return + } + if entry.Index <= r.configurations.latestIndex { + r.setLatestConfiguration(r.configurations.committed, r.configurations.committedIndex) + } + newEntries = a.Entries[i:] + break + } + } + + if n := len(newEntries); n > 0 { + // Append the new entries + if err := r.logs.StoreLogs(newEntries); err != nil { + r.logger.Error("failed to append to logs", "error", err) + // TODO: leaving r.getLastLog() in the wrong + // state if there was a truncation above + return + } + + // Handle any new configuration changes + for _, newEntry := range newEntries { + if err := r.processConfigurationLogEntry(newEntry); err != nil { + r.logger.Warn("failed to append entry", + "index", newEntry.Index, + "error", err) + rpcErr = err + return + } + } + + // Update the lastLog + last := newEntries[n-1] + r.setLastLog(last.Index, last.Term) + } + + metrics.MeasureSince([]string{"raft", "rpc", "appendEntries", "storeLogs"}, start) + } + + // Update the commit index + if a.LeaderCommitIndex > 0 && a.LeaderCommitIndex > r.getCommitIndex() { + start := time.Now() + idx := min(a.LeaderCommitIndex, r.getLastIndex()) + r.setCommitIndex(idx) + if 
r.configurations.latestIndex <= idx { + r.setCommittedConfiguration(r.configurations.latest, r.configurations.latestIndex) + } + r.processLogs(idx, nil) + metrics.MeasureSince([]string{"raft", "rpc", "appendEntries", "processLogs"}, start) + } + + // Everything went well, set success + resp.Success = true + r.setLastContact() +} + +// processConfigurationLogEntry takes a log entry and updates the latest +// configuration if the entry results in a new configuration. This must only be +// called from the main thread, or from NewRaft() before any threads have begun. +func (r *Raft) processConfigurationLogEntry(entry *Log) error { + switch entry.Type { + case LogConfiguration: + r.setCommittedConfiguration(r.configurations.latest, r.configurations.latestIndex) + r.setLatestConfiguration(DecodeConfiguration(entry.Data), entry.Index) + + case LogAddPeerDeprecated, LogRemovePeerDeprecated: + r.setCommittedConfiguration(r.configurations.latest, r.configurations.latestIndex) + conf, err := decodePeers(entry.Data, r.trans) + if err != nil { + return err + } + r.setLatestConfiguration(conf, entry.Index) + } + return nil +} + +// requestVote is invoked when we get a request vote RPC call. +func (r *Raft) requestVote(rpc RPC, req *RequestVoteRequest) { + defer metrics.MeasureSince([]string{"raft", "rpc", "requestVote"}, time.Now()) + r.observe(*req) + + // Setup a response + resp := &RequestVoteResponse{ + RPCHeader: r.getRPCHeader(), + Term: r.getCurrentTerm(), + Granted: false, + } + var rpcErr error + defer func() { + rpc.Respond(resp, rpcErr) + }() + + // Version 0 servers will panic unless the peers is present. It's only + // used on them to produce a warning message. + if r.protocolVersion < 2 { + resp.Peers = encodePeers(r.configurations.latest, r.trans) + } + + // Check if we have an existing leader [who's not the candidate] and also + // check the LeadershipTransfer flag is set. Usually votes are rejected if + // there is a known leader. But if the leader initiated a leadership transfer, + // vote! 
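+	// A sketch of the rule above: leader L starts a transfer to follower F,
+	// F campaigns with LeadershipTransfer set in its RequestVote, and peers
+	// grant the vote even though they still consider L the live leader.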
+	var candidate ServerAddress
+	var candidateBytes []byte
+	if len(req.RPCHeader.Addr) > 0 {
+		candidate = r.trans.DecodePeer(req.RPCHeader.Addr)
+		candidateBytes = req.RPCHeader.Addr
+	} else {
+		candidate = r.trans.DecodePeer(req.Candidate)
+		candidateBytes = req.Candidate
+	}
+
+	// For older Raft versions the ID is not part of the packed message, so we
+	// assume that the peer is part of the configuration and skip this check
+	if len(req.ID) > 0 {
+		candidateID := ServerID(req.ID)
+		// If the Servers list is empty, the cluster is very likely trying to
+		// bootstrap, so grant the vote
+		if len(r.configurations.latest.Servers) > 0 && !inConfiguration(r.configurations.latest, candidateID) {
+			r.logger.Warn("rejecting vote request since node is not in configuration",
+				"from", candidate)
+			return
+		}
+	}
+	if leaderAddr, leaderID := r.LeaderWithID(); leaderAddr != "" && leaderAddr != candidate && !req.LeadershipTransfer {
+		r.logger.Warn("rejecting vote request since we have a leader",
+			"from", candidate,
+			"leader", leaderAddr,
+			"leader-id", string(leaderID))
+		return
+	}
+
+	// Ignore an older term
+	if req.Term < r.getCurrentTerm() {
+		return
+	}
+
+	// Increase the term if we see a newer one
+	if req.Term > r.getCurrentTerm() {
+		// Ensure transition to follower
+		r.logger.Debug("lost leadership because received a requestVote with a newer term")
+		r.setState(Follower)
+		r.setCurrentTerm(req.Term)
+
+		resp.Term = req.Term
+	}
+
+	// If we get a vote request from a non-voter and the request term is
+	// higher, step down and update the term, but reject the vote request.
+	// This can happen when a node, previously a voter, is converted to a
+	// non-voter. Stepping down permits the cluster to make progress in such
+	// a scenario. More details in https://github.com/hashicorp/raft/pull/526
+	if len(req.ID) > 0 {
+		candidateID := ServerID(req.ID)
+		if len(r.configurations.latest.Servers) > 0 && !hasVote(r.configurations.latest, candidateID) {
+			r.logger.Warn("rejecting vote request since node is not a voter", "from", candidate)
+			return
+		}
+	}
+	// Check if we have voted yet
+	lastVoteTerm, err := r.stable.GetUint64(keyLastVoteTerm)
+	if err != nil && err.Error() != "not found" {
+		r.logger.Error("failed to get last vote term", "error", err)
+		return
+	}
+	lastVoteCandBytes, err := r.stable.Get(keyLastVoteCand)
+	if err != nil && err.Error() != "not found" {
+		r.logger.Error("failed to get last vote candidate", "error", err)
+		return
+	}
+
+	// Check if we've voted in this election before
+	if lastVoteTerm == req.Term && lastVoteCandBytes != nil {
+		r.logger.Info("duplicate requestVote for same term", "term", req.Term)
+		if bytes.Equal(lastVoteCandBytes, candidateBytes) {
+			r.logger.Warn("duplicate requestVote from", "candidate", candidate)
+			resp.Granted = true
+		}
+		return
+	}
+
+	// Reject if their last log entry is older than ours
+	lastIdx, lastTerm := r.getLastEntry()
+	if lastTerm > req.LastLogTerm {
+		r.logger.Warn("rejecting vote request since our last term is greater",
+			"candidate", candidate,
+			"last-term", lastTerm,
+			"last-candidate-term", req.LastLogTerm)
+		return
+	}
+
+	if lastTerm == req.LastLogTerm && lastIdx > req.LastLogIndex {
+		r.logger.Warn("rejecting vote request since our last index is greater",
+			"candidate", candidate,
+			"last-index", lastIdx,
+			"last-candidate-index", req.LastLogIndex)
+		return
+	}
+
+	// Persist a vote for safety
+	if err := r.persistVote(req.Term, candidateBytes); err != nil {
+		r.logger.Error("failed to persist vote", "error", err)
+		return
+	}
+
+	resp.Granted = true
+	r.setLastContact()
+}
+
+// requestPreVote is invoked when we get a pre-vote RPC call.
+func (r *Raft) requestPreVote(rpc RPC, req *RequestPreVoteRequest) {
+	defer metrics.MeasureSince([]string{"raft", "rpc", "requestVote"}, time.Now())
+	r.observe(*req)
+
+	// Setup a response
+	resp := &RequestPreVoteResponse{
+		RPCHeader: r.getRPCHeader(),
+		Term:      r.getCurrentTerm(),
+		Granted:   false,
+	}
+	var rpcErr error
+	defer func() {
+		rpc.Respond(resp, rpcErr)
+	}()
+
+	// Check if we have an existing leader [who's not the candidate]
+	var candidate ServerAddress
+	candidateID := ServerID(req.ID)
+
+	// If the Servers list is empty, the cluster is very likely trying to
+	// bootstrap, so grant the vote
+	if len(r.configurations.latest.Servers) > 0 && !inConfiguration(r.configurations.latest, candidateID) {
+		r.logger.Warn("rejecting pre-vote request since node is not in configuration",
+			"from", candidate)
+		return
+	}
+
+	if leaderAddr, leaderID := r.LeaderWithID(); leaderAddr != "" && leaderAddr != candidate {
+		r.logger.Warn("rejecting pre-vote request since we have a leader",
+			"from", candidate,
+			"leader", leaderAddr,
+			"leader-id", string(leaderID))
+		return
+	}
+
+	// Ignore an older term
+	if req.Term < r.getCurrentTerm() {
+		return
+	}
+
+	if req.Term > r.getCurrentTerm() {
+		// Continue processing here to possibly grant the pre-vote; in a
+		// "real" vote a newer term like this would transition us to follower
+		r.logger.Debug("received a requestPreVote with a newer term, grant the pre-vote")
+		resp.Term = req.Term
+	}
+
+	// If we get a pre-vote request from a non-voter and the request term is
+	// higher, do not grant the pre-vote. This can happen when a node,
+	// previously a voter, is converted to a non-voter
+	if len(r.configurations.latest.Servers) > 0 && !hasVote(r.configurations.latest, candidateID) {
+		r.logger.Warn("rejecting pre-vote request since node is not a voter", "from", candidate)
+		return
+	}
+
+	// Reject if their last log entry is older than ours
+	lastIdx, lastTerm := r.getLastEntry()
+	if lastTerm > req.LastLogTerm {
+		r.logger.Warn("rejecting pre-vote request since our last term is greater",
+			"candidate", candidate,
+			"last-term", lastTerm,
+			"last-candidate-term", req.LastLogTerm)
+		return
+	}
+
+	if lastTerm == req.LastLogTerm && lastIdx > req.LastLogIndex {
+		r.logger.Warn("rejecting pre-vote request since our last index is greater",
+			"candidate", candidate,
+			"last-index", lastIdx,
+			"last-candidate-index", req.LastLogIndex)
+		return
+	}
+
+	resp.Granted = true
+	r.setLastContact()
+}
+
+// installSnapshot is invoked when we get an InstallSnapshot RPC call.
+// We must be in the follower state for this, since it means we are
+// too far behind a leader for log replay. This must only be called
+// from the main thread.
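+// Leaders fall back to InstallSnapshot when a follower needs log entries
+// that have already been compacted away (see sendLatestSnapshot in
+// replication.go).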
+func (r *Raft) installSnapshot(rpc RPC, req *InstallSnapshotRequest) { + defer metrics.MeasureSince([]string{"raft", "rpc", "installSnapshot"}, time.Now()) + // Setup a response + resp := &InstallSnapshotResponse{ + Term: r.getCurrentTerm(), + Success: false, + } + var rpcErr error + defer func() { + _, _ = io.Copy(io.Discard, rpc.Reader) // ensure we always consume all the snapshot data from the stream [see issue #212] + rpc.Respond(resp, rpcErr) + }() + + // Sanity check the version + if req.SnapshotVersion < SnapshotVersionMin || + req.SnapshotVersion > SnapshotVersionMax { + rpcErr = fmt.Errorf("unsupported snapshot version %d", req.SnapshotVersion) + return + } + + // Ignore an older term + if req.Term < r.getCurrentTerm() { + r.logger.Info("ignoring installSnapshot request with older term than current term", + "request-term", req.Term, + "current-term", r.getCurrentTerm()) + return + } + + // Increase the term if we see a newer one + if req.Term > r.getCurrentTerm() { + // Ensure transition to follower + r.setState(Follower) + r.setCurrentTerm(req.Term) + resp.Term = req.Term + } + + // Save the current leader + if len(req.ID) > 0 { + r.setLeader(r.trans.DecodePeer(req.RPCHeader.Addr), ServerID(req.ID)) + } else { + r.setLeader(r.trans.DecodePeer(req.Leader), ServerID(req.ID)) + } + + // Create a new snapshot + var reqConfiguration Configuration + var reqConfigurationIndex uint64 + if req.SnapshotVersion > 0 { + reqConfiguration = DecodeConfiguration(req.Configuration) + reqConfigurationIndex = req.ConfigurationIndex + } else { + reqConfiguration, rpcErr = decodePeers(req.Peers, r.trans) + if rpcErr != nil { + r.logger.Error("failed to install snapshot", "error", rpcErr) + return + } + reqConfigurationIndex = req.LastLogIndex + } + version := getSnapshotVersion(r.protocolVersion) + sink, err := r.snapshots.Create(version, req.LastLogIndex, req.LastLogTerm, + reqConfiguration, reqConfigurationIndex, r.trans) + if err != nil { + r.logger.Error("failed to create snapshot to install", "error", err) + rpcErr = fmt.Errorf("failed to create snapshot: %v", err) + return + } + + // Separately track the progress of streaming a snapshot over the network + // because this too can take a long time. 
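+	// newCountingReader wraps rpc.Reader so the restore monitor below can
+	// track how many of the req.Size bytes have arrived while the copy to
+	// the sink runs.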
+ countingRPCReader := newCountingReader(rpc.Reader) + + // Spill the remote snapshot to disk + transferMonitor := startSnapshotRestoreMonitor(r.logger, countingRPCReader, req.Size, true) + n, err := io.Copy(sink, countingRPCReader) + transferMonitor.StopAndWait() + if err != nil { + sink.Cancel() + r.logger.Error("failed to copy snapshot", "error", err) + rpcErr = err + return + } + + // Check that we received it all + if n != req.Size { + sink.Cancel() + r.logger.Error("failed to receive whole snapshot", + "received", hclog.Fmt("%d / %d", n, req.Size)) + rpcErr = fmt.Errorf("short read") + return + } + + // Finalize the snapshot + if err := sink.Close(); err != nil { + r.logger.Error("failed to finalize snapshot", "error", err) + rpcErr = err + return + } + r.logger.Info("copied to local snapshot", "bytes", n) + + // Restore snapshot + future := &restoreFuture{ID: sink.ID()} + future.ShutdownCh = r.shutdownCh + future.init() + select { + case r.fsmMutateCh <- future: + case <-r.shutdownCh: + future.respond(ErrRaftShutdown) + return + } + + // Wait for the restore to happen + if err := future.Error(); err != nil { + r.logger.Error("failed to restore snapshot", "error", err) + rpcErr = err + return + } + + // Update the lastApplied so we don't replay old logs + r.setLastApplied(req.LastLogIndex) + + // Update the last stable snapshot info + r.setLastSnapshot(req.LastLogIndex, req.LastLogTerm) + + // Restore the peer set + r.setLatestConfiguration(reqConfiguration, reqConfigurationIndex) + r.setCommittedConfiguration(reqConfiguration, reqConfigurationIndex) + + // Clear old logs if r.logs is a MonotonicLogStore. Otherwise compact the + // logs. In both cases, log any errors and continue. + if mlogs, ok := r.logs.(MonotonicLogStore); ok && mlogs.IsMonotonic() { + if err := r.removeOldLogs(); err != nil { + r.logger.Error("failed to reset logs", "error", err) + } + } else if err := r.compactLogs(req.LastLogIndex); err != nil { + r.logger.Error("failed to compact logs", "error", err) + } + + r.logger.Info("Installed remote snapshot") + resp.Success = true + r.setLastContact() +} + +// setLastContact is used to set the last contact time to now +func (r *Raft) setLastContact() { + r.lastContactLock.Lock() + r.lastContact = time.Now() + r.lastContactLock.Unlock() +} + +type voteResult struct { + RequestVoteResponse + voterID ServerID +} + +type preVoteResult struct { + RequestPreVoteResponse + voterID ServerID +} + +// electSelf is used to send a RequestVote RPC to all peers, and vote for +// ourself. This has the side affecting of incrementing the current term. The +// response channel returned is used to wait for all the responses (including a +// vote for ourself). This must only be called from the main thread. 
+func (r *Raft) electSelf() <-chan *voteResult { + // Create a response channel + respCh := make(chan *voteResult, len(r.configurations.latest.Servers)) + + // Increment the term + newTerm := r.getCurrentTerm() + 1 + + r.setCurrentTerm(newTerm) + // Construct the request + lastIdx, lastTerm := r.getLastEntry() + req := &RequestVoteRequest{ + RPCHeader: r.getRPCHeader(), + Term: newTerm, + // this is needed for retro compatibility, before RPCHeader.Addr was added + Candidate: r.trans.EncodePeer(r.localID, r.localAddr), + LastLogIndex: lastIdx, + LastLogTerm: lastTerm, + LeadershipTransfer: r.candidateFromLeadershipTransfer.Load(), + } + + // Construct a function to ask for a vote + askPeer := func(peer Server) { + r.goFunc(func() { + defer metrics.MeasureSince([]string{"raft", "candidate", "electSelf"}, time.Now()) + resp := &voteResult{voterID: peer.ID} + err := r.trans.RequestVote(peer.ID, peer.Address, req, &resp.RequestVoteResponse) + if err != nil { + r.logger.Error("failed to make requestVote RPC", + "target", peer, + "error", err, + "term", req.Term) + resp.Term = req.Term + resp.Granted = false + } + respCh <- resp + }) + } + + // For each peer, request a vote + for _, server := range r.configurations.latest.Servers { + if server.Suffrage == Voter { + if server.ID == r.localID { + r.logger.Debug("voting for self", "term", req.Term, "id", r.localID) + + // Persist a vote for ourselves + if err := r.persistVote(req.Term, req.RPCHeader.Addr); err != nil { + r.logger.Error("failed to persist vote", "error", err) + return nil + + } + // Include our own vote + respCh <- &voteResult{ + RequestVoteResponse: RequestVoteResponse{ + RPCHeader: r.getRPCHeader(), + Term: req.Term, + Granted: true, + }, + voterID: r.localID, + } + } else { + r.logger.Debug("asking for vote", "term", req.Term, "from", server.ID, "address", server.Address) + askPeer(server) + } + } + } + + return respCh +} + +// preElectSelf is used to send a RequestPreVote RPC to all peers, and vote for +// ourself. This will not increment the current term. The +// response channel returned is used to wait for all the responses (including a +// vote for ourself). +// This must only be called from the main thread. +func (r *Raft) preElectSelf() <-chan *preVoteResult { + + // At this point transport should support pre-vote + // but check just in case + prevoteTrans, prevoteTransSupported := r.trans.(WithPreVote) + if !prevoteTransSupported { + panic("preElection is not possible if the transport don't support pre-vote") + } + + // Create a response channel + respCh := make(chan *preVoteResult, len(r.configurations.latest.Servers)) + + // Propose the next term without actually changing our state + newTerm := r.getCurrentTerm() + 1 + + // Construct the request + lastIdx, lastTerm := r.getLastEntry() + req := &RequestPreVoteRequest{ + RPCHeader: r.getRPCHeader(), + Term: newTerm, + LastLogIndex: lastIdx, + LastLogTerm: lastTerm, + } + + // Construct a function to ask for a vote + askPeer := func(peer Server) { + r.goFunc(func() { + defer metrics.MeasureSince([]string{"raft", "candidate", "preElectSelf"}, time.Now()) + resp := &preVoteResult{voterID: peer.ID} + + err := prevoteTrans.RequestPreVote(peer.ID, peer.Address, req, &resp.RequestPreVoteResponse) + + // If the target server do not support Pre-vote RPC we count this as a granted vote to allow + // the cluster to progress. 
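+			// Matching on the RPC error string is a best-effort compatibility
+			// probe: servers that predate pre-vote reply with
+			// rpcUnexpectedCommandError because they don't recognize the
+			// RequestPreVote command.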
+ if err != nil && strings.Contains(err.Error(), rpcUnexpectedCommandError) { + r.logger.Error("target does not support pre-vote RPC, treating as granted", + "target", peer, + "error", err, + "term", req.Term) + resp.Term = req.Term + resp.Granted = true + } else if err != nil { + r.logger.Error("failed to make requestVote RPC", + "target", peer, + "error", err, + "term", req.Term) + resp.Term = req.Term + resp.Granted = false + } + respCh <- resp + + }) + } + + // For each peer, request a vote + for _, server := range r.configurations.latest.Servers { + if server.Suffrage == Voter { + if server.ID == r.localID { + r.logger.Debug("pre-voting for self", "term", req.Term, "id", r.localID) + + // cast a pre-vote for our self + respCh <- &preVoteResult{ + RequestPreVoteResponse: RequestPreVoteResponse{ + RPCHeader: r.getRPCHeader(), + Term: req.Term, + Granted: true, + }, + voterID: r.localID, + } + } else { + r.logger.Debug("asking for pre-vote", "term", req.Term, "from", server.ID, "address", server.Address) + askPeer(server) + } + } + } + + return respCh +} + +// persistVote is used to persist our vote for safety. +func (r *Raft) persistVote(term uint64, candidate []byte) error { + if err := r.stable.SetUint64(keyLastVoteTerm, term); err != nil { + return err + } + if err := r.stable.Set(keyLastVoteCand, candidate); err != nil { + return err + } + return nil +} + +// setCurrentTerm is used to set the current term in a durable manner. +func (r *Raft) setCurrentTerm(t uint64) { + // Persist to disk first + if err := r.stable.SetUint64(keyCurrentTerm, t); err != nil { + panic(fmt.Errorf("failed to save current term: %v", err)) + } + r.raftState.setCurrentTerm(t) +} + +// setState is used to update the current state. Any state +// transition causes the known leader to be cleared. This means +// that leader should be set only after updating the state. +func (r *Raft) setState(state RaftState) { + r.setLeader("", "") + oldState := r.raftState.getState() + r.raftState.setState(state) + if oldState != state { + r.observe(state) + } +} + +// pickServer returns the follower that is most up to date and participating in quorum. +// Because it accesses leaderstate, it should only be called from the leaderloop. +func (r *Raft) pickServer() *Server { + var pick *Server + var current uint64 + for _, server := range r.configurations.latest.Servers { + if server.ID == r.localID || server.Suffrage != Voter { + continue + } + state, ok := r.leaderState.replState[server.ID] + if !ok { + continue + } + nextIdx := atomic.LoadUint64(&state.nextIndex) + if nextIdx > current { + current = nextIdx + tmp := server + pick = &tmp + } + } + return pick +} + +// initiateLeadershipTransfer starts the leadership on the leader side, by +// sending a message to the leadershipTransferCh, to make sure it runs in the +// mainloop. +func (r *Raft) initiateLeadershipTransfer(id *ServerID, address *ServerAddress) LeadershipTransferFuture { + future := &leadershipTransferFuture{ID: id, Address: address} + future.init() + + if id != nil && *id == r.localID { + err := fmt.Errorf("cannot transfer leadership to itself") + r.logger.Info(err.Error()) + future.respond(err) + return future + } + + select { + case r.leadershipTransferCh <- future: + return future + case <-r.shutdownCh: + return errorFuture{ErrRaftShutdown} + default: + return errorFuture{ErrEnqueueTimeout} + } +} + +// timeoutNow is what happens when a server receives a TimeoutNowRequest. 
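+// On receipt the server starts an election immediately instead of waiting
+// for its election timeout, and candidateFromLeadershipTransfer lets peers
+// grant the resulting vote even though they still know a live leader.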
+func (r *Raft) timeoutNow(rpc RPC, req *TimeoutNowRequest) { + r.setLeader("", "") + r.setState(Candidate) + r.candidateFromLeadershipTransfer.Store(true) + rpc.Respond(&TimeoutNowResponse{}, nil) +} + +// setLatestConfiguration stores the latest configuration and updates a copy of it. +func (r *Raft) setLatestConfiguration(c Configuration, i uint64) { + r.configurations.latest = c + r.configurations.latestIndex = i + r.latestConfiguration.Store(c.Clone()) +} + +// setCommittedConfiguration stores the committed configuration. +func (r *Raft) setCommittedConfiguration(c Configuration, i uint64) { + r.configurations.committed = c + r.configurations.committedIndex = i +} + +// getLatestConfiguration reads the configuration from a copy of the main +// configuration, which means it can be accessed independently from the main +// loop. +func (r *Raft) getLatestConfiguration() Configuration { + // this switch catches the case where this is called without having set + // a configuration previously. + switch c := r.latestConfiguration.Load().(type) { + case Configuration: + return c + default: + return Configuration{} + } +} diff --git a/vendor/github.com/hashicorp/raft/replication.go b/vendor/github.com/hashicorp/raft/replication.go new file mode 100644 index 0000000000000..c0343df3262dd --- /dev/null +++ b/vendor/github.com/hashicorp/raft/replication.go @@ -0,0 +1,653 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package raft + +import ( + "errors" + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/armon/go-metrics" +) + +const ( + maxFailureScale = 12 + failureWait = 10 * time.Millisecond +) + +var ( + // ErrLogNotFound indicates a given log entry is not available. + ErrLogNotFound = errors.New("log not found") + + // ErrPipelineReplicationNotSupported can be returned by the transport to + // signal that pipeline replication is not supported in general, and that + // no error message should be produced. + ErrPipelineReplicationNotSupported = errors.New("pipeline replication not supported") +) + +// followerReplication is in charge of sending snapshots and log entries from +// this leader during this particular term to a remote follower. +type followerReplication struct { + // currentTerm and nextIndex must be kept at the top of the struct so + // they're 64 bit aligned which is a requirement for atomic ops on 32 bit + // platforms. + + // currentTerm is the term of this leader, to be included in AppendEntries + // requests. + currentTerm uint64 + + // nextIndex is the index of the next log entry to send to the follower, + // which may fall past the end of the log. + nextIndex uint64 + + // peer contains the network address and ID of the remote follower. + peer Server + // peerLock protects 'peer' + peerLock sync.RWMutex + + // commitment tracks the entries acknowledged by followers so that the + // leader's commit index can advance. It is updated on successful + // AppendEntries responses. + commitment *commitment + + // stopCh is notified/closed when this leader steps down or the follower is + // removed from the cluster. In the follower removed case, it carries a log + // index; replication should be attempted with a best effort up through that + // index, before exiting. + stopCh chan uint64 + + // triggerCh is notified every time new entries are appended to the log. + triggerCh chan struct{} + + // triggerDeferErrorCh is used to provide a backchannel. By sending a + // deferErr, the sender can be notifed when the replication is done. 
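+	// (Leadership transfer relies on this to wait until the target follower
+	// has caught up before sending it TimeoutNow.)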
+	triggerDeferErrorCh chan *deferError
+
+	// lastContact is updated to the current time whenever any response is
+	// received from the follower (successful or not). This is used to check
+	// whether the leader should step down (Raft.checkLeaderLease()).
+	lastContact time.Time
+	// lastContactLock protects 'lastContact'.
+	lastContactLock sync.RWMutex
+
+	// failures counts the number of failed RPCs since the last success, which is
+	// used to apply backoff.
+	failures uint64
+
+	// notifyCh is notified to send out a heartbeat, which is used to check that
+	// this server is still leader.
+	notifyCh chan struct{}
+	// notify is a map of futures to be resolved upon receipt of an
+	// acknowledgement, then cleared from this map.
+	notify map[*verifyFuture]struct{}
+	// notifyLock protects 'notify'.
+	notifyLock sync.Mutex
+
+	// stepDown is used to indicate to the leader that we
+	// should step down based on information from a follower.
+	stepDown chan struct{}
+
+	// allowPipeline is used to determine when to pipeline the AppendEntries RPCs.
+	// It is private to this replication goroutine.
+	allowPipeline bool
+}
+
+// notifyAll is used to notify all the waiting verify futures
+// if the follower believes we are still the leader.
+func (s *followerReplication) notifyAll(leader bool) {
+	// Clear the waiting notifies, minimizing lock time
+	s.notifyLock.Lock()
+	n := s.notify
+	s.notify = make(map[*verifyFuture]struct{})
+	s.notifyLock.Unlock()
+
+	// Submit our votes
+	for v := range n {
+		v.vote(leader)
+	}
+}
+
+// cleanNotify is used to remove a pending verify future from the notify map.
+func (s *followerReplication) cleanNotify(v *verifyFuture) {
+	s.notifyLock.Lock()
+	delete(s.notify, v)
+	s.notifyLock.Unlock()
+}
+
+// LastContact returns the time of last contact.
+func (s *followerReplication) LastContact() time.Time {
+	s.lastContactLock.RLock()
+	last := s.lastContact
+	s.lastContactLock.RUnlock()
+	return last
+}
+
+// setLastContact sets the last contact to the current time.
+func (s *followerReplication) setLastContact() {
+	s.lastContactLock.Lock()
+	s.lastContact = time.Now()
+	s.lastContactLock.Unlock()
+}
+
+// replicate is a long-running routine that replicates log entries to a single
+// follower.
+func (r *Raft) replicate(s *followerReplication) {
+	// Start an async heartbeating routine
+	stopHeartbeat := make(chan struct{})
+	defer close(stopHeartbeat)
+	r.goFunc(func() { r.heartbeat(s, stopHeartbeat) })
+
+RPC:
+	shouldStop := false
+	for !shouldStop {
+		select {
+		case maxIndex := <-s.stopCh:
+			// Make a best effort to replicate up to this index
+			if maxIndex > 0 {
+				r.replicateTo(s, maxIndex)
+			}
+			return
+		case deferErr := <-s.triggerDeferErrorCh:
+			lastLogIdx, _ := r.getLastLog()
+			shouldStop = r.replicateTo(s, lastLogIdx)
+			if !shouldStop {
+				deferErr.respond(nil)
+			} else {
+				deferErr.respond(fmt.Errorf("replication failed"))
+			}
+		case <-s.triggerCh:
+			lastLogIdx, _ := r.getLastLog()
+			shouldStop = r.replicateTo(s, lastLogIdx)
+		// This is _not_ our heartbeat mechanism but is to ensure
+		// followers quickly learn the leader's commit index when
+		// raft commits stop flowing naturally. The actual heartbeats
+		// can't do this to keep them unblocked by disk IO on the
+		// follower. See https://github.com/hashicorp/raft/issues/282.
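+		// As a result, even an idle cluster replicates at least once per
+		// CommitTimeout, which is how followers observe an advancing
+		// LeaderCommitIndex.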
+ case <-randomTimeout(r.config().CommitTimeout): + lastLogIdx, _ := r.getLastLog() + shouldStop = r.replicateTo(s, lastLogIdx) + } + + // If things looks healthy, switch to pipeline mode + if !shouldStop && s.allowPipeline { + goto PIPELINE + } + } + return + +PIPELINE: + // Disable until re-enabled + s.allowPipeline = false + + // Replicates using a pipeline for high performance. This method + // is not able to gracefully recover from errors, and so we fall back + // to standard mode on failure. + if err := r.pipelineReplicate(s); err != nil { + if err != ErrPipelineReplicationNotSupported { + s.peerLock.RLock() + peer := s.peer + s.peerLock.RUnlock() + r.logger.Error("failed to start pipeline replication to", "peer", peer, "error", err) + } + } + goto RPC +} + +// replicateTo is a helper to replicate(), used to replicate the logs up to a +// given last index. +// If the follower log is behind, we take care to bring them up to date. +func (r *Raft) replicateTo(s *followerReplication, lastIndex uint64) (shouldStop bool) { + // Create the base request + var req AppendEntriesRequest + var resp AppendEntriesResponse + var start time.Time + var peer Server + +START: + // Prevent an excessive retry rate on errors + if s.failures > 0 { + select { + case <-time.After(backoff(failureWait, s.failures, maxFailureScale)): + case <-r.shutdownCh: + } + } + + s.peerLock.RLock() + peer = s.peer + s.peerLock.RUnlock() + + // Setup the request + if err := r.setupAppendEntries(s, &req, atomic.LoadUint64(&s.nextIndex), lastIndex); err == ErrLogNotFound { + goto SEND_SNAP + } else if err != nil { + return + } + + // Make the RPC call + start = time.Now() + if err := r.trans.AppendEntries(peer.ID, peer.Address, &req, &resp); err != nil { + r.logger.Error("failed to appendEntries to", "peer", peer, "error", err) + s.failures++ + return + } + appendStats(string(peer.ID), start, float32(len(req.Entries))) + + // Check for a newer term, stop running + if resp.Term > req.Term { + r.handleStaleTerm(s) + return true + } + + // Update the last contact + s.setLastContact() + + // Update s based on success + if resp.Success { + // Update our replication state + updateLastAppended(s, &req) + + // Clear any failures, allow pipelining + s.failures = 0 + s.allowPipeline = true + } else { + atomic.StoreUint64(&s.nextIndex, max(min(s.nextIndex-1, resp.LastLog+1), 1)) + if resp.NoRetryBackoff { + s.failures = 0 + } else { + s.failures++ + } + r.logger.Warn("appendEntries rejected, sending older logs", "peer", peer, "next", atomic.LoadUint64(&s.nextIndex)) + } + +CHECK_MORE: + // Poll the stop channel here in case we are looping and have been asked + // to stop, or have stepped down as leader. Even for the best effort case + // where we are asked to replicate to a given index and then shutdown, + // it's better to not loop in here to send lots of entries to a straggler + // that's leaving the cluster anyways. 
+ select { + case <-s.stopCh: + return true + default: + } + + // Check if there are more logs to replicate + if atomic.LoadUint64(&s.nextIndex) <= lastIndex { + goto START + } + return + + // SEND_SNAP is used when we fail to get a log, usually because the follower + // is too far behind, and we must ship a snapshot down instead +SEND_SNAP: + if stop, err := r.sendLatestSnapshot(s); stop { + return true + } else if err != nil { + r.logger.Error("failed to send snapshot to", "peer", peer, "error", err) + return + } + + // Check if there is more to replicate + goto CHECK_MORE +} + +// sendLatestSnapshot is used to send the latest snapshot we have +// down to our follower. +func (r *Raft) sendLatestSnapshot(s *followerReplication) (bool, error) { + // Get the snapshots + snapshots, err := r.snapshots.List() + if err != nil { + r.logger.Error("failed to list snapshots", "error", err) + return false, err + } + + // Check we have at least a single snapshot + if len(snapshots) == 0 { + return false, fmt.Errorf("no snapshots found") + } + + // Open the most recent snapshot + snapID := snapshots[0].ID + meta, snapshot, err := r.snapshots.Open(snapID) + if err != nil { + r.logger.Error("failed to open snapshot", "id", snapID, "error", err) + return false, err + } + defer snapshot.Close() + + // Setup the request + req := InstallSnapshotRequest{ + RPCHeader: r.getRPCHeader(), + SnapshotVersion: meta.Version, + Term: s.currentTerm, + // this is needed for retro compatibility, before RPCHeader.Addr was added + Leader: r.trans.EncodePeer(r.localID, r.localAddr), + LastLogIndex: meta.Index, + LastLogTerm: meta.Term, + Peers: meta.Peers, + Size: meta.Size, + Configuration: EncodeConfiguration(meta.Configuration), + ConfigurationIndex: meta.ConfigurationIndex, + } + + s.peerLock.RLock() + peer := s.peer + s.peerLock.RUnlock() + + // Make the call + start := time.Now() + var resp InstallSnapshotResponse + if err := r.trans.InstallSnapshot(peer.ID, peer.Address, &req, &resp, snapshot); err != nil { + r.logger.Error("failed to install snapshot", "id", snapID, "error", err) + s.failures++ + return false, err + } + labels := []metrics.Label{{Name: "peer_id", Value: string(peer.ID)}} + metrics.MeasureSinceWithLabels([]string{"raft", "replication", "installSnapshot"}, start, labels) + // Duplicated information. Kept for backward compatibility. + metrics.MeasureSince([]string{"raft", "replication", "installSnapshot", string(peer.ID)}, start) + + // Check for a newer term, stop running + if resp.Term > req.Term { + r.handleStaleTerm(s) + return true, nil + } + + // Update the last contact + s.setLastContact() + + // Check for success + if resp.Success { + // Update the indexes + atomic.StoreUint64(&s.nextIndex, meta.Index+1) + s.commitment.match(peer.ID, meta.Index) + + // Clear any failures + s.failures = 0 + + // Notify we are still leader + s.notifyAll(true) + } else { + s.failures++ + r.logger.Warn("installSnapshot rejected to", "peer", peer) + } + return false, nil +} + +// heartbeat is used to periodically invoke AppendEntries on a peer +// to ensure they don't time out. This is done async of replicate(), +// since that routine could potentially be blocked on disk IO. 
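+// Heartbeat requests carry no entries and no commit index (see the note in
+// replicate() above), so they stay small and are never blocked behind large
+// AppendEntries batches.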
+func (r *Raft) heartbeat(s *followerReplication, stopCh chan struct{}) { + var failures uint64 + req := AppendEntriesRequest{ + RPCHeader: r.getRPCHeader(), + Term: s.currentTerm, + // this is needed for retro compatibility, before RPCHeader.Addr was added + Leader: r.trans.EncodePeer(r.localID, r.localAddr), + } + + var resp AppendEntriesResponse + for { + // Wait for the next heartbeat interval or forced notify + select { + case <-s.notifyCh: + case <-randomTimeout(r.config().HeartbeatTimeout / 10): + case <-stopCh: + return + } + + s.peerLock.RLock() + peer := s.peer + s.peerLock.RUnlock() + + start := time.Now() + if err := r.trans.AppendEntries(peer.ID, peer.Address, &req, &resp); err != nil { + nextBackoffTime := cappedExponentialBackoff(failureWait, failures, maxFailureScale, r.config().HeartbeatTimeout/2) + r.logger.Error("failed to heartbeat to", "peer", peer.Address, "backoff time", + nextBackoffTime, "error", err) + r.observe(FailedHeartbeatObservation{PeerID: peer.ID, LastContact: s.LastContact()}) + failures++ + select { + case <-time.After(nextBackoffTime): + case <-stopCh: + return + } + } else { + if failures > 0 { + r.observe(ResumedHeartbeatObservation{PeerID: peer.ID}) + } + s.setLastContact() + failures = 0 + labels := []metrics.Label{{Name: "peer_id", Value: string(peer.ID)}} + metrics.MeasureSinceWithLabels([]string{"raft", "replication", "heartbeat"}, start, labels) + // Duplicated information. Kept for backward compatibility. + metrics.MeasureSince([]string{"raft", "replication", "heartbeat", string(peer.ID)}, start) + s.notifyAll(resp.Success) + } + } +} + +// pipelineReplicate is used when we have synchronized our state with the follower, +// and want to switch to a higher performance pipeline mode of replication. +// We only pipeline AppendEntries commands, and if we ever hit an error, we fall +// back to the standard replication which can handle more complex situations. 
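+// Pipelining decouples sending from acknowledgement: pipelineSend keeps
+// pushing requests while pipelineDecode consumes responses on its own
+// goroutine, so a network round trip no longer gates each batch.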
+func (r *Raft) pipelineReplicate(s *followerReplication) error { + s.peerLock.RLock() + peer := s.peer + s.peerLock.RUnlock() + + // Create a new pipeline + pipeline, err := r.trans.AppendEntriesPipeline(peer.ID, peer.Address) + if err != nil { + return err + } + defer pipeline.Close() + + // Log start and stop of pipeline + r.logger.Info("pipelining replication", "peer", peer) + defer r.logger.Info("aborting pipeline replication", "peer", peer) + + // Create a shutdown and finish channel + stopCh := make(chan struct{}) + finishCh := make(chan struct{}) + + // Start a dedicated decoder + r.goFunc(func() { r.pipelineDecode(s, pipeline, stopCh, finishCh) }) + + // Start pipeline sends at the last good nextIndex + nextIndex := atomic.LoadUint64(&s.nextIndex) + + shouldStop := false +SEND: + for !shouldStop { + select { + case <-finishCh: + break SEND + case maxIndex := <-s.stopCh: + // Make a best effort to replicate up to this index + if maxIndex > 0 { + r.pipelineSend(s, pipeline, &nextIndex, maxIndex) + } + break SEND + case deferErr := <-s.triggerDeferErrorCh: + lastLogIdx, _ := r.getLastLog() + shouldStop = r.pipelineSend(s, pipeline, &nextIndex, lastLogIdx) + if !shouldStop { + deferErr.respond(nil) + } else { + deferErr.respond(fmt.Errorf("replication failed")) + } + case <-s.triggerCh: + lastLogIdx, _ := r.getLastLog() + shouldStop = r.pipelineSend(s, pipeline, &nextIndex, lastLogIdx) + case <-randomTimeout(r.config().CommitTimeout): + lastLogIdx, _ := r.getLastLog() + shouldStop = r.pipelineSend(s, pipeline, &nextIndex, lastLogIdx) + } + } + + // Stop our decoder, and wait for it to finish + close(stopCh) + select { + case <-finishCh: + case <-r.shutdownCh: + } + return nil +} + +// pipelineSend is used to send data over a pipeline. It is a helper to +// pipelineReplicate. +func (r *Raft) pipelineSend(s *followerReplication, p AppendPipeline, nextIdx *uint64, lastIndex uint64) (shouldStop bool) { + // Create a new append request + req := new(AppendEntriesRequest) + if err := r.setupAppendEntries(s, req, *nextIdx, lastIndex); err != nil { + return true + } + + // Pipeline the append entries + if _, err := p.AppendEntries(req, new(AppendEntriesResponse)); err != nil { + r.logger.Error("failed to pipeline appendEntries", "peer", s.peer, "error", err) + return true + } + + // Increase the next send log to avoid re-sending old logs + if n := len(req.Entries); n > 0 { + last := req.Entries[n-1] + atomic.StoreUint64(nextIdx, last.Index+1) + } + return false +} + +// pipelineDecode is used to decode the responses of pipelined requests. +func (r *Raft) pipelineDecode(s *followerReplication, p AppendPipeline, stopCh, finishCh chan struct{}) { + defer close(finishCh) + respCh := p.Consumer() + for { + select { + case ready := <-respCh: + s.peerLock.RLock() + peer := s.peer + s.peerLock.RUnlock() + + req, resp := ready.Request(), ready.Response() + appendStats(string(peer.ID), ready.Start(), float32(len(req.Entries))) + + // Check for a newer term, stop running + if resp.Term > req.Term { + r.handleStaleTerm(s) + return + } + + // Update the last contact + s.setLastContact() + + // Abort pipeline if not successful + if !resp.Success { + return + } + + // Update our replication state + updateLastAppended(s, req) + case <-stopCh: + return + } + } +} + +// setupAppendEntries is used to setup an append entries request. 
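+// It fills in the term and leader identity, the leader's commit index, the
+// previous log entry/term for the follower's consistency check, and up to
+// MaxAppendEntries new entries starting at nextIndex.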
+func (r *Raft) setupAppendEntries(s *followerReplication, req *AppendEntriesRequest, nextIndex, lastIndex uint64) error {
+	req.RPCHeader = r.getRPCHeader()
+	req.Term = s.currentTerm
+	// this is needed for backward compatibility, from before RPCHeader.Addr was added
+	req.Leader = r.trans.EncodePeer(r.localID, r.localAddr)
+	req.LeaderCommitIndex = r.getCommitIndex()
+	if err := r.setPreviousLog(req, nextIndex); err != nil {
+		return err
+	}
+	if err := r.setNewLogs(req, nextIndex, lastIndex); err != nil {
+		return err
+	}
+	return nil
+}
+
+// setPreviousLog is used to set up the PrevLogEntry and PrevLogTerm for an
+// AppendEntriesRequest given the next index to replicate.
+func (r *Raft) setPreviousLog(req *AppendEntriesRequest, nextIndex uint64) error {
+	// Guard for the first index, since there is no 0 log entry
+	// Guard against the previous index being a snapshot as well
+	lastSnapIdx, lastSnapTerm := r.getLastSnapshot()
+	if nextIndex == 1 {
+		req.PrevLogEntry = 0
+		req.PrevLogTerm = 0
+
+	} else if (nextIndex - 1) == lastSnapIdx {
+		req.PrevLogEntry = lastSnapIdx
+		req.PrevLogTerm = lastSnapTerm
+
+	} else {
+		var l Log
+		if err := r.logs.GetLog(nextIndex-1, &l); err != nil {
+			r.logger.Error("failed to get log", "index", nextIndex-1, "error", err)
+			return err
+		}
+
+		// Set the previous index and term from the fetched log entry
+		req.PrevLogEntry = l.Index
+		req.PrevLogTerm = l.Term
+	}
+	return nil
+}
+
+// setNewLogs is used to set up the logs which should be appended for a request.
+func (r *Raft) setNewLogs(req *AppendEntriesRequest, nextIndex, lastIndex uint64) error {
+	// Append up to MaxAppendEntries or up to the lastIndex. We need to use a
+	// consistent value for maxAppendEntries in the lines below in case it ever
+	// becomes reloadable.
+	maxAppendEntries := r.config().MaxAppendEntries
+	req.Entries = make([]*Log, 0, maxAppendEntries)
+	maxIndex := min(nextIndex+uint64(maxAppendEntries)-1, lastIndex)
+	for i := nextIndex; i <= maxIndex; i++ {
+		oldLog := new(Log)
+		if err := r.logs.GetLog(i, oldLog); err != nil {
+			r.logger.Error("failed to get log", "index", i, "error", err)
+			return err
+		}
+		req.Entries = append(req.Entries, oldLog)
+	}
+	return nil
+}
+
+// appendStats is used to emit stats about an AppendEntries invocation.
+func appendStats(peer string, start time.Time, logs float32) {
+	labels := []metrics.Label{{Name: "peer_id", Value: peer}}
+	metrics.MeasureSinceWithLabels([]string{"raft", "replication", "appendEntries", "rpc"}, start, labels)
+	metrics.IncrCounterWithLabels([]string{"raft", "replication", "appendEntries", "logs"}, logs, labels)
+	// Duplicated information. Kept for backward compatibility.
+	metrics.MeasureSince([]string{"raft", "replication", "appendEntries", "rpc", peer}, start)
+	metrics.IncrCounter([]string{"raft", "replication", "appendEntries", "logs", peer}, logs)
+}
+
+// handleStaleTerm is used when a follower indicates that we have a stale term.
+func (r *Raft) handleStaleTerm(s *followerReplication) {
+	r.logger.Error("peer has newer term, stopping replication", "peer", s.peer)
+	s.notifyAll(false) // No longer leader
+	asyncNotifyCh(s.stepDown)
+}
+
+// updateLastAppended is used to update follower replication state after a
+// successful AppendEntries RPC.
+// TODO: This isn't used during InstallSnapshot, but the code there is similar.
+func updateLastAppended(s *followerReplication, req *AppendEntriesRequest) {
+	// Mark any inflight logs as committed
+	if logs := req.Entries; len(logs) > 0 {
+		last := logs[len(logs)-1]
+		atomic.StoreUint64(&s.nextIndex, last.Index+1)
+		s.commitment.match(s.peer.ID, last.Index)
+	}
+
+	// Notify still leader
+	s.notifyAll(true)
+}
diff --git a/vendor/github.com/hashicorp/raft/saturation.go b/vendor/github.com/hashicorp/raft/saturation.go
new file mode 100644
index 0000000000000..508f08fd7fd22
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/saturation.go
@@ -0,0 +1,114 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package raft
+
+import (
+	"math"
+	"time"
+
+	"github.com/armon/go-metrics"
+)
+
+// saturationMetric measures the saturation (percentage of time spent working vs
+// waiting for work) of an event processing loop, such as runFSM. It reports the
+// saturation as a gauge metric (at most) once every reportInterval.
+//
+// Callers must instrument their loop with calls to sleeping and working, starting
+// with a call to sleeping.
+//
+// Note: the caller must be single-threaded and saturationMetric is not safe for
+// concurrent use by multiple goroutines.
+type saturationMetric struct {
+	reportInterval time.Duration
+
+	// slept contains time for which the event processing loop was sleeping rather
+	// than working in the period since lastReport.
+	slept time.Duration
+
+	// lost contains time that is considered lost due to incorrect use of
+	// saturationMetricBucket (e.g. calling sleeping() or working() multiple
+	// times in succession) in the period since lastReport.
+	lost time.Duration
+
+	lastReport, sleepBegan, workBegan time.Time
+
+	// These are overwritten in tests.
+	nowFn    func() time.Time
+	reportFn func(float32)
+}
+
+// newSaturationMetric creates a saturationMetric that will update the gauge
+// with the given name at the given reportInterval.
+func newSaturationMetric(name []string, reportInterval time.Duration) *saturationMetric {
+	m := &saturationMetric{
+		reportInterval: reportInterval,
+		nowFn:          time.Now,
+		lastReport:     time.Now(),
+		reportFn:       func(sat float32) { metrics.AddSample(name, sat) },
+	}
+	return m
+}
+
+// sleeping records the time at which the loop began waiting for work. After the
+// initial call it must always be followed by a call to working.
+func (s *saturationMetric) sleeping() {
+	now := s.nowFn()
+
+	if !s.sleepBegan.IsZero() {
+		// sleeping called twice in succession. Count that time as lost rather than
+		// measuring nonsense.
+		s.lost += now.Sub(s.sleepBegan)
+	}
+
+	s.sleepBegan = now
+	s.workBegan = time.Time{}
+	s.report()
+}
+
+// working records the time at which the loop began working. It must always be
+// preceded by a call to sleeping.
+func (s *saturationMetric) working() {
+	now := s.nowFn()
+
+	if s.workBegan.IsZero() {
+		if s.sleepBegan.IsZero() {
+			// working called before the initial call to sleeping. Count that time as
+			// lost rather than measuring nonsense.
+			s.lost += now.Sub(s.lastReport)
+		} else {
+			s.slept += now.Sub(s.sleepBegan)
+		}
+	} else {
+		// working called twice in succession. Count that time as lost rather than
+		// measuring nonsense.
+		s.lost += now.Sub(s.workBegan)
+	}
+
+	s.workBegan = now
+	s.sleepBegan = time.Time{}
+	s.report()
+}
+
+// report updates the gauge if reportInterval has passed since our last report.
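+// The reported value is
+//
+//	saturation = (total - slept) / total, where total = timeSinceLastReport - lost
+//
+// rounded to two decimal places.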
+func (s *saturationMetric) report() {
+	now := s.nowFn()
+	timeSinceLastReport := now.Sub(s.lastReport)
+
+	if timeSinceLastReport < s.reportInterval {
+		return
+	}
+
+	var saturation float64
+	total := timeSinceLastReport - s.lost
+	if total != 0 {
+		saturation = float64(total-s.slept) / float64(total)
+		saturation = math.Round(saturation*100) / 100
+	}
+	s.reportFn(float32(saturation))
+
+	s.slept = 0
+	s.lost = 0
+	s.lastReport = now
+}
diff --git a/vendor/github.com/hashicorp/raft/snapshot.go b/vendor/github.com/hashicorp/raft/snapshot.go
new file mode 100644
index 0000000000000..89d11fda4162d
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/snapshot.go
@@ -0,0 +1,278 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package raft
+
+import (
+	"fmt"
+	"io"
+	"time"
+
+	"github.com/armon/go-metrics"
+)
+
+// SnapshotMeta holds metadata about a snapshot.
+type SnapshotMeta struct {
+	// Version is the version number of the snapshot metadata. This does not cover
+	// the application's data in the snapshot, that should be versioned
+	// separately.
+	Version SnapshotVersion
+
+	// ID is opaque to the store, and is used for opening.
+	ID string
+
+	// Index and Term store when the snapshot was taken.
+	Index uint64
+	Term  uint64
+
+	// Peers is deprecated and used to support version 0 snapshots, but will
+	// be populated in version 1 snapshots as well to help with upgrades.
+	Peers []byte
+
+	// Configuration and ConfigurationIndex are present in version 1
+	// snapshots and later.
+	Configuration      Configuration
+	ConfigurationIndex uint64
+
+	// Size is the size of the snapshot in bytes.
+	Size int64
+}
+
+// SnapshotStore interface is used to allow for flexible implementations
+// of snapshot storage and retrieval. For example, a client could implement
+// a shared state store such as S3, allowing new nodes to restore snapshots
+// without streaming from the leader.
+type SnapshotStore interface {
+	// Create is used to begin a snapshot at a given index and term, and with
+	// the given committed configuration. The version parameter controls
+	// which snapshot version to create.
+	Create(version SnapshotVersion, index, term uint64, configuration Configuration,
+		configurationIndex uint64, trans Transport) (SnapshotSink, error)
+
+	// List is used to list the available snapshots in the store.
+	// It should return them in descending order, with the highest index first.
+	List() ([]*SnapshotMeta, error)
+
+	// Open takes a snapshot ID and provides a ReadCloser. Once Close is
+	// called it is assumed the snapshot is no longer needed.
+	Open(id string) (*SnapshotMeta, io.ReadCloser, error)
+}
+
+// SnapshotSink is returned by StartSnapshot. The FSM will Write state
+// to the sink and call Close on completion. On error, Cancel will be invoked.
+type SnapshotSink interface {
+	io.WriteCloser
+	ID() string
+	Cancel() error
+}
+
+// runSnapshots is a long-running goroutine used to manage taking
+// new snapshots of the FSM. It runs in parallel to the FSM and
+// main goroutines, so that snapshots do not block normal operation.
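+// Snapshots are triggered either by the SnapshotInterval timer (gated by
+// shouldSnapshot, which compares the log/snapshot index delta against
+// SnapshotThreshold) or on demand via userSnapshotCh.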
+func (r *Raft) runSnapshots() { + for { + select { + case <-randomTimeout(r.config().SnapshotInterval): + // Check if we should snapshot + if !r.shouldSnapshot() { + continue + } + + // Trigger a snapshot + if _, err := r.takeSnapshot(); err != nil { + r.logger.Error("failed to take snapshot", "error", err) + } + + case future := <-r.userSnapshotCh: + // User-triggered, run immediately + id, err := r.takeSnapshot() + if err != nil { + r.logger.Error("failed to take snapshot", "error", err) + } else { + future.opener = func() (*SnapshotMeta, io.ReadCloser, error) { + return r.snapshots.Open(id) + } + } + future.respond(err) + + case <-r.shutdownCh: + return + } + } +} + +// shouldSnapshot checks if we meet the conditions to take +// a new snapshot. +func (r *Raft) shouldSnapshot() bool { + // Check the last snapshot index + lastSnap, _ := r.getLastSnapshot() + + // Check the last log index + lastIdx, err := r.logs.LastIndex() + if err != nil { + r.logger.Error("failed to get last log index", "error", err) + return false + } + + // Compare the delta to the threshold + delta := lastIdx - lastSnap + return delta >= r.config().SnapshotThreshold +} + +// takeSnapshot is used to take a new snapshot. This must only be called from +// the snapshot thread, never the main thread. This returns the ID of the new +// snapshot, along with an error. +func (r *Raft) takeSnapshot() (string, error) { + defer metrics.MeasureSince([]string{"raft", "snapshot", "takeSnapshot"}, time.Now()) + + // Create a request for the FSM to perform a snapshot. + snapReq := &reqSnapshotFuture{} + snapReq.init() + + // Wait for dispatch or shutdown. + select { + case r.fsmSnapshotCh <- snapReq: + case <-r.shutdownCh: + return "", ErrRaftShutdown + } + + // Wait until we get a response + if err := snapReq.Error(); err != nil { + if err != ErrNothingNewToSnapshot { + err = fmt.Errorf("failed to start snapshot: %v", err) + } + return "", err + } + defer snapReq.snapshot.Release() + + // Make a request for the configurations and extract the committed info. + // We have to use the future here to safely get this information since + // it is owned by the main thread. + configReq := &configurationsFuture{} + configReq.ShutdownCh = r.shutdownCh + configReq.init() + select { + case r.configurationsCh <- configReq: + case <-r.shutdownCh: + return "", ErrRaftShutdown + } + if err := configReq.Error(); err != nil { + return "", err + } + committed := configReq.configurations.committed + committedIndex := configReq.configurations.committedIndex + + // We don't support snapshots while there's a config change outstanding + // since the snapshot doesn't have a means to represent this state. This + // is a little weird because we need the FSM to apply an index that's + // past the configuration change, even though the FSM itself doesn't see + // the configuration changes. It should be ok in practice with normal + // application traffic flowing through the FSM. If there's none of that + // then it's not crucial that we snapshot, since there's not much going + // on Raft-wise. + if snapReq.index < committedIndex { + return "", fmt.Errorf("cannot take snapshot now, wait until the configuration entry at %v has been applied (have applied %v)", + committedIndex, snapReq.index) + } + + // Create a new snapshot. 
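+	// The sink write path below is Create -> Persist -> Close; Cancel is
+	// invoked instead of Close if persisting fails.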
+	r.logger.Info("starting snapshot up to", "index", snapReq.index)
+	start := time.Now()
+	version := getSnapshotVersion(r.protocolVersion)
+	sink, err := r.snapshots.Create(version, snapReq.index, snapReq.term, committed, committedIndex, r.trans)
+	if err != nil {
+		return "", fmt.Errorf("failed to create snapshot: %v", err)
+	}
+	metrics.MeasureSince([]string{"raft", "snapshot", "create"}, start)
+
+	// Try to persist the snapshot.
+	start = time.Now()
+	if err := snapReq.snapshot.Persist(sink); err != nil {
+		sink.Cancel()
+		return "", fmt.Errorf("failed to persist snapshot: %v", err)
+	}
+	metrics.MeasureSince([]string{"raft", "snapshot", "persist"}, start)
+
+	// Close and check for error.
+	if err := sink.Close(); err != nil {
+		return "", fmt.Errorf("failed to close snapshot: %v", err)
+	}
+
+	// Update the last stable snapshot info.
+	r.setLastSnapshot(snapReq.index, snapReq.term)
+
+	// Compact the logs.
+	if err := r.compactLogs(snapReq.index); err != nil {
+		return "", err
+	}
+
+	r.logger.Info("snapshot complete up to", "index", snapReq.index)
+	return sink.ID(), nil
+}
+
+// compactLogsWithTrailing takes the last inclusive index of a snapshot,
+// the lastLogIdx, and the trailingLogs, and trims the logs that
+// are no longer needed.
+func (r *Raft) compactLogsWithTrailing(snapIdx uint64, lastLogIdx uint64, trailingLogs uint64) error {
+	// Determine log ranges to compact
+	minLog, err := r.logs.FirstIndex()
+	if err != nil {
+		return fmt.Errorf("failed to get first log index: %v", err)
+	}
+
+	// Check if we have enough logs to truncate
+	// Use a consistent value for trailingLogs for the duration of this method
+	// call to avoid surprising behaviour.
+	if lastLogIdx <= trailingLogs {
+		return nil
+	}
+
+	// Truncate up to the end of the snapshot, or `TrailingLogs`
+	// back from the head, whichever is further back. This ensures
+	// at least `TrailingLogs` entries, but does not allow logs
+	// after the snapshot to be removed.
+	maxLog := min(snapIdx, lastLogIdx-trailingLogs)
+
+	if minLog > maxLog {
+		r.logger.Info("no logs to truncate")
+		return nil
+	}
+
+	r.logger.Info("compacting logs", "from", minLog, "to", maxLog)
+
+	// Compact the logs
+	if err := r.logs.DeleteRange(minLog, maxLog); err != nil {
+		return fmt.Errorf("log compaction failed: %v", err)
+	}
+	return nil
+}
+
+// compactLogs takes the last inclusive index of a snapshot
+// and trims the logs that are no longer needed.
+func (r *Raft) compactLogs(snapIdx uint64) error {
+	defer metrics.MeasureSince([]string{"raft", "compactLogs"}, time.Now())
+
+	lastLogIdx, _ := r.getLastLog()
+	trailingLogs := r.config().TrailingLogs
+
+	return r.compactLogsWithTrailing(snapIdx, lastLogIdx, trailingLogs)
+}
+
+// removeOldLogs removes all old logs from the store. This is used for
+// MonotonicLogStores after restore. Callers should verify that the store
+// implementation is monotonic prior to calling.
+func (r *Raft) removeOldLogs() error {
+	defer metrics.MeasureSince([]string{"raft", "removeOldLogs"}, time.Now())
+
+	lastLogIdx, err := r.logs.LastIndex()
+	if err != nil {
+		return fmt.Errorf("failed to get last log index: %w", err)
+	}
+
+	r.logger.Info("removing all old logs from log store")
+
+	// call compactLogsWithTrailing with lastLogIdx for snapIdx since
+	// it will take the lesser of lastLogIdx and snapIdx to figure out
+	// the end for which to apply trailingLogs.
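+	// Passing trailingLogs=0 removes every log up to and including lastLogIdx.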
+	return r.compactLogsWithTrailing(lastLogIdx, lastLogIdx, 0)
+}
diff --git a/vendor/github.com/hashicorp/raft/stable.go b/vendor/github.com/hashicorp/raft/stable.go
new file mode 100644
index 0000000000000..3d5a576445ecd
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/stable.go
@@ -0,0 +1,18 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package raft
+
+// StableStore is used to provide stable storage
+// of key configurations to ensure safety.
+type StableStore interface {
+	Set(key []byte, val []byte) error
+
+	// Get returns the value for key, or an empty byte slice if key was not found.
+	Get(key []byte) ([]byte, error)
+
+	SetUint64(key []byte, val uint64) error
+
+	// GetUint64 returns the uint64 value for key, or 0 if key was not found.
+	GetUint64(key []byte) (uint64, error)
+}
diff --git a/vendor/github.com/hashicorp/raft/state.go b/vendor/github.com/hashicorp/raft/state.go
new file mode 100644
index 0000000000000..edbccae722211
--- /dev/null
+++ b/vendor/github.com/hashicorp/raft/state.go
@@ -0,0 +1,174 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package raft
+
+import (
+	"sync"
+	"sync/atomic"
+)
+
+// RaftState captures the state of a Raft node: Follower, Candidate, Leader,
+// or Shutdown.
+type RaftState uint32
+
+const (
+	// Follower is the initial state of a Raft node.
+	Follower RaftState = iota
+
+	// Candidate is one of the valid states of a Raft node.
+	Candidate
+
+	// Leader is one of the valid states of a Raft node.
+	Leader
+
+	// Shutdown is the terminal state of a Raft node.
+	Shutdown
+)
+
+func (s RaftState) String() string {
+	switch s {
+	case Follower:
+		return "Follower"
+	case Candidate:
+		return "Candidate"
+	case Leader:
+		return "Leader"
+	case Shutdown:
+		return "Shutdown"
+	default:
+		return "Unknown"
+	}
+}
+
+// raftState is used to maintain various state variables
+// and provides an interface to set/get the variables in a
+// thread safe manner.
+type raftState struct {
+	// currentTerm, commitIndex and lastApplied must be kept at the top of
+	// the struct so they're 64 bit aligned which is a requirement for
+	// atomic ops on 32 bit platforms.
+ + // The current term, cache of StableStore + currentTerm uint64 + + // Highest committed log entry + commitIndex uint64 + + // Last applied log to the FSM + lastApplied uint64 + + // protects 4 next fields + lastLock sync.Mutex + + // Cache the latest snapshot index/term + lastSnapshotIndex uint64 + lastSnapshotTerm uint64 + + // Cache the latest log from LogStore + lastLogIndex uint64 + lastLogTerm uint64 + + // Tracks running goroutines + routinesGroup sync.WaitGroup + + // The current state + state RaftState +} + +func (r *raftState) getState() RaftState { + stateAddr := (*uint32)(&r.state) + return RaftState(atomic.LoadUint32(stateAddr)) +} + +func (r *raftState) setState(s RaftState) { + stateAddr := (*uint32)(&r.state) + atomic.StoreUint32(stateAddr, uint32(s)) +} + +func (r *raftState) getCurrentTerm() uint64 { + return atomic.LoadUint64(&r.currentTerm) +} + +func (r *raftState) setCurrentTerm(term uint64) { + atomic.StoreUint64(&r.currentTerm, term) +} + +func (r *raftState) getLastLog() (index, term uint64) { + r.lastLock.Lock() + index = r.lastLogIndex + term = r.lastLogTerm + r.lastLock.Unlock() + return +} + +func (r *raftState) setLastLog(index, term uint64) { + r.lastLock.Lock() + r.lastLogIndex = index + r.lastLogTerm = term + r.lastLock.Unlock() +} + +func (r *raftState) getLastSnapshot() (index, term uint64) { + r.lastLock.Lock() + index = r.lastSnapshotIndex + term = r.lastSnapshotTerm + r.lastLock.Unlock() + return +} + +func (r *raftState) setLastSnapshot(index, term uint64) { + r.lastLock.Lock() + r.lastSnapshotIndex = index + r.lastSnapshotTerm = term + r.lastLock.Unlock() +} + +func (r *raftState) getCommitIndex() uint64 { + return atomic.LoadUint64(&r.commitIndex) +} + +func (r *raftState) setCommitIndex(index uint64) { + atomic.StoreUint64(&r.commitIndex, index) +} + +func (r *raftState) getLastApplied() uint64 { + return atomic.LoadUint64(&r.lastApplied) +} + +func (r *raftState) setLastApplied(index uint64) { + atomic.StoreUint64(&r.lastApplied, index) +} + +// Start a goroutine and properly handle the race between a routine +// starting and incrementing, and exiting and decrementing. +func (r *raftState) goFunc(f func()) { + r.routinesGroup.Add(1) + go func() { + defer r.routinesGroup.Done() + f() + }() +} + +func (r *raftState) waitShutdown() { + r.routinesGroup.Wait() +} + +// getLastIndex returns the last index in stable storage. +// Either from the last log or from the last snapshot. +func (r *raftState) getLastIndex() uint64 { + r.lastLock.Lock() + defer r.lastLock.Unlock() + return max(r.lastLogIndex, r.lastSnapshotIndex) +} + +// getLastEntry returns the last index and term in stable storage. +// Either from the last log or from the last snapshot. +func (r *raftState) getLastEntry() (uint64, uint64) { + r.lastLock.Lock() + defer r.lastLock.Unlock() + if r.lastLogIndex >= r.lastSnapshotIndex { + return r.lastLogIndex, r.lastLogTerm + } + return r.lastSnapshotIndex, r.lastSnapshotTerm +} diff --git a/vendor/github.com/hashicorp/raft/tag.sh b/vendor/github.com/hashicorp/raft/tag.sh new file mode 100644 index 0000000000000..c6eb8a066745d --- /dev/null +++ b/vendor/github.com/hashicorp/raft/tag.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +set -e + +# The version must be supplied from the environment. Do not include the +# leading "v". +if [ -z $VERSION ]; then + echo "Please specify a version." + exit 1 +fi + +# Generate the tag. +echo "==> Tagging version $VERSION..." 
+git commit --allow-empty -a --gpg-sign=348FFC4C -m "Release v$VERSION" +git tag -a -m "Version $VERSION" -s -u 348FFC4C "v${VERSION}" main + +exit 0 diff --git a/vendor/github.com/hashicorp/raft/tcp_transport.go b/vendor/github.com/hashicorp/raft/tcp_transport.go new file mode 100644 index 0000000000000..573696e467cc5 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/tcp_transport.go @@ -0,0 +1,120 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package raft + +import ( + "errors" + "io" + "net" + "time" + + "github.com/hashicorp/go-hclog" +) + +var ( + errNotAdvertisable = errors.New("local bind address is not advertisable") + errNotTCP = errors.New("local address is not a TCP address") +) + +// TCPStreamLayer implements StreamLayer interface for plain TCP. +type TCPStreamLayer struct { + advertise net.Addr + listener *net.TCPListener +} + +// NewTCPTransport returns a NetworkTransport that is built on top of +// a TCP streaming transport layer. +func NewTCPTransport( + bindAddr string, + advertise net.Addr, + maxPool int, + timeout time.Duration, + logOutput io.Writer, +) (*NetworkTransport, error) { + return newTCPTransport(bindAddr, advertise, func(stream StreamLayer) *NetworkTransport { + return NewNetworkTransport(stream, maxPool, timeout, logOutput) + }) +} + +// NewTCPTransportWithLogger returns a NetworkTransport that is built on top of +// a TCP streaming transport layer, with log output going to the supplied Logger +func NewTCPTransportWithLogger( + bindAddr string, + advertise net.Addr, + maxPool int, + timeout time.Duration, + logger hclog.Logger, +) (*NetworkTransport, error) { + return newTCPTransport(bindAddr, advertise, func(stream StreamLayer) *NetworkTransport { + return NewNetworkTransportWithLogger(stream, maxPool, timeout, logger) + }) +} + +// NewTCPTransportWithConfig returns a NetworkTransport that is built on top of +// a TCP streaming transport layer, using the given config struct. +func NewTCPTransportWithConfig( + bindAddr string, + advertise net.Addr, + config *NetworkTransportConfig, +) (*NetworkTransport, error) { + return newTCPTransport(bindAddr, advertise, func(stream StreamLayer) *NetworkTransport { + config.Stream = stream + return NewNetworkTransportWithConfig(config) + }) +} + +func newTCPTransport(bindAddr string, + advertise net.Addr, + transportCreator func(stream StreamLayer) *NetworkTransport) (*NetworkTransport, error) { + // Try to bind + list, err := net.Listen("tcp", bindAddr) + if err != nil { + return nil, err + } + + // Create stream + stream := &TCPStreamLayer{ + advertise: advertise, + listener: list.(*net.TCPListener), + } + + // Verify that we have a usable advertise address + addr, ok := stream.Addr().(*net.TCPAddr) + if !ok { + list.Close() + return nil, errNotTCP + } + if addr.IP == nil || addr.IP.IsUnspecified() { + list.Close() + return nil, errNotAdvertisable + } + + // Create the network transport + trans := transportCreator(stream) + return trans, nil +} + +// Dial implements the StreamLayer interface. +func (t *TCPStreamLayer) Dial(address ServerAddress, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout("tcp", string(address), timeout) +} + +// Accept implements the net.Listener interface. +func (t *TCPStreamLayer) Accept() (c net.Conn, err error) { + return t.listener.Accept() +} + +// Close implements the net.Listener interface. +func (t *TCPStreamLayer) Close() (err error) { + return t.listener.Close() +} + +// Addr implements the net.Listener interface. 
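+// It returns the advertise address when one was provided, otherwise the
+// bound listener's address.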
+func (t *TCPStreamLayer) Addr() net.Addr { + // Use an advertise addr if provided + if t.advertise != nil { + return t.advertise + } + return t.listener.Addr() +} diff --git a/vendor/github.com/hashicorp/raft/testing.go b/vendor/github.com/hashicorp/raft/testing.go new file mode 100644 index 0000000000000..351a9ababa554 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/testing.go @@ -0,0 +1,874 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package raft + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "reflect" + "sync" + "testing" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-msgpack/v2/codec" +) + +var userSnapshotErrorsOnNoData = true + +// Return configurations optimized for in-memory +func inmemConfig(t testing.TB) *Config { + conf := DefaultConfig() + conf.HeartbeatTimeout = 50 * time.Millisecond + conf.ElectionTimeout = 50 * time.Millisecond + conf.LeaderLeaseTimeout = 50 * time.Millisecond + conf.CommitTimeout = 5 * time.Millisecond + conf.Logger = newTestLogger(t) + return conf +} + +// MockFSM is an implementation of the FSM interface, and just stores +// the logs sequentially. +// +// NOTE: This is exposed for middleware testing purposes and is not a stable API +type MockFSM struct { + sync.Mutex + logs [][]byte + configurations []Configuration +} + +// NOTE: This is exposed for middleware testing purposes and is not a stable API +type MockFSMConfigStore struct { + FSM +} + +// NOTE: This is exposed for middleware testing purposes and is not a stable API +type WrappingFSM interface { + Underlying() FSM +} + +func getMockFSM(fsm FSM) *MockFSM { + switch f := fsm.(type) { + case *MockFSM: + return f + case *MockFSMConfigStore: + return f.FSM.(*MockFSM) + case WrappingFSM: + return getMockFSM(f.Underlying()) + } + + return nil +} + +// NOTE: This is exposed for middleware testing purposes and is not a stable API +type MockSnapshot struct { + logs [][]byte + maxIndex int +} + +var _ ConfigurationStore = (*MockFSMConfigStore)(nil) + +// NOTE: This is exposed for middleware testing purposes and is not a stable API +func (m *MockFSM) Apply(log *Log) interface{} { + m.Lock() + defer m.Unlock() + m.logs = append(m.logs, log.Data) + return len(m.logs) +} + +// NOTE: This is exposed for middleware testing purposes and is not a stable API +func (m *MockFSM) Snapshot() (FSMSnapshot, error) { + m.Lock() + defer m.Unlock() + return &MockSnapshot{m.logs, len(m.logs)}, nil +} + +// NOTE: This is exposed for middleware testing purposes and is not a stable API +func (m *MockFSM) Restore(inp io.ReadCloser) error { + m.Lock() + defer m.Unlock() + defer inp.Close() + hd := codec.MsgpackHandle{} + dec := codec.NewDecoder(inp, &hd) + + m.logs = nil + return dec.Decode(&m.logs) +} + +// NOTE: This is exposed for middleware testing purposes and is not a stable API +func (m *MockFSM) Logs() [][]byte { + m.Lock() + defer m.Unlock() + return m.logs +} + +// NOTE: This is exposed for middleware testing purposes and is not a stable API +func (m *MockFSMConfigStore) StoreConfiguration(index uint64, config Configuration) { + mm := m.FSM.(*MockFSM) + mm.Lock() + defer mm.Unlock() + mm.configurations = append(mm.configurations, config) +} + +// NOTE: This is exposed for middleware testing purposes and is not a stable API +func (m *MockSnapshot) Persist(sink SnapshotSink) error { + hd := codec.MsgpackHandle{} + enc := codec.NewEncoder(sink, &hd) + if err := enc.Encode(m.logs[:m.maxIndex]); err != nil { + sink.Cancel() + return err + } + 
sink.Close() + return nil +} + +// NOTE: This is exposed for middleware testing purposes and is not a stable API +func (m *MockSnapshot) Release() { +} + +// MockMonotonicLogStore is a LogStore wrapper for testing the +// MonotonicLogStore interface. +type MockMonotonicLogStore struct { + s LogStore +} + +// IsMonotonic implements the MonotonicLogStore interface. +func (m *MockMonotonicLogStore) IsMonotonic() bool { + return true +} + +// FirstIndex implements the LogStore interface. +func (m *MockMonotonicLogStore) FirstIndex() (uint64, error) { + return m.s.FirstIndex() +} + +// LastIndex implements the LogStore interface. +func (m *MockMonotonicLogStore) LastIndex() (uint64, error) { + return m.s.LastIndex() +} + +// GetLog implements the LogStore interface. +func (m *MockMonotonicLogStore) GetLog(index uint64, log *Log) error { + return m.s.GetLog(index, log) +} + +// StoreLog implements the LogStore interface. +func (m *MockMonotonicLogStore) StoreLog(log *Log) error { + return m.s.StoreLog(log) +} + +// StoreLogs implements the LogStore interface. +func (m *MockMonotonicLogStore) StoreLogs(logs []*Log) error { + return m.s.StoreLogs(logs) +} + +// DeleteRange implements the LogStore interface. +func (m *MockMonotonicLogStore) DeleteRange(min uint64, max uint64) error { + return m.s.DeleteRange(min, max) +} + +// This can be used as the destination for a logger and it'll +// map them into calls to testing.T.Log, so that you only see +// the logging for failed tests. +type testLoggerAdapter struct { + tb testing.TB + prefix string +} + +func (a *testLoggerAdapter) Write(d []byte) (int, error) { + if d[len(d)-1] == '\n' { + d = d[:len(d)-1] + } + if a.prefix != "" { + l := a.prefix + ": " + string(d) + a.tb.Log(l) + return len(l), nil + } + + a.tb.Log(string(d)) + return len(d), nil +} + +func newTestLogger(tb testing.TB) hclog.Logger { + return newTestLoggerWithPrefix(tb, "") +} + +// newTestLoggerWithPrefix returns a Logger that can be used in tests. prefix +// will be added as the name of the logger. +// +// If tests are run with -v (verbose mode, or -json which implies verbose) the +// log output will go to stderr directly. If tests are run in regular "quiet" +// mode, logs will be sent to t.Log so that the logs only appear when a test +// fails. +// +// Be careful where this is used though - calling t.Log after the test completes +// causes a panic. This is common if you use it for a NetworkTransport for +// example and then close the transport at the end of the test because an error +// is logged after the test is complete. +func newTestLoggerWithPrefix(tb testing.TB, prefix string) hclog.Logger { + if testing.Verbose() { + return hclog.New(&hclog.LoggerOptions{Name: prefix, Level: hclog.Trace}) + } + + return hclog.New(&hclog.LoggerOptions{ + Name: prefix, + Output: &testLoggerAdapter{tb: tb, prefix: prefix}, + }) +} + +type cluster struct { + dirs []string + stores []*InmemStore + fsms []FSM + snaps []*FileSnapshotStore + trans []LoopbackTransport + rafts []*Raft + t *testing.T + observationCh chan Observation + conf *Config + propagateTimeout time.Duration + longstopTimeout time.Duration + logger hclog.Logger + startTime time.Time + + failedLock sync.Mutex + failedCh chan struct{} + failed bool +} + +func (c *cluster) Merge(other *cluster) { + c.dirs = append(c.dirs, other.dirs...) + c.stores = append(c.stores, other.stores...) + c.fsms = append(c.fsms, other.fsms...) + c.snaps = append(c.snaps, other.snaps...) + c.trans = append(c.trans, other.trans...) 
+ c.rafts = append(c.rafts, other.rafts...) +} + +func (c *cluster) RemoveServer(id ServerID) { + for i, n := range c.rafts { + if n.localID == id { + c.rafts = append(c.rafts[:i], c.rafts[i+1:]...) + return + } + } +} + +// notifyFailed will close the failed channel which can signal the goroutine +// running the test that another goroutine has detected a failure in order to +// terminate the test. +func (c *cluster) notifyFailed() { + c.failedLock.Lock() + defer c.failedLock.Unlock() + if !c.failed { + c.failed = true + close(c.failedCh) + } +} + +// Failf provides a logging function that fails the tests, prints the output +// with microseconds, and does not mysteriously eat the string. This can be +// safely called from goroutines but won't immediately halt the test. The +// failedCh will be closed to allow blocking functions in the main thread to +// detect the failure and react. Note that you should arrange for the main +// thread to block until all goroutines have completed in order to reliably +// fail tests using this function. +func (c *cluster) Failf(format string, args ...interface{}) { + c.logger.Error(fmt.Sprintf(format, args...)) + c.t.Fail() + c.notifyFailed() +} + +// FailNowf provides a logging function that fails the tests, prints the output +// with microseconds, and does not mysteriously eat the string. FailNowf must be +// called from the goroutine running the test or benchmark function, not from +// other goroutines created during the test. Calling FailNowf does not stop +// those other goroutines. +func (c *cluster) FailNowf(format string, args ...interface{}) { + c.t.Helper() + c.t.Fatalf(format, args...) +} + +// Close shuts down the cluster and cleans up. +func (c *cluster) Close() { + var futures []Future + for _, r := range c.rafts { + futures = append(futures, r.Shutdown()) + } + + // Wait for shutdown + limit := time.AfterFunc(c.longstopTimeout, func() { + // We can't FailNowf here, and c.Failf won't do anything if we + // hang, so panic. + panic("timed out waiting for shutdown") + }) + defer limit.Stop() + + for _, f := range futures { + if err := f.Error(); err != nil { + c.t.Fatalf("shutdown future err: %v", err) + } + } + + for _, d := range c.dirs { + os.RemoveAll(d) + } +} + +// WaitEventChan returns a channel which will signal if an observation is made +// or a timeout occurs. It is possible to set a filter to look for specific +// observations. Setting timeout to 0 means that it will wait forever until a +// non-filtered observation is made. +func (c *cluster) WaitEventChan(ctx context.Context, filter FilterFn) <-chan struct{} { + ch := make(chan struct{}) + go func() { + defer close(ch) + for { + select { + case <-ctx.Done(): + return + case o, ok := <-c.observationCh: + if !ok || filter == nil || filter(&o) { + return + } + } + } + }() + return ch +} + +// WaitEvent waits until an observation is made, a timeout occurs, or a test +// failure is signaled. It is possible to set a filter to look for specific +// observations. Setting timeout to 0 means that it will wait forever until a +// non-filtered observation is made or a test failure is signaled. +func (c *cluster) WaitEvent(filter FilterFn, timeout time.Duration) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + eventCh := c.WaitEventChan(ctx, filter) + select { + case <-c.failedCh: + c.t.FailNow() + case <-eventCh: + } +} + +// WaitForReplication blocks until every FSM in the cluster has the given +// length, or the long sanity check timeout expires. 
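+//
+// A typical use in a test looks like (a sketch; real tests also check the
+// future returned by Apply):
+//
+//	leader := c.Leader()
+//	leader.Apply([]byte("cmd"), 0)
+//	c.WaitForReplication(1) // every MockFSM now holds one log entry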
+func (c *cluster) WaitForReplication(fsmLength int) { + limitCh := time.After(c.longstopTimeout) + +CHECK: + for { + ctx, cancel := context.WithTimeout(context.Background(), c.conf.CommitTimeout) + defer cancel() + ch := c.WaitEventChan(ctx, nil) + select { + case <-c.failedCh: + c.t.FailNow() + + case <-limitCh: + c.t.Fatalf("timeout waiting for replication") + + case <-ch: + for _, fsmRaw := range c.fsms { + fsm := getMockFSM(fsmRaw) + fsm.Lock() + num := len(fsm.logs) + fsm.Unlock() + if num != fsmLength { + continue CHECK + } + } + return + } + } +} + +// pollState takes a snapshot of the state of the cluster. This might not be +// stable, so use GetInState() to apply some additional checks when waiting +// for the cluster to achieve a particular state. +func (c *cluster) pollState(s RaftState) ([]*Raft, uint64) { + var highestTerm uint64 + in := make([]*Raft, 0, 1) + for _, r := range c.rafts { + if r.State() == s { + in = append(in, r) + } + term := r.getCurrentTerm() + if term > highestTerm { + highestTerm = term + } + } + return in, highestTerm +} + +// GetInState polls the state of the cluster and attempts to identify when it has +// settled into the given state. +func (c *cluster) GetInState(s RaftState) []*Raft { + c.logger.Info("starting stability test", "raft-state", s) + limitCh := time.After(c.longstopTimeout) + + // An election should complete after 2 * max(HeartbeatTimeout, ElectionTimeout) + // because of the randomised timer expiring in 1 x interval ... 2 x interval. + // We add a bit for propagation delay. If the election fails (e.g. because + // two elections start at once), we will have got something through our + // observer channel indicating a different state (i.e. one of the nodes + // will have moved to candidate state) which will reset the timer. + // + // Because of an implementation peculiarity, it can actually be 3 x timeout. + timeout := c.conf.HeartbeatTimeout + if timeout < c.conf.ElectionTimeout { + timeout = c.conf.ElectionTimeout + } + timeout = 2*timeout + c.conf.CommitTimeout + timer := time.NewTimer(timeout) + defer timer.Stop() + + // Wait until we have a stable instate slice. Each time we see an + // observation a state has changed, recheck it and if it has changed, + // restart the timer. + pollStartTime := time.Now() + for { + _, highestTerm := c.pollState(s) + inStateTime := time.Now() + + // Sometimes this routine is called very early on before the + // rafts have started up. We then timeout even though no one has + // even started an election. So if the highest term in use is + // zero, we know there are no raft processes that have yet issued + // a RequestVote, and we set a long time out. This is fixed when + // we hear the first RequestVote, at which point we reset the + // timer. + if highestTerm == 0 { + timer.Reset(c.longstopTimeout) + } else { + timer.Reset(timeout) + } + + // Filter will wake up whenever we observe a RequestVote. 
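+		// or a RaftState change; either observation resets the stability timer.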
+ filter := func(ob *Observation) bool { + switch ob.Data.(type) { + case RaftState: + return true + case RequestVoteRequest: + return true + default: + return false + } + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + eventCh := c.WaitEventChan(ctx, filter) + select { + case <-c.failedCh: + c.t.FailNow() + + case <-limitCh: + c.t.Fatalf("timeout waiting for stable %s state", s) + + case <-eventCh: + c.logger.Debug("resetting stability timeout") + + case t, ok := <-timer.C: + if !ok { + c.t.Fatalf("timer channel errored") + } + + inState, highestTerm := c.pollState(s) + c.logger.Info(fmt.Sprintf("stable state for %s reached at %s (%d nodes), highestTerm is %d, %s from start of poll, %s from cluster start. Timeout at %s, %s after stability", + s, inStateTime, len(inState), highestTerm, inStateTime.Sub(pollStartTime), inStateTime.Sub(c.startTime), t, t.Sub(inStateTime))) + return inState + } + } +} + +// Leader waits for the cluster to elect a leader and stay in a stable state. +func (c *cluster) Leader() *Raft { + c.t.Helper() + leaders := c.GetInState(Leader) + if len(leaders) != 1 { + c.t.Fatalf("expected one leader: %v", leaders) + } + return leaders[0] +} + +// Followers waits for the cluster to have N-1 followers and stay in a stable +// state. +func (c *cluster) Followers() []*Raft { + expFollowers := len(c.rafts) - 1 + return c.WaitForFollowers(expFollowers) +} + +// WaitForFollowers waits for the cluster to have a given number of followers and stay in a stable +// state. +func (c *cluster) WaitForFollowers(expFollowers int) []*Raft { + followers := c.GetInState(Follower) + if len(followers) != expFollowers { + c.t.Fatalf("timeout waiting for %d followers (followers are %v)", expFollowers, followers) + } + return followers +} + +// FullyConnect connects all the transports together. +func (c *cluster) FullyConnect() { + c.logger.Debug("fully connecting") + for i, t1 := range c.trans { + for j, t2 := range c.trans { + if i != j { + t1.Connect(t2.LocalAddr(), t2) + t2.Connect(t1.LocalAddr(), t1) + } + } + } +} + +// Disconnect disconnects all transports from the given address. +func (c *cluster) Disconnect(a ServerAddress) { + c.logger.Debug("disconnecting", "address", a) + for _, t := range c.trans { + if t.LocalAddr() == a { + t.DisconnectAll() + } else { + t.Disconnect(a) + } + } +} + +// Partition keeps the given list of addresses connected but isolates them +// from the other members of the cluster. +func (c *cluster) Partition(far []ServerAddress) { + c.logger.Debug("partitioning", "addresses", far) + + // Gather the set of nodes on the "near" side of the partition (we + // will call the supplied list of nodes the "far" side). + near := make(map[ServerAddress]struct{}) +OUTER: + for _, t := range c.trans { + l := t.LocalAddr() + for _, a := range far { + if l == a { + continue OUTER + } + } + near[l] = struct{}{} + } + + // Now fixup all the connections. The near side will be separated from + // the far side, and vice-versa. + for _, t := range c.trans { + l := t.LocalAddr() + if _, ok := near[l]; ok { + for _, a := range far { + t.Disconnect(a) + } + } else { + for a := range near { + t.Disconnect(a) + } + } + } +} + +// IndexOf returns the index of the given raft instance. +func (c *cluster) IndexOf(r *Raft) int { + for i, n := range c.rafts { + if n == r { + return i + } + } + return -1 +} + +// EnsureLeader checks that ALL the nodes think the leader is the given expected +// leader. 
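+//
+// A sketch of the intended call pattern:
+//
+//	leader := c.Leader()
+//	c.EnsureLeader(t, leader.localAddr)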
+func (c *cluster) EnsureLeader(t *testing.T, expect ServerAddress) {
+	// We assume c.Leader() has been called already; now check all the rafts
+	// think the leader is correct
+	fail := false
+	for _, r := range c.rafts {
+		leaderAddr, _ := r.LeaderWithID()
+
+		if leaderAddr != expect {
+			if leaderAddr == "" {
+				leaderAddr = "[none]"
+			}
+			if expect == "" {
+				c.logger.Error("peer sees incorrect leader", "peer", r, "leader", leaderAddr, "expected-leader", "[none]")
+			} else {
+				c.logger.Error("peer sees incorrect leader", "peer", r, "leader", leaderAddr, "expected-leader", expect)
+			}
+			fail = true
+		}
+	}
+	if fail {
+		t.Fatalf("at least one peer has the wrong notion of leader")
+	}
+}
+
+// EnsureSame makes sure all the FSMs have the same contents.
+func (c *cluster) EnsureSame(t *testing.T) {
+	limit := time.Now().Add(c.longstopTimeout)
+	first := getMockFSM(c.fsms[0])
+
+CHECK:
+	first.Lock()
+	for i, fsmRaw := range c.fsms {
+		fsm := getMockFSM(fsmRaw)
+		if i == 0 {
+			continue
+		}
+		fsm.Lock()
+
+		if len(first.logs) != len(fsm.logs) {
+			fsm.Unlock()
+			if time.Now().After(limit) {
+				t.Fatalf("FSM log length mismatch: %d %d",
+					len(first.logs), len(fsm.logs))
+			} else {
+				goto WAIT
+			}
+		}
+
+		for idx := 0; idx < len(first.logs); idx++ {
+			if !bytes.Equal(first.logs[idx], fsm.logs[idx]) {
+				fsm.Unlock()
+				if time.Now().After(limit) {
+					t.Fatalf("FSM log mismatch at index %d", idx)
+				} else {
+					goto WAIT
+				}
+			}
+		}
+		if len(first.configurations) != len(fsm.configurations) {
+			fsm.Unlock()
+			if time.Now().After(limit) {
+				t.Fatalf("FSM configuration length mismatch: %d %d",
+					len(first.configurations), len(fsm.configurations))
+			} else {
+				goto WAIT
+			}
+		}
+
+		for idx := 0; idx < len(first.configurations); idx++ {
+			if !reflect.DeepEqual(first.configurations[idx], fsm.configurations[idx]) {
+				fsm.Unlock()
+				if time.Now().After(limit) {
+					t.Fatalf("FSM configuration mismatch at index %d: %v, %v", idx, first.configurations[idx], fsm.configurations[idx])
+				} else {
+					goto WAIT
+				}
+			}
+		}
+		fsm.Unlock()
+	}
+
+	first.Unlock()
+	return
+
+WAIT:
+	first.Unlock()
+	c.WaitEvent(nil, c.conf.CommitTimeout)
+	goto CHECK
+}
+
+// getConfiguration returns the configuration of the given Raft instance, or
+// fails the test if there's an error
+func (c *cluster) getConfiguration(r *Raft) Configuration {
+	future := r.GetConfiguration()
+	if err := future.Error(); err != nil {
+		c.t.Fatalf("failed to get configuration: %v", err)
+		return Configuration{}
+	}
+
+	return future.Configuration()
+}
+
+// EnsureSamePeers makes sure all the rafts have the same set of peers.
+func (c *cluster) EnsureSamePeers(t *testing.T) {
+	limit := time.Now().Add(c.longstopTimeout)
+	peerSet := c.getConfiguration(c.rafts[0])
+
+CHECK:
+	for i, raft := range c.rafts {
+		if i == 0 {
+			continue
+		}
+
+		otherSet := c.getConfiguration(raft)
+		if !reflect.DeepEqual(peerSet, otherSet) {
+			if time.Now().After(limit) {
+				t.Fatalf("peer mismatch: %+v %+v", peerSet, otherSet)
+			} else {
+				goto WAIT
+			}
+		}
+	}
+	return
+
+WAIT:
+	c.WaitEvent(nil, c.conf.CommitTimeout)
+	goto CHECK
+}
+
+// NOTE: This is exposed for middleware testing purposes and is not a stable API
+type MakeClusterOpts struct {
+	Peers           int
+	Bootstrap       bool
+	Conf            *Config
+	ConfigStoreFSM  bool
+	MakeFSMFunc     func() FSM
+	LongstopTimeout time.Duration
+	MonotonicLogs   bool
+}
+
+// makeCluster will return a cluster with the given config and number of peers.
+// If bootstrap is true, the servers will know about each other before starting,
+// otherwise their transports will be wired up but they won't yet have configured
+// each other.
+func makeCluster(t *testing.T, opts *MakeClusterOpts) *cluster {
+	if opts.Conf == nil {
+		opts.Conf = inmemConfig(t)
+	}
+
+	c := &cluster{
+		observationCh: make(chan Observation, 1024),
+		conf:          opts.Conf,
+		// Propagation takes a maximum of 2 heartbeat timeouts (time to
+		// get a new heartbeat that would cause a commit) plus a bit.
+		propagateTimeout: opts.Conf.HeartbeatTimeout*2 + opts.Conf.CommitTimeout,
+		longstopTimeout:  5 * time.Second,
+		logger:           newTestLoggerWithPrefix(t, "cluster"),
+		failedCh:         make(chan struct{}),
+	}
+	if opts.LongstopTimeout > 0 {
+		c.longstopTimeout = opts.LongstopTimeout
+	}
+
+	c.t = t
+	var configuration Configuration
+
+	// Setup the stores and transports
+	for i := 0; i < opts.Peers; i++ {
+		dir, err := os.MkdirTemp("", "raft")
+		if err != nil {
+			t.Fatalf("err: %v", err)
+		}
+
+		store := NewInmemStore()
+		c.dirs = append(c.dirs, dir)
+		c.stores = append(c.stores, store)
+		if opts.ConfigStoreFSM {
+			c.fsms = append(c.fsms, &MockFSMConfigStore{
+				FSM: &MockFSM{},
+			})
+		} else {
+			var fsm FSM
+			if opts.MakeFSMFunc != nil {
+				fsm = opts.MakeFSMFunc()
+			} else {
+				fsm = &MockFSM{}
+			}
+			c.fsms = append(c.fsms, fsm)
+		}
+
+		dir2, snap := FileSnapTest(t)
+		c.dirs = append(c.dirs, dir2)
+		c.snaps = append(c.snaps, snap)
+
+		addr, trans := NewInmemTransport("")
+		c.trans = append(c.trans, trans)
+		localID := ServerID(fmt.Sprintf("server-%s", addr))
+		if opts.Conf.ProtocolVersion < 3 {
+			localID = ServerID(addr)
+		}
+		configuration.Servers = append(configuration.Servers, Server{
+			Suffrage: Voter,
+			ID:       localID,
+			Address:  addr,
+		})
+	}
+
+	// Wire the transports together
+	c.FullyConnect()
+
+	// Create all the rafts
+	c.startTime = time.Now()
+	for i := 0; i < opts.Peers; i++ {
+		var logs LogStore
+		logs = c.stores[i]
+		store := c.stores[i]
+		snap := c.snaps[i]
+		trans := c.trans[i]
+
+		if opts.MonotonicLogs {
+			logs = &MockMonotonicLogStore{s: logs}
+		}
+
+		peerConf := opts.Conf
+		peerConf.LocalID = configuration.Servers[i].ID
+		peerConf.Logger = newTestLoggerWithPrefix(t, string(configuration.Servers[i].ID))
+
+		if opts.Bootstrap {
+			err := BootstrapCluster(peerConf, logs, store, snap, trans, configuration)
+			if err != nil {
+				t.Fatalf("BootstrapCluster failed: %v", err)
+			}
+		}
+
+		raft, err := NewRaft(peerConf, c.fsms[i], logs, store, snap, trans)
+		if err != nil {
+			t.Fatalf("NewRaft failed: %v", err)
+		}
+
+		raft.RegisterObserver(NewObserver(c.observationCh, false, nil))
+		c.rafts = append(c.rafts, raft)
+	}
+
+	return c
+}
+
+// NOTE: This is exposed for middleware testing purposes and is not a stable API
+func MakeCluster(n int, t *testing.T, conf *Config) *cluster {
+	return makeCluster(t, &MakeClusterOpts{
+		Peers:     n,
+		Bootstrap: true,
+		Conf:      conf,
+	})
+}
+
+// NOTE: This is exposed for middleware testing purposes and is not a stable API
+func MakeClusterNoBootstrap(n int, t *testing.T, conf *Config) *cluster {
+	return makeCluster(t, &MakeClusterOpts{
+		Peers: n,
+		Conf:  conf,
+	})
+}
+
+// NOTE: This is exposed for middleware testing purposes and is not a stable API
+func MakeClusterCustom(t *testing.T, opts *MakeClusterOpts) *cluster {
+	return makeCluster(t, opts)
+}
+
+// NOTE: This is exposed for middleware testing purposes and is not a stable API
+func FileSnapTest(t *testing.T) (string,
*FileSnapshotStore) { + // Create a test dir + dir, err := os.MkdirTemp("", "raft") + if err != nil { + t.Fatalf("err: %v ", err) + } + + snap, err := NewFileSnapshotStoreWithLogger(dir, 3, newTestLogger(t)) + if err != nil { + t.Fatalf("err: %v", err) + } + snap.noSync = true + return dir, snap +} diff --git a/vendor/github.com/hashicorp/raft/testing_batch.go b/vendor/github.com/hashicorp/raft/testing_batch.go new file mode 100644 index 0000000000000..3903d95a58b88 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/testing_batch.go @@ -0,0 +1,33 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build batchtest +// +build batchtest + +package raft + +func init() { + userSnapshotErrorsOnNoData = false +} + +// ApplyBatch enables MockFSM to satisfy the BatchingFSM interface. This +// function is gated by the batchtest build flag. +// +// NOTE: This is exposed for middleware testing purposes and is not a stable API +func (m *MockFSM) ApplyBatch(logs []*Log) []interface{} { + m.Lock() + defer m.Unlock() + + ret := make([]interface{}, len(logs)) + for i, log := range logs { + switch log.Type { + case LogCommand: + m.logs = append(m.logs, log.Data) + ret[i] = len(m.logs) + default: + ret[i] = nil + } + } + + return ret +} diff --git a/vendor/github.com/hashicorp/raft/transport.go b/vendor/github.com/hashicorp/raft/transport.go new file mode 100644 index 0000000000000..c64fff6ec6edb --- /dev/null +++ b/vendor/github.com/hashicorp/raft/transport.go @@ -0,0 +1,141 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package raft + +import ( + "io" + "time" +) + +// RPCResponse captures both a response and a potential error. +type RPCResponse struct { + Response interface{} + Error error +} + +// RPC has a command, and provides a response mechanism. +type RPC struct { + Command interface{} + Reader io.Reader // Set only for InstallSnapshot + RespChan chan<- RPCResponse +} + +// Respond is used to respond with a response, error or both +func (r *RPC) Respond(resp interface{}, err error) { + r.RespChan <- RPCResponse{resp, err} +} + +// Transport provides an interface for network transports +// to allow Raft to communicate with other nodes. +type Transport interface { + // Consumer returns a channel that can be used to + // consume and respond to RPC requests. + Consumer() <-chan RPC + + // LocalAddr is used to return our local address to distinguish from our peers. + LocalAddr() ServerAddress + + // AppendEntriesPipeline returns an interface that can be used to pipeline + // AppendEntries requests. + AppendEntriesPipeline(id ServerID, target ServerAddress) (AppendPipeline, error) + + // AppendEntries sends the appropriate RPC to the target node. + AppendEntries(id ServerID, target ServerAddress, args *AppendEntriesRequest, resp *AppendEntriesResponse) error + + // RequestVote sends the appropriate RPC to the target node. + RequestVote(id ServerID, target ServerAddress, args *RequestVoteRequest, resp *RequestVoteResponse) error + + // InstallSnapshot is used to push a snapshot down to a follower. The data is read from + // the ReadCloser and streamed to the client. + InstallSnapshot(id ServerID, target ServerAddress, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error + + // EncodePeer is used to serialize a peer's address. + EncodePeer(id ServerID, addr ServerAddress) []byte + + // DecodePeer is used to deserialize a peer's address. 
+ DecodePeer([]byte) ServerAddress + + // SetHeartbeatHandler is used to setup a heartbeat handler + // as a fast-pass. This is to avoid head-of-line blocking from + // disk IO. If a Transport does not support this, it can simply + // ignore the call, and push the heartbeat onto the Consumer channel. + SetHeartbeatHandler(cb func(rpc RPC)) + + // TimeoutNow is used to start a leadership transfer to the target node. + TimeoutNow(id ServerID, target ServerAddress, args *TimeoutNowRequest, resp *TimeoutNowResponse) error +} + +// WithPreVote is an interface that a transport may provide which +// allows a transport to support a PreVote request. +// +// It is defined separately from Transport as unfortunately it wasn't in the +// original interface specification. +type WithPreVote interface { + // RequestPreVote sends the appropriate RPC to the target node. + RequestPreVote(id ServerID, target ServerAddress, args *RequestPreVoteRequest, resp *RequestPreVoteResponse) error +} + +// WithClose is an interface that a transport may provide which +// allows a transport to be shut down cleanly when a Raft instance +// shuts down. +// +// It is defined separately from Transport as unfortunately it wasn't in the +// original interface specification. +type WithClose interface { + // Close permanently closes a transport, stopping + // any associated goroutines and freeing other resources. + Close() error +} + +// LoopbackTransport is an interface that provides a loopback transport suitable for testing +// e.g. InmemTransport. It's there so we don't have to rewrite tests. +type LoopbackTransport interface { + Transport // Embedded transport reference + WithPeers // Embedded peer management + WithClose // with a close routine + WithPreVote // with a prevote +} + +// WithPeers is an interface that a transport may provide which allows for connection and +// disconnection. Unless the transport is a loopback transport, the transport specified to +// "Connect" is likely to be nil. +type WithPeers interface { + Connect(peer ServerAddress, t Transport) // Connect a peer + Disconnect(peer ServerAddress) // Disconnect a given peer + DisconnectAll() // Disconnect all peers, possibly to reconnect them later +} + +// AppendPipeline is used for pipelining AppendEntries requests. It is used +// to increase the replication throughput by masking latency and better +// utilizing bandwidth. +type AppendPipeline interface { + // AppendEntries is used to add another request to the pipeline. + // The send may block which is an effective form of back-pressure. + AppendEntries(args *AppendEntriesRequest, resp *AppendEntriesResponse) (AppendFuture, error) + + // Consumer returns a channel that can be used to consume + // response futures when they are ready. + Consumer() <-chan AppendFuture + + // Close closes the pipeline and cancels all inflight RPCs + Close() error +} + +// AppendFuture is used to return information about a pipelined AppendEntries request. +type AppendFuture interface { + Future + + // Start returns the time that the append request was started. + // It is always OK to call this method. + Start() time.Time + + // Request holds the parameters of the AppendEntries call. + // It is always OK to call this method. + Request() *AppendEntriesRequest + + // Response holds the results of the AppendEntries call. + // This method must only be called after the Error + // method returns, and will only be valid on success. 
+ Response() *AppendEntriesResponse +} diff --git a/vendor/github.com/hashicorp/raft/util.go b/vendor/github.com/hashicorp/raft/util.go new file mode 100644 index 0000000000000..09c7742b2d6b5 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/util.go @@ -0,0 +1,176 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package raft + +import ( + "bytes" + crand "crypto/rand" + "fmt" + "math" + "math/big" + "math/rand" + "time" + + "github.com/hashicorp/go-msgpack/v2/codec" +) + +func init() { + // Ensure we use a high-entropy seed for the pseudo-random generator + rand.Seed(newSeed()) +} + +// returns an int64 from a crypto random source +// can be used to seed a source for a math/rand. +func newSeed() int64 { + r, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64)) + if err != nil { + panic(fmt.Errorf("failed to read random bytes: %v", err)) + } + return r.Int64() +} + +// randomTimeout returns a value that is between the minVal and 2x minVal. +func randomTimeout(minVal time.Duration) <-chan time.Time { + if minVal == 0 { + return nil + } + extra := time.Duration(rand.Int63()) % minVal + return time.After(minVal + extra) +} + +// min returns the minimum. +func min(a, b uint64) uint64 { + if a <= b { + return a + } + return b +} + +// max returns the maximum. +func max(a, b uint64) uint64 { + if a >= b { + return a + } + return b +} + +// generateUUID is used to generate a random UUID. +func generateUUID() string { + buf := make([]byte, 16) + if _, err := crand.Read(buf); err != nil { + panic(fmt.Errorf("failed to read random bytes: %v", err)) + } + + return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x", + buf[0:4], + buf[4:6], + buf[6:8], + buf[8:10], + buf[10:16]) +} + +// asyncNotifyCh is used to do an async channel send +// to a single channel without blocking. +func asyncNotifyCh(ch chan struct{}) { + select { + case ch <- struct{}{}: + default: + } +} + +// drainNotifyCh empties out a single-item notification channel without +// blocking, and returns whether it received anything. +func drainNotifyCh(ch chan struct{}) bool { + select { + case <-ch: + return true + default: + return false + } +} + +// asyncNotifyBool is used to do an async notification +// on a bool channel. +func asyncNotifyBool(ch chan bool, v bool) { + select { + case ch <- v: + default: + } +} + +// overrideNotifyBool is used to notify on a bool channel +// but override existing value if value is present. +// ch must be 1-item buffered channel. +// +// This method does not support multiple concurrent calls. +func overrideNotifyBool(ch chan bool, v bool) { + select { + case ch <- v: + // value sent, all done + case <-ch: + // channel had an old value + select { + case ch <- v: + default: + panic("race: channel was sent concurrently") + } + } +} + +// Decode reverses the encode operation on a byte slice input. +func decodeMsgPack(buf []byte, out interface{}) error { + r := bytes.NewBuffer(buf) + hd := codec.MsgpackHandle{} + dec := codec.NewDecoder(r, &hd) + return dec.Decode(out) +} + +// Encode writes an encoded object to a new bytes buffer. +func encodeMsgPack(in interface{}) (*bytes.Buffer, error) { + buf := bytes.NewBuffer(nil) + hd := codec.MsgpackHandle{ + BasicHandle: codec.BasicHandle{ + TimeNotBuiltin: true, + }, + } + enc := codec.NewEncoder(buf, &hd) + err := enc.Encode(in) + return buf, err +} + +// backoff is used to compute an exponential backoff +// duration. Base time is scaled by the current round, +// up to some maximum scale factor. 
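encodeMsgPack and decodeMsgPack above are symmetric; a small in-package sketch (demoEntry and roundTrip are hypothetical) showing the round trip:

```go
// demoEntry is an illustrative payload type.
type demoEntry struct {
	Index uint64
	Data  []byte
}

// roundTrip encodes a value with encodeMsgPack and decodes it back
// with decodeMsgPack, returning the reconstructed copy.
func roundTrip(in demoEntry) (demoEntry, error) {
	buf, err := encodeMsgPack(in)
	if err != nil {
		return demoEntry{}, err
	}
	var out demoEntry
	err = decodeMsgPack(buf.Bytes(), &out)
	return out, err
}
```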
+func backoff(base time.Duration, round, limit uint64) time.Duration { + power := min(round, limit) + for power > 2 { + base *= 2 + power-- + } + return base +} + +// cappedExponentialBackoff computes the exponential backoff with an adjustable +// cap on the max timeout. +func cappedExponentialBackoff(base time.Duration, round, limit uint64, cap time.Duration) time.Duration { + power := min(round, limit) + for power > 2 { + if base > cap { + return cap + } + base *= 2 + power-- + } + if base > cap { + return cap + } + return base +} + +// Needed for sorting []uint64, used to determine commitment +type uint64Slice []uint64 + +func (p uint64Slice) Len() int { return len(p) } +func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go index 35d8108958ff0..581cf7cdfadf3 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go @@ -53,4 +53,10 @@ const ( // AnnotationDescription is the annotation key for the human-readable description of the software packaged in the image. AnnotationDescription = "org.opencontainers.image.description" + + // AnnotationBaseImageDigest is the annotation key for the digest of the image's base image. + AnnotationBaseImageDigest = "org.opencontainers.image.base.digest" + + // AnnotationBaseImageName is the annotation key for the image reference of the image's base image. + AnnotationBaseImageName = "org.opencontainers.image.base.name" ) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go index fe799bd698c71..36b0aeb8f1fcb 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go @@ -48,6 +48,17 @@ type ImageConfig struct { // StopSignal contains the system call signal that will be sent to the container to exit. StopSignal string `json:"StopSignal,omitempty"` + + // ArgsEscaped + // + // Deprecated: This field is present only for legacy compatibility with + // Docker and should not be used by new image builders. It is used by Docker + // for Windows images to indicate that the `Entrypoint` or `Cmd` (or both) + // contains a single-element array holding a pre-escaped command line that + // has been combined into the single string `CommandLine`. If `true`, the + // value in `Entrypoint` or `Cmd` should be used as-is to avoid double escaping. + // https://github.com/opencontainers/image-spec/pull/892 + ArgsEscaped bool `json:"ArgsEscaped,omitempty"` } // RootFS describes a layer content addresses @@ -86,11 +97,8 @@ type Image struct { // Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image. Author string `json:"author,omitempty"` - // Architecture is the CPU architecture which the binaries in this image are built to run on. - Architecture string `json:"architecture"` - - // OS is the name of the operating system which the image is built to run on. - OS string `json:"os"` + // Platform describes the platform which the image in the manifest runs on. + Platform // Config defines the execution parameters which should be used as a base when running a container using the image.
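To pin down the arithmetic of the backoff helper above: the base is doubled power-2 times, where power = min(round, limit). A hedged example-style sketch (assumes in-package access plus the fmt and time imports):

```go
// With round=5 and limit=8, power=min(5,8)=5, so the loop runs while
// power > 2 and doubles the base three times: 10ms -> 20ms -> 40ms -> 80ms.
func ExampleBackoff() {
	fmt.Println(backoff(10*time.Millisecond, 5, 8))
	// Output: 80ms
}
```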
Config ImageConfig `json:"config,omitempty"` diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go index 6e442a0853f4b..1881b11814b0c 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go @@ -1,4 +1,4 @@ -// Copyright 2016 The Linux Foundation +// Copyright 2016-2022 The Linux Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -21,7 +21,7 @@ import digest "github.com/opencontainers/go-digest" // when marshalled to JSON. type Descriptor struct { // MediaType is the media type of the object this schema refers to. - MediaType string `json:"mediaType,omitempty"` + MediaType string `json:"mediaType"` // Digest is the digest of the targeted content. Digest digest.Digest `json:"digest"` @@ -35,16 +35,24 @@ type Descriptor struct { // Annotations contains arbitrary metadata relating to the targeted content. Annotations map[string]string `json:"annotations,omitempty"` + // Data is an embedding of the targeted content. This is encoded as a base64 + // string when marshalled to JSON (automatically, by encoding/json). If + // present, Data can be used directly to avoid fetching the targeted content. + Data []byte `json:"data,omitempty"` + // Platform describes the platform which the image in the manifest runs on. // // This should only be used when referring to a manifest. Platform *Platform `json:"platform,omitempty"` + + // ArtifactType is the IANA media type of this artifact. + ArtifactType string `json:"artifactType,omitempty"` } // Platform describes the platform which the image in the manifest runs on. type Platform struct { // Architecture field specifies the CPU architecture, for example - // `amd64` or `ppc64`. + // `amd64` or `ppc64le`. Architecture string `json:"architecture"` // OS specifies the operating system, for example `linux` or `windows`. @@ -62,3 +70,11 @@ type Platform struct { // example `v7` to specify ARMv7 when architecture is `arm`. Variant string `json:"variant,omitempty"` } + +// DescriptorEmptyJSON is the descriptor of a blob with content of `{}`. +var DescriptorEmptyJSON = Descriptor{ + MediaType: MediaTypeEmptyJSON, + Digest: `sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a`, + Size: 2, + Data: []byte(`{}`), +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go index 82da6c6a89896..e2bed9d4e4695 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go @@ -21,12 +21,18 @@ import "github.com/opencontainers/image-spec/specs-go" type Index struct { specs.Versioned - // MediaType specificies the type of this document data structure e.g. `application/vnd.oci.image.index.v1+json` + // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.index.v1+json` MediaType string `json:"mediaType,omitempty"` + // ArtifactType specifies the IANA media type of artifact when the manifest is used for an artifact. + ArtifactType string `json:"artifactType,omitempty"` + // Manifests references platform specific manifests. 
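The new `Data` field on `Descriptor` is worth a concrete look, since the `DescriptorEmptyJSON` value above exercises it. A small standalone sketch using the published import path:

```go
package main

import (
	"encoding/json"
	"fmt"

	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// encoding/json base64-encodes []byte fields, so the embedded
	// content "{}" is serialized as "e30=" under the "data" key.
	b, err := json.Marshal(v1.DescriptorEmptyJSON)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
```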
Manifests []Descriptor `json:"manifests"` + // Subject is an optional link from the image manifest to another manifest forming an association between the image manifest and the other manifest. + Subject *Descriptor `json:"subject,omitempty"` + // Annotations contains arbitrary metadata for the image index. Annotations map[string]string `json:"annotations,omitempty"` } diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go index fc79e9e0d140f..c5503cb3053b2 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go @@ -15,10 +15,14 @@ package v1 const ( - // ImageLayoutFile is the file name of oci image layout file + // ImageLayoutFile is the file name containing ImageLayout in an OCI Image Layout ImageLayoutFile = "oci-layout" // ImageLayoutVersion is the version of ImageLayout ImageLayoutVersion = "1.0.0" + // ImageIndexFile is the file name of the entry point for references and descriptors in an OCI Image Layout + ImageIndexFile = "index.json" + // ImageBlobsDir is the directory name containing content addressable blobs in an OCI Image Layout + ImageBlobsDir = "blobs" ) // ImageLayout is the structure in the "oci-layout" file, found in the root diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go index d72d15ce4bb8b..26fec52a6bcf4 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go @@ -1,4 +1,4 @@ -// Copyright 2016 The Linux Foundation +// Copyright 2016-2022 The Linux Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -20,9 +20,12 @@ import "github.com/opencontainers/image-spec/specs-go" type Manifest struct { specs.Versioned - // MediaType specificies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json` + // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json` MediaType string `json:"mediaType,omitempty"` + // ArtifactType specifies the IANA media type of artifact when the manifest is used for an artifact. + ArtifactType string `json:"artifactType,omitempty"` + // Config references a configuration object for a container, by digest. // The referenced configuration object is a JSON blob that the runtime uses to set up the container. Config Descriptor `json:"config"` @@ -30,6 +33,9 @@ type Manifest struct { // Layers is an indexed list of layers referenced by the manifest. Layers []Descriptor `json:"layers"` + // Subject is an optional link from the image manifest to another manifest forming an association between the image manifest and the other manifest. + Subject *Descriptor `json:"subject,omitempty"` + // Annotations contains arbitrary metadata for the image manifest. 
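The layout constants above fix the on-disk shape of an OCI image layout: an `oci-layout` file, an `index.json` entry point, and a `blobs` directory keyed by digest. A hedged sketch of resolving a blob path with them (blobPath is hypothetical; it leans on the go-digest package vendored alongside):

```go
package main

import (
	"fmt"
	"path/filepath"

	digest "github.com/opencontainers/go-digest"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// blobPath maps a digest to its file inside an OCI image layout,
// e.g. <root>/blobs/sha256/<hex>.
func blobPath(layoutRoot string, dgst digest.Digest) string {
	return filepath.Join(layoutRoot, v1.ImageBlobsDir, dgst.Algorithm().String(), dgst.Encoded())
}

func main() {
	fmt.Println(blobPath("/tmp/layout", v1.DescriptorEmptyJSON.Digest))
	// /tmp/layout/blobs/sha256/44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a
}
```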
Annotations map[string]string `json:"annotations,omitempty"` } diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go index bad7bb97f4734..ce8313e796210 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go @@ -21,12 +21,20 @@ const ( // MediaTypeLayoutHeader specifies the media type for the oci-layout. MediaTypeLayoutHeader = "application/vnd.oci.layout.header.v1+json" + // MediaTypeImageIndex specifies the media type for an image index. + MediaTypeImageIndex = "application/vnd.oci.image.index.v1+json" + // MediaTypeImageManifest specifies the media type for an image manifest. MediaTypeImageManifest = "application/vnd.oci.image.manifest.v1+json" - // MediaTypeImageIndex specifies the media type for an image index. - MediaTypeImageIndex = "application/vnd.oci.image.index.v1+json" + // MediaTypeImageConfig specifies the media type for the image configuration. + MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" + + // MediaTypeEmptyJSON specifies the media type for an unused blob containing the value "{}". + MediaTypeEmptyJSON = "application/vnd.oci.empty.v1+json" +) +const ( // MediaTypeImageLayer is the media type used for layers referenced by the manifest. MediaTypeImageLayer = "application/vnd.oci.image.layer.v1.tar" @@ -34,15 +42,44 @@ const ( // referenced by the manifest. MediaTypeImageLayerGzip = "application/vnd.oci.image.layer.v1.tar+gzip" + // MediaTypeImageLayerZstd is the media type used for zstd compressed + // layers referenced by the manifest. + MediaTypeImageLayerZstd = "application/vnd.oci.image.layer.v1.tar+zstd" +) + +// Non-distributable layer media-types. +// +// Deprecated: Non-distributable layers are deprecated, and not recommended +// for future use. Implementations SHOULD NOT produce new non-distributable +// layers. +// https://github.com/opencontainers/image-spec/pull/965 +const ( // MediaTypeImageLayerNonDistributable is the media type for layers referenced by // the manifest but with distribution restrictions. + // + // Deprecated: Non-distributable layers are deprecated, and not recommended + // for future use. Implementations SHOULD NOT produce new non-distributable + // layers. + // https://github.com/opencontainers/image-spec/pull/965 MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar" // MediaTypeImageLayerNonDistributableGzip is the media type for // gzipped layers referenced by the manifest but with distribution // restrictions. + // + // Deprecated: Non-distributable layers are deprecated, and not recommended + // for future use. Implementations SHOULD NOT produce new non-distributable + // layers. + // https://github.com/opencontainers/image-spec/pull/965 MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" - // MediaTypeImageConfig specifies the media type for the image configuration. - MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" + // MediaTypeImageLayerNonDistributableZstd is the media type for zstd + // compressed layers referenced by the manifest but with distribution + // restrictions. + // + // Deprecated: Non-distributable layers are deprecated, and not recommended + // for future use. Implementations SHOULD NOT produce new non-distributable + // layers. 
+ // https://github.com/opencontainers/image-spec/pull/965 + MediaTypeImageLayerNonDistributableZstd = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd" ) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go index 0d9543f16000d..7069ae44d715d 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go @@ -20,9 +20,9 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 1 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 0 + VersionMinor = 1 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 2 + VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. VersionDev = "" diff --git a/vendor/go.etcd.io/bbolt/.go-version b/vendor/go.etcd.io/bbolt/.go-version new file mode 100644 index 0000000000000..f124bfa155441 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/.go-version @@ -0,0 +1 @@ +1.21.9 diff --git a/vendor/go.etcd.io/bbolt/README.md b/vendor/go.etcd.io/bbolt/README.md index 2be669a60a185..495a93ef8f38d 100644 --- a/vendor/go.etcd.io/bbolt/README.md +++ b/vendor/go.etcd.io/bbolt/README.md @@ -421,10 +421,19 @@ Prev() Move to the previous key. ``` Each of those functions has a return signature of `(key []byte, value []byte)`. -When you have iterated to the end of the cursor then `Next()` will return a -`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()` -before calling `Next()` or `Prev()`. If you do not seek to a position then -these functions will return a `nil` key. +You must seek to a position using `First()`, `Last()`, or `Seek()` before calling +`Next()` or `Prev()`. If you do not seek to a position then these functions will +return a `nil` key. + +When you have iterated to the end of the cursor, then `Next()` will return a +`nil` key and the cursor still points to the last element if present. When you +have iterated to the beginning of the cursor, then `Prev()` will return a `nil` +key and the cursor still points to the first element if present. + +If you remove key/value pairs during iteration, the cursor may automatically +move to the next position in the current node, if one is present, each time a key is removed. +When you call `c.Next()` after removing a key, it may skip one key/value pair. +Refer to [pull/611](https://github.com/etcd-io/bbolt/pull/611) for more details. During iteration, if the key is non-`nil` but the value is `nil`, that means the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to @@ -850,6 +859,12 @@ Here are a few things to note when evaluating and using Bolt: to grow. However, it's important to note that deleting large chunks of data will not allow you to reclaim that space on disk. +* Removing key/value pairs from a bucket while iterating over it with a + cursor may not work properly. Each time a key/value pair is removed, the + cursor may automatically move to the next position if one is present. When users + call `c.Next()` after removing a key, it may skip one key/value pair. + Refer to https://github.com/etcd-io/bbolt/pull/611 for more details. + For more information on page allocation, [see this comment][page-allocation].
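A short sketch of the safer pattern implied by the README note above, using only the documented cursor API (assumes an open *bolt.DB named db from go.etcd.io/bbolt; the bucket name and the shouldRemove predicate are hypothetical):

```go
err := db.Update(func(tx *bolt.Tx) error {
	c := tx.Bucket([]byte("pets")).Cursor()
	for k, _ := c.First(); k != nil; {
		if shouldRemove(k) {
			key := append([]byte(nil), k...) // copy: k may be invalidated by Delete
			if err := c.Delete(); err != nil {
				return err
			}
			// Re-seek instead of calling c.Next(), which may skip a pair
			// after a delete; Seek lands on the first key >= the deleted one.
			k, _ = c.Seek(key)
			continue
		}
		k, _ = c.Next()
	}
	return nil
})
```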
[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638 diff --git a/vendor/go.etcd.io/bbolt/bucket.go b/vendor/go.etcd.io/bbolt/bucket.go index 054467af30389..f3533d3446b8d 100644 --- a/vendor/go.etcd.io/bbolt/bucket.go +++ b/vendor/go.etcd.io/bbolt/bucket.go @@ -162,12 +162,17 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { return nil, ErrBucketNameRequired } + // Insert into node. + // Tip: Use a new variable `newKey` instead of reusing the existing `key`, which + // would otherwise be marked as leaking and therefore could not be allocated on the stack. + newKey := cloneBytes(key) + // Move cursor to correct position. c := b.Cursor() - k, _, flags := c.seek(key) + k, _, flags := c.seek(newKey) // Return an error if there is an existing key. - if bytes.Equal(key, k) { + if bytes.Equal(newKey, k) { if (flags & bucketLeafFlag) != 0 { return nil, ErrBucketExists } @@ -182,16 +187,14 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { } var value = bucket.write() - // Insert into node. - key = cloneBytes(key) - c.node().put(key, key, value, 0, bucketLeafFlag) + c.node().put(newKey, newKey, value, 0, bucketLeafFlag) // Since subbuckets are not allowed on inline buckets, we need to // dereference the inline page, if it exists. This will cause the bucket // to be treated as a regular, non-inline bucket for the rest of the tx. b.page = nil - return b.Bucket(key), nil + return b.Bucket(newKey), nil } // CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. @@ -288,18 +291,23 @@ func (b *Bucket) Put(key []byte, value []byte) error { return ErrValueTooLarge } + // Insert into node. + // Tip: Use a new variable `newKey` instead of reusing the existing `key`, which + // would otherwise be marked as leaking and therefore could not be allocated on the stack. + newKey := cloneBytes(key) + // Move cursor to correct position. c := b.Cursor() - k, _, flags := c.seek(key) + k, _, flags := c.seek(newKey) // Return an error if there is an existing key with a bucket value. - if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 { + if bytes.Equal(newKey, k) && (flags&bucketLeafFlag) != 0 { return ErrIncompatibleValue } - // Insert into node. - key = cloneBytes(key) - c.node().put(key, key, value, 0, 0) + // gofail: var beforeBucketPut struct{} + + c.node().put(newKey, newKey, value, 0, 0) return nil } diff --git a/vendor/go.etcd.io/bbolt/cursor.go b/vendor/go.etcd.io/bbolt/cursor.go index 5dafb0cac3a92..bbfd92a9bc1ee 100644 --- a/vendor/go.etcd.io/bbolt/cursor.go +++ b/vendor/go.etcd.io/bbolt/cursor.go @@ -71,7 +71,7 @@ func (c *Cursor) Last() (key []byte, value []byte) { // If this is an empty page (calling Delete may result in empty pages) // we call prev to find the last page that is not empty - for len(c.stack) > 0 && c.stack[len(c.stack)-1].count() == 0 { + for len(c.stack) > 1 && c.stack[len(c.stack)-1].count() == 0 { c.prev() } @@ -254,6 +254,15 @@ func (c *Cursor) prev() (key []byte, value []byte, flags uint32) { elem.index-- break } + // If we've hit the beginning, we should stop moving the cursor, + // and stay at the first element, so that users can change direction and + // continue iterating over the elements by calling `Next`. + // We should return nil in such a case.
+ // Refer to https://github.com/etcd-io/bbolt/issues/733 + if len(c.stack) == 1 { + c.first() + return nil, nil, 0 + } c.stack = c.stack[:i] } diff --git a/vendor/go.etcd.io/bbolt/freelist.go b/vendor/go.etcd.io/bbolt/freelist.go index 50f2d0e174dc3..61d43f81b46db 100644 --- a/vendor/go.etcd.io/bbolt/freelist.go +++ b/vendor/go.etcd.io/bbolt/freelist.go @@ -282,9 +282,8 @@ func (f *freelist) read(p *page) { if count == 0 { f.ids = nil } else { - var ids []pgid - data := unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), unsafe.Sizeof(ids[0]), idx) - unsafeSlice(unsafe.Pointer(&ids), data, count) + data := unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), unsafe.Sizeof(pgid(0)), idx) + ids := unsafe.Slice((*pgid)(data), count) // copy the ids, so we don't modify on the freelist page directly idsCopy := make([]pgid, count) @@ -322,15 +321,13 @@ func (f *freelist) write(p *page) error { p.count = uint16(l) } else if l < 0xFFFF { p.count = uint16(l) - var ids []pgid data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - unsafeSlice(unsafe.Pointer(&ids), data, l) + ids := unsafe.Slice((*pgid)(data), l) f.copyall(ids) } else { p.count = 0xFFFF - var ids []pgid data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - unsafeSlice(unsafe.Pointer(&ids), data, l+1) + ids := unsafe.Slice((*pgid)(data), l+1) ids[0] = pgid(l) f.copyall(ids[1:]) } diff --git a/vendor/go.etcd.io/bbolt/page.go b/vendor/go.etcd.io/bbolt/page.go index 379645c97fd50..bb081b031e659 100644 --- a/vendor/go.etcd.io/bbolt/page.go +++ b/vendor/go.etcd.io/bbolt/page.go @@ -74,9 +74,8 @@ func (p *page) leafPageElements() []leafPageElement { if p.count == 0 { return nil } - var elems []leafPageElement data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - unsafeSlice(unsafe.Pointer(&elems), data, int(p.count)) + elems := unsafe.Slice((*leafPageElement)(data), int(p.count)) return elems } @@ -91,9 +90,8 @@ func (p *page) branchPageElements() []branchPageElement { if p.count == 0 { return nil } - var elems []branchPageElement data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - unsafeSlice(unsafe.Pointer(&elems), data, int(p.count)) + elems := unsafe.Slice((*branchPageElement)(data), int(p.count)) return elems } diff --git a/vendor/go.etcd.io/bbolt/unsafe.go b/vendor/go.etcd.io/bbolt/unsafe.go index c0e50375007fa..7745d32ce194f 100644 --- a/vendor/go.etcd.io/bbolt/unsafe.go +++ b/vendor/go.etcd.io/bbolt/unsafe.go @@ -1,7 +1,6 @@ package bbolt import ( - "reflect" "unsafe" ) @@ -26,14 +25,3 @@ func unsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte { // all), so this is believed to be correct. return (*[maxAllocSize]byte)(unsafeAdd(base, offset))[i:j:j] } - -// unsafeSlice modifies the data, len, and cap of a slice variable pointed to by -// the slice parameter. This helper should be used over other direct -// manipulation of reflect.SliceHeader to prevent misuse, namely, converting -// from reflect.SliceHeader to a Go slice type. -func unsafeSlice(slice, data unsafe.Pointer, len int) { - s := (*reflect.SliceHeader)(slice) - s.Data = uintptr(data) - s.Cap = len - s.Len = len -} diff --git a/vendor/golang.org/x/tools/cmd/stringer/stringer.go b/vendor/golang.org/x/tools/cmd/stringer/stringer.go deleted file mode 100644 index 2b19c93e8ea8b..0000000000000 --- a/vendor/golang.org/x/tools/cmd/stringer/stringer.go +++ /dev/null @@ -1,660 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
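The bbolt changes above swap hand-edited reflect.SliceHeader manipulation for unsafe.Slice (available since Go 1.17). A standalone sketch of the same aliasing pattern:

```go
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	raw := [4]uint64{1, 2, 3, 4}
	p := unsafe.Pointer(&raw[0])

	// View the same memory as a []uint64 without copying.
	ids := unsafe.Slice((*uint64)(p), len(raw))
	ids[0] = 42
	fmt.Println(raw[0]) // 42: ids aliases raw's backing array
}
```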
- -// Stringer is a tool to automate the creation of methods that satisfy the fmt.Stringer -// interface. Given the name of a (signed or unsigned) integer type T that has constants -// defined, stringer will create a new self-contained Go source file implementing -// -// func (t T) String() string -// -// The file is created in the same package and directory as the package that defines T. -// It has helpful defaults designed for use with go generate. -// -// Stringer works best with constants that are consecutive values such as created using iota, -// but creates good code regardless. In the future it might also provide custom support for -// constant sets that are bit patterns. -// -// For example, given this snippet, -// -// package painkiller -// -// type Pill int -// -// const ( -// Placebo Pill = iota -// Aspirin -// Ibuprofen -// Paracetamol -// Acetaminophen = Paracetamol -// ) -// -// running this command -// -// stringer -type=Pill -// -// in the same directory will create the file pill_string.go, in package painkiller, -// containing a definition of -// -// func (Pill) String() string -// -// That method will translate the value of a Pill constant to the string representation -// of the respective constant name, so that the call fmt.Print(painkiller.Aspirin) will -// print the string "Aspirin". -// -// Typically this process would be run using go generate, like this: -// -// //go:generate stringer -type=Pill -// -// If multiple constants have the same value, the lexically first matching name will -// be used (in the example, Acetaminophen will print as "Paracetamol"). -// -// With no arguments, it processes the package in the current directory. -// Otherwise, the arguments must name a single directory holding a Go package -// or a set of Go source files that represent a single Go package. -// -// The -type flag accepts a comma-separated list of types so a single run can -// generate methods for multiple types. The default output file is t_string.go, -// where t is the lower-cased name of the first type listed. It can be overridden -// with the -output flag. -// -// The -linecomment flag tells stringer to generate the text of any line comment, trimmed -// of leading spaces, instead of the constant name. For instance, if the constants above had a -// Pill prefix, one could write -// -// PillAspirin // Aspirin -// -// to suppress it in the output. -package main // import "golang.org/x/tools/cmd/stringer" - -import ( - "bytes" - "flag" - "fmt" - "go/ast" - "go/constant" - "go/format" - "go/token" - "go/types" - "log" - "os" - "path/filepath" - "sort" - "strings" - - "golang.org/x/tools/go/packages" -) - -var ( - typeNames = flag.String("type", "", "comma-separated list of type names; must be set") - output = flag.String("output", "", "output file name; default srcdir/_string.go") - trimprefix = flag.String("trimprefix", "", "trim the `prefix` from the generated constant names") - linecomment = flag.Bool("linecomment", false, "use line comment text as printed text when present") - buildTags = flag.String("tags", "", "comma-separated list of build tags to apply") -) - -// Usage is a replacement usage function for the flags package. -func Usage() { - fmt.Fprintf(os.Stderr, "Usage of stringer:\n") - fmt.Fprintf(os.Stderr, "\tstringer [flags] -type T [directory]\n") - fmt.Fprintf(os.Stderr, "\tstringer [flags] -type T files... 
# Must be a single package\n") - fmt.Fprintf(os.Stderr, "For more information, see:\n") - fmt.Fprintf(os.Stderr, "\thttps://pkg.go.dev/golang.org/x/tools/cmd/stringer\n") - fmt.Fprintf(os.Stderr, "Flags:\n") - flag.PrintDefaults() -} - -func main() { - log.SetFlags(0) - log.SetPrefix("stringer: ") - flag.Usage = Usage - flag.Parse() - if len(*typeNames) == 0 { - flag.Usage() - os.Exit(2) - } - types := strings.Split(*typeNames, ",") - var tags []string - if len(*buildTags) > 0 { - tags = strings.Split(*buildTags, ",") - } - - // We accept either one directory or a list of files. Which do we have? - args := flag.Args() - if len(args) == 0 { - // Default: process whole package in current directory. - args = []string{"."} - } - - // Parse the package once. - var dir string - g := Generator{ - trimPrefix: *trimprefix, - lineComment: *linecomment, - } - // TODO(suzmue): accept other patterns for packages (directories, list of files, import paths, etc). - if len(args) == 1 && isDirectory(args[0]) { - dir = args[0] - } else { - if len(tags) != 0 { - log.Fatal("-tags option applies only to directories, not when files are specified") - } - dir = filepath.Dir(args[0]) - } - - g.parsePackage(args, tags) - - // Print the header and package clause. - g.Printf("// Code generated by \"stringer %s\"; DO NOT EDIT.\n", strings.Join(os.Args[1:], " ")) - g.Printf("\n") - g.Printf("package %s", g.pkg.name) - g.Printf("\n") - g.Printf("import \"strconv\"\n") // Used by all methods. - - // Run generate for each type. - for _, typeName := range types { - g.generate(typeName) - } - - // Format the output. - src := g.format() - - // Write to file. - outputName := *output - if outputName == "" { - baseName := fmt.Sprintf("%s_string.go", types[0]) - outputName = filepath.Join(dir, strings.ToLower(baseName)) - } - err := os.WriteFile(outputName, src, 0644) - if err != nil { - log.Fatalf("writing output: %s", err) - } -} - -// isDirectory reports whether the named file is a directory. -func isDirectory(name string) bool { - info, err := os.Stat(name) - if err != nil { - log.Fatal(err) - } - return info.IsDir() -} - -// Generator holds the state of the analysis. Primarily used to buffer -// the output for format.Source. -type Generator struct { - buf bytes.Buffer // Accumulated output. - pkg *Package // Package we are scanning. - - trimPrefix string - lineComment bool - - logf func(format string, args ...interface{}) // test logging hook; nil when not testing -} - -func (g *Generator) Printf(format string, args ...interface{}) { - fmt.Fprintf(&g.buf, format, args...) -} - -// File holds a single parsed file and associated data. -type File struct { - pkg *Package // Package to which this file belongs. - file *ast.File // Parsed AST. - // These fields are reset for each type being generated. - typeName string // Name of the constant type. - values []Value // Accumulator for constant values of that type. - - trimPrefix string - lineComment bool -} - -type Package struct { - name string - defs map[*ast.Ident]types.Object - files []*File -} - -// parsePackage analyzes the single package constructed from the patterns and tags. -// parsePackage exits if there is an error. -func (g *Generator) parsePackage(patterns []string, tags []string) { - cfg := &packages.Config{ - Mode: packages.NeedName | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax, - // TODO: Need to think about constants in test files. Maybe write type_string_test.go - // in a separate pass? For later. 
- Tests: false, - BuildFlags: []string{fmt.Sprintf("-tags=%s", strings.Join(tags, " "))}, - Logf: g.logf, - } - pkgs, err := packages.Load(cfg, patterns...) - if err != nil { - log.Fatal(err) - } - if len(pkgs) != 1 { - log.Fatalf("error: %d packages matching %v", len(pkgs), strings.Join(patterns, " ")) - } - g.addPackage(pkgs[0]) -} - -// addPackage adds a type checked Package and its syntax files to the generator. -func (g *Generator) addPackage(pkg *packages.Package) { - g.pkg = &Package{ - name: pkg.Name, - defs: pkg.TypesInfo.Defs, - files: make([]*File, len(pkg.Syntax)), - } - - for i, file := range pkg.Syntax { - g.pkg.files[i] = &File{ - file: file, - pkg: g.pkg, - trimPrefix: g.trimPrefix, - lineComment: g.lineComment, - } - } -} - -// generate produces the String method for the named type. -func (g *Generator) generate(typeName string) { - values := make([]Value, 0, 100) - for _, file := range g.pkg.files { - // Set the state for this run of the walker. - file.typeName = typeName - file.values = nil - if file.file != nil { - ast.Inspect(file.file, file.genDecl) - values = append(values, file.values...) - } - } - - if len(values) == 0 { - log.Fatalf("no values defined for type %s", typeName) - } - // Generate code that will fail if the constants change value. - g.Printf("func _() {\n") - g.Printf("\t// An \"invalid array index\" compiler error signifies that the constant values have changed.\n") - g.Printf("\t// Re-run the stringer command to generate them again.\n") - g.Printf("\tvar x [1]struct{}\n") - for _, v := range values { - g.Printf("\t_ = x[%s - %s]\n", v.originalName, v.str) - } - g.Printf("}\n") - runs := splitIntoRuns(values) - // The decision of which pattern to use depends on the number of - // runs in the numbers. If there's only one, it's easy. For more than - // one, there's a tradeoff between complexity and size of the data - // and code vs. the simplicity of a map. A map takes more space, - // but so does the code. The decision here (crossover at 10) is - // arbitrary, but considers that for large numbers of runs the cost - // of the linear scan in the switch might become important, and - // rather than use yet another algorithm such as binary search, - // we punt and use a map. In any case, the likelihood of a map - // being necessary for any realistic example other than bitmasks - // is very low. And bitmasks probably deserve their own analysis, - // to be done some other day. - switch { - case len(runs) == 1: - g.buildOneRun(runs, typeName) - case len(runs) <= 10: - g.buildMultipleRuns(runs, typeName) - default: - g.buildMap(runs, typeName) - } -} - -// splitIntoRuns breaks the values into runs of contiguous sequences. -// For example, given 1,2,3,5,6,7 it returns {1,2,3},{5,6,7}. -// The input slice is known to be non-empty. -func splitIntoRuns(values []Value) [][]Value { - // We use stable sort so the lexically first name is chosen for equal elements. - sort.Stable(byValue(values)) - // Remove duplicates. Stable sort has put the one we want to print first, - // so use that one. The String method won't care about which named constant - // was the argument, so the first name for the given value is the only one to keep. - // We need to do this because identical values would cause the switch or map - // to fail to compile. 
- j := 1 - for i := 1; i < len(values); i++ { - if values[i].value != values[i-1].value { - values[j] = values[i] - j++ - } - } - values = values[:j] - runs := make([][]Value, 0, 10) - for len(values) > 0 { - // One contiguous sequence per outer loop. - i := 1 - for i < len(values) && values[i].value == values[i-1].value+1 { - i++ - } - runs = append(runs, values[:i]) - values = values[i:] - } - return runs -} - -// format returns the gofmt-ed contents of the Generator's buffer. -func (g *Generator) format() []byte { - src, err := format.Source(g.buf.Bytes()) - if err != nil { - // Should never happen, but can arise when developing this code. - // The user can compile the output to see the error. - log.Printf("warning: internal error: invalid Go generated: %s", err) - log.Printf("warning: compile the package to analyze the error") - return g.buf.Bytes() - } - return src -} - -// Value represents a declared constant. -type Value struct { - originalName string // The name of the constant. - name string // The name with trimmed prefix. - // The value is stored as a bit pattern alone. The boolean tells us - // whether to interpret it as an int64 or a uint64; the only place - // this matters is when sorting. - // Much of the time the str field is all we need; it is printed - // by Value.String. - value uint64 // Will be converted to int64 when needed. - signed bool // Whether the constant is a signed type. - str string // The string representation given by the "go/constant" package. -} - -func (v *Value) String() string { - return v.str -} - -// byValue lets us sort the constants into increasing order. -// We take care in the Less method to sort in signed or unsigned order, -// as appropriate. -type byValue []Value - -func (b byValue) Len() int { return len(b) } -func (b byValue) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b byValue) Less(i, j int) bool { - if b[i].signed { - return int64(b[i].value) < int64(b[j].value) - } - return b[i].value < b[j].value -} - -// genDecl processes one declaration clause. -func (f *File) genDecl(node ast.Node) bool { - decl, ok := node.(*ast.GenDecl) - if !ok || decl.Tok != token.CONST { - // We only care about const declarations. - return true - } - // The name of the type of the constants we are declaring. - // Can change if this is a multi-element declaration. - typ := "" - // Loop over the elements of the declaration. Each element is a ValueSpec: - // a list of names possibly followed by a type, possibly followed by values. - // If the type and value are both missing, we carry down the type (and value, - // but the "go/types" package takes care of that). - for _, spec := range decl.Specs { - vspec := spec.(*ast.ValueSpec) // Guaranteed to succeed as this is CONST. - if vspec.Type == nil && len(vspec.Values) > 0 { - // "X = 1". With no type but a value. If the constant is untyped, - // skip this vspec and reset the remembered type. - typ = "" - - // If this is a simple type conversion, remember the type. - // We don't mind if this is actually a call; a qualified call won't - // be matched (that will be SelectorExpr, not Ident), and only unusual - // situations will result in a function call that appears to be - // a type conversion. - ce, ok := vspec.Values[0].(*ast.CallExpr) - if !ok { - continue - } - id, ok := ce.Fun.(*ast.Ident) - if !ok { - continue - } - typ = id.Name - } - if vspec.Type != nil { - // "X T". We have a type. Remember it. 
- ident, ok := vspec.Type.(*ast.Ident) - if !ok { - continue - } - typ = ident.Name - } - if typ != f.typeName { - // This is not the type we're looking for. - continue - } - // We now have a list of names (from one line of source code) all being - // declared with the desired type. - // Grab their names and actual values and store them in f.values. - for _, name := range vspec.Names { - if name.Name == "_" { - continue - } - // This dance lets the type checker find the values for us. It's a - // bit tricky: look up the object declared by the name, find its - // types.Const, and extract its value. - obj, ok := f.pkg.defs[name] - if !ok { - log.Fatalf("no value for constant %s", name) - } - info := obj.Type().Underlying().(*types.Basic).Info() - if info&types.IsInteger == 0 { - log.Fatalf("can't handle non-integer constant type %s", typ) - } - value := obj.(*types.Const).Val() // Guaranteed to succeed as this is CONST. - if value.Kind() != constant.Int { - log.Fatalf("can't happen: constant is not an integer %s", name) - } - i64, isInt := constant.Int64Val(value) - u64, isUint := constant.Uint64Val(value) - if !isInt && !isUint { - log.Fatalf("internal error: value of %s is not an integer: %s", name, value.String()) - } - if !isInt { - u64 = uint64(i64) - } - v := Value{ - originalName: name.Name, - value: u64, - signed: info&types.IsUnsigned == 0, - str: value.String(), - } - if c := vspec.Comment; f.lineComment && c != nil && len(c.List) == 1 { - v.name = strings.TrimSpace(c.Text()) - } else { - v.name = strings.TrimPrefix(v.originalName, f.trimPrefix) - } - f.values = append(f.values, v) - } - } - return false -} - -// Helpers - -// usize returns the number of bits of the smallest unsigned integer -// type that will hold n. Used to create the smallest possible slice of -// integers to use as indexes into the concatenated strings. -func usize(n int) int { - switch { - case n < 1<<8: - return 8 - case n < 1<<16: - return 16 - default: - // 2^32 is enough constants for anyone. - return 32 - } -} - -// declareIndexAndNameVars declares the index slices and concatenated names -// strings representing the runs of values. -func (g *Generator) declareIndexAndNameVars(runs [][]Value, typeName string) { - var indexes, names []string - for i, run := range runs { - index, name := g.createIndexAndNameDecl(run, typeName, fmt.Sprintf("_%d", i)) - if len(run) != 1 { - indexes = append(indexes, index) - } - names = append(names, name) - } - g.Printf("const (\n") - for _, name := range names { - g.Printf("\t%s\n", name) - } - g.Printf(")\n\n") - - if len(indexes) > 0 { - g.Printf("var (") - for _, index := range indexes { - g.Printf("\t%s\n", index) - } - g.Printf(")\n\n") - } -} - -// declareIndexAndNameVar is the single-run version of declareIndexAndNameVars -func (g *Generator) declareIndexAndNameVar(run []Value, typeName string) { - index, name := g.createIndexAndNameDecl(run, typeName, "") - g.Printf("const %s\n", name) - g.Printf("var %s\n", index) -} - -// createIndexAndNameDecl returns the pair of declarations for the run. The caller will add "const" and "var". 
-func (g *Generator) createIndexAndNameDecl(run []Value, typeName string, suffix string) (string, string) { - b := new(bytes.Buffer) - indexes := make([]int, len(run)) - for i := range run { - b.WriteString(run[i].name) - indexes[i] = b.Len() - } - nameConst := fmt.Sprintf("_%s_name%s = %q", typeName, suffix, b.String()) - nameLen := b.Len() - b.Reset() - fmt.Fprintf(b, "_%s_index%s = [...]uint%d{0, ", typeName, suffix, usize(nameLen)) - for i, v := range indexes { - if i > 0 { - fmt.Fprintf(b, ", ") - } - fmt.Fprintf(b, "%d", v) - } - fmt.Fprintf(b, "}") - return b.String(), nameConst -} - -// declareNameVars declares the concatenated names string representing all the values in the runs. -func (g *Generator) declareNameVars(runs [][]Value, typeName string, suffix string) { - g.Printf("const _%s_name%s = \"", typeName, suffix) - for _, run := range runs { - for i := range run { - g.Printf("%s", run[i].name) - } - } - g.Printf("\"\n") -} - -// buildOneRun generates the variables and String method for a single run of contiguous values. -func (g *Generator) buildOneRun(runs [][]Value, typeName string) { - values := runs[0] - g.Printf("\n") - g.declareIndexAndNameVar(values, typeName) - // The generated code is simple enough to write as a Printf format. - lessThanZero := "" - if values[0].signed { - lessThanZero = "i < 0 || " - } - if values[0].value == 0 { // Signed or unsigned, 0 is still 0. - g.Printf(stringOneRun, typeName, usize(len(values)), lessThanZero) - } else { - g.Printf(stringOneRunWithOffset, typeName, values[0].String(), usize(len(values)), lessThanZero) - } -} - -// Arguments to format are: -// -// [1]: type name -// [2]: size of index element (8 for uint8 etc.) -// [3]: less than zero check (for signed types) -const stringOneRun = `func (i %[1]s) String() string { - if %[3]si >= %[1]s(len(_%[1]s_index)-1) { - return "%[1]s(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _%[1]s_name[_%[1]s_index[i]:_%[1]s_index[i+1]] -} -` - -// Arguments to format are: -// [1]: type name -// [2]: lowest defined value for type, as a string -// [3]: size of index element (8 for uint8 etc.) -// [4]: less than zero check (for signed types) -/* - */ -const stringOneRunWithOffset = `func (i %[1]s) String() string { - i -= %[2]s - if %[4]si >= %[1]s(len(_%[1]s_index)-1) { - return "%[1]s(" + strconv.FormatInt(int64(i + %[2]s), 10) + ")" - } - return _%[1]s_name[_%[1]s_index[i] : _%[1]s_index[i+1]] -} -` - -// buildMultipleRuns generates the variables and String method for multiple runs of contiguous values. -// For this pattern, a single Printf format won't do. -func (g *Generator) buildMultipleRuns(runs [][]Value, typeName string) { - g.Printf("\n") - g.declareIndexAndNameVars(runs, typeName) - g.Printf("func (i %s) String() string {\n", typeName) - g.Printf("\tswitch {\n") - for i, values := range runs { - if len(values) == 1 { - g.Printf("\tcase i == %s:\n", &values[0]) - g.Printf("\t\treturn _%s_name_%d\n", typeName, i) - continue - } - if values[0].value == 0 && !values[0].signed { - // For an unsigned lower bound of 0, "0 <= i" would be redundant. 
- g.Printf("\tcase i <= %s:\n", &values[len(values)-1]) - } else { - g.Printf("\tcase %s <= i && i <= %s:\n", &values[0], &values[len(values)-1]) - } - if values[0].value != 0 { - g.Printf("\t\ti -= %s\n", &values[0]) - } - g.Printf("\t\treturn _%s_name_%d[_%s_index_%d[i]:_%s_index_%d[i+1]]\n", - typeName, i, typeName, i, typeName, i) - } - g.Printf("\tdefault:\n") - g.Printf("\t\treturn \"%s(\" + strconv.FormatInt(int64(i), 10) + \")\"\n", typeName) - g.Printf("\t}\n") - g.Printf("}\n") -} - -// buildMap handles the case where the space is so sparse a map is a reasonable fallback. -// It's a rare situation but has simple code. -func (g *Generator) buildMap(runs [][]Value, typeName string) { - g.Printf("\n") - g.declareNameVars(runs, typeName, "") - g.Printf("\nvar _%s_map = map[%s]string{\n", typeName, typeName) - n := 0 - for _, values := range runs { - for _, value := range values { - g.Printf("\t%s: _%s_name[%d:%d],\n", &value, typeName, n, n+len(value.name)) - n += len(value.name) - } - } - g.Printf("}\n\n") - g.Printf(stringMap, typeName) -} - -// Argument to format is the type name. -const stringMap = `func (i %[1]s) String() string { - if str, ok := _%[1]s_map[i]; ok { - return str - } - return "%[1]s(" + strconv.FormatInt(int64(i), 10) + ")" -} -` diff --git a/vendor/google.golang.org/grpc/health/client.go b/vendor/google.golang.org/grpc/health/client.go new file mode 100644 index 0000000000000..740745c45f630 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/client.go @@ -0,0 +1,117 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package health + +import ( + "context" + "fmt" + "io" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + healthpb "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/status" +) + +var ( + backoffStrategy = backoff.DefaultExponential + backoffFunc = func(ctx context.Context, retries int) bool { + d := backoffStrategy.Backoff(retries) + timer := time.NewTimer(d) + select { + case <-timer.C: + return true + case <-ctx.Done(): + timer.Stop() + return false + } + } +) + +func init() { + internal.HealthCheckFunc = clientHealthCheck +} + +const healthCheckMethod = "/grpc.health.v1.Health/Watch" + +// This function implements the protocol defined at: +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md +func clientHealthCheck(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), service string) error { + tryCnt := 0 + +retryConnection: + for { + // Backs off if the connection has failed in some way without receiving a message in the previous retry. 
+ if tryCnt > 0 && !backoffFunc(ctx, tryCnt-1) { + return nil + } + tryCnt++ + + if ctx.Err() != nil { + return nil + } + setConnectivityState(connectivity.Connecting, nil) + rawS, err := newStream(healthCheckMethod) + if err != nil { + continue retryConnection + } + + s, ok := rawS.(grpc.ClientStream) + // Ideally, this should never happen. But if it happens, the server is marked as healthy for load-balancing purposes. + if !ok { + setConnectivityState(connectivity.Ready, nil) + return fmt.Errorf("newStream returned %v (type %T); want grpc.ClientStream", rawS, rawS) + } + + if err = s.SendMsg(&healthpb.HealthCheckRequest{Service: service}); err != nil && err != io.EOF { + // Stream should have been closed, so we can safely continue to create a new stream. + continue retryConnection + } + s.CloseSend() + + resp := new(healthpb.HealthCheckResponse) + for { + err = s.RecvMsg(resp) + + // Reports healthy for load-balancing purposes if the health check is not implemented on the server. + if status.Code(err) == codes.Unimplemented { + setConnectivityState(connectivity.Ready, nil) + return err + } + + // Reports unhealthy if the server's Watch method returns an error other than UNIMPLEMENTED. + if err != nil { + setConnectivityState(connectivity.TransientFailure, fmt.Errorf("connection active but received health check RPC error: %v", err)) + continue retryConnection + } + + // A message has been received, so reset the try count to remove the need for backoff on the next retry. + tryCnt = 0 + if resp.Status == healthpb.HealthCheckResponse_SERVING { + setConnectivityState(connectivity.Ready, nil) + } else { + setConnectivityState(connectivity.TransientFailure, fmt.Errorf("connection active but health check failed. status=%s", resp.Status)) + } + } + } +} diff --git a/vendor/google.golang.org/grpc/health/logging.go b/vendor/google.golang.org/grpc/health/logging.go new file mode 100644 index 0000000000000..83c6acf55ef63 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/logging.go @@ -0,0 +1,23 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package health + +import "google.golang.org/grpc/grpclog" + +var logger = grpclog.Component("health_service") diff --git a/vendor/google.golang.org/grpc/health/server.go b/vendor/google.golang.org/grpc/health/server.go new file mode 100644 index 0000000000000..cce6312d77f9c --- /dev/null +++ b/vendor/google.golang.org/grpc/health/server.go @@ -0,0 +1,163 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
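The client.go code above only takes effect when the health package is linked in (its init registers internal.HealthCheckFunc) and the client's service config turns health checking on. A hedged dial sketch (the address and config values are placeholders; health checking also requires an LB policy that supports it, such as round_robin):

```go
package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	_ "google.golang.org/grpc/health" // init registers the client health-check function
)

func dial() (*grpc.ClientConn, error) {
	// serviceName "" asks about the server's overall health.
	return grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(`{
			"loadBalancingConfig": [ { "round_robin": {} } ],
			"healthCheckConfig": { "serviceName": "" }
		}`),
	)
}

func main() {
	conn, err := dial()
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}
```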
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package health provides a service that exposes a server's health, and it must be +// imported to enable support for client-side health checks. +package health + +import ( + "context" + "sync" + + "google.golang.org/grpc/codes" + healthgrpc "google.golang.org/grpc/health/grpc_health_v1" + healthpb "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/status" +) + +// Server implements `service Health`. +type Server struct { + healthgrpc.UnimplementedHealthServer + mu sync.RWMutex + // If shutdown is true, all serving statuses are expected to be NOT_SERVING, + // and will stay NOT_SERVING. + shutdown bool + // statusMap stores the serving status of the services this Server monitors. + statusMap map[string]healthpb.HealthCheckResponse_ServingStatus + updates map[string]map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus +} + +// NewServer returns a new Server. +func NewServer() *Server { + return &Server{ + statusMap: map[string]healthpb.HealthCheckResponse_ServingStatus{"": healthpb.HealthCheckResponse_SERVING}, + updates: make(map[string]map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus), + } +} + +// Check implements `service Health`. +func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { + s.mu.RLock() + defer s.mu.RUnlock() + if servingStatus, ok := s.statusMap[in.Service]; ok { + return &healthpb.HealthCheckResponse{ + Status: servingStatus, + }, nil + } + return nil, status.Error(codes.NotFound, "unknown service") +} + +// Watch implements `service Health`. +func (s *Server) Watch(in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error { + service := in.Service + // update channel is used for getting service status updates. + update := make(chan healthpb.HealthCheckResponse_ServingStatus, 1) + s.mu.Lock() + // Puts the initial status into the channel. + if servingStatus, ok := s.statusMap[service]; ok { + update <- servingStatus + } else { + update <- healthpb.HealthCheckResponse_SERVICE_UNKNOWN + } + + // Registers the update channel to the correct place in the updates map. + if _, ok := s.updates[service]; !ok { + s.updates[service] = make(map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus) + } + s.updates[service][stream] = update + defer func() { + s.mu.Lock() + delete(s.updates[service], stream) + s.mu.Unlock() + }() + s.mu.Unlock() + + var lastSentStatus healthpb.HealthCheckResponse_ServingStatus = -1 + for { + select { + // Status updated. Sends the up-to-date status to the client. + case servingStatus := <-update: + if lastSentStatus == servingStatus { + continue + } + lastSentStatus = servingStatus + err := stream.Send(&healthpb.HealthCheckResponse{Status: servingStatus}) + if err != nil { + return status.Error(codes.Canceled, "Stream has ended.") + } + // Context done. Removes the update channel from the updates map. + case <-stream.Context().Done(): + return status.Error(codes.Canceled, "Stream has ended.") + } + } +} + +// SetServingStatus is called when you need to reset the serving status of a service +// or insert a new service entry into the statusMap.
+func (s *Server) SetServingStatus(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.shutdown {
+		logger.Infof("health: status changing for %s to %v is ignored because health service is shutdown", service, servingStatus)
+		return
+	}
+
+	s.setServingStatusLocked(service, servingStatus)
+}
+
+func (s *Server) setServingStatusLocked(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) {
+	s.statusMap[service] = servingStatus
+	for _, update := range s.updates[service] {
+		// Clears any previous update that has not been sent to the client from the channel.
+		// This can happen if the client is not reading and the server gets flow control limited.
+		select {
+		case <-update:
+		default:
+		}
+		// Puts the most recent update to the channel.
+		update <- servingStatus
+	}
+}
+
+// Shutdown sets all serving status to NOT_SERVING, and configures the server to
+// ignore all future status changes.
+//
+// This changes serving status for all services. To set the status of a
+// particular service, call SetServingStatus().
+func (s *Server) Shutdown() {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.shutdown = true
+	for service := range s.statusMap {
+		s.setServingStatusLocked(service, healthpb.HealthCheckResponse_NOT_SERVING)
+	}
+}
+
+// Resume sets all serving status to SERVING, and configures the server to
+// accept all future status changes.
+//
+// This changes serving status for all services. To set the status of a
+// particular service, call SetServingStatus().
+func (s *Server) Resume() {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.shutdown = false
+	for service := range s.statusMap {
+		s.setServingStatusLocked(service, healthpb.HealthCheckResponse_SERVING)
+	}
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index e513c15d0aae5..c5e6aa235d230 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -42,8 +42,6 @@ cloud.google.com/go/storage
 cloud.google.com/go/storage/internal
 cloud.google.com/go/storage/internal/apiv2
 cloud.google.com/go/storage/internal/apiv2/storagepb
-# github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24
-## explicit; go 1.20
 # github.com/Azure/azure-pipeline-go v0.2.3
 ## explicit; go 1.14
 github.com/Azure/azure-pipeline-go/pipeline
@@ -234,8 +232,8 @@ github.com/Masterminds/semver/v3
 # github.com/Masterminds/sprig/v3 v3.2.3
 ## explicit; go 1.13
 github.com/Masterminds/sprig/v3
-# github.com/Microsoft/go-winio v0.6.1
-## explicit; go 1.17
+# github.com/Microsoft/go-winio v0.6.2
+## explicit; go 1.21
 github.com/Microsoft/go-winio
 github.com/Microsoft/go-winio/internal/fs
 github.com/Microsoft/go-winio/internal/socket
@@ -464,6 +462,9 @@ github.com/baidubce/bce-sdk-go/util/log
 # github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
 ## explicit; go 1.20
 github.com/bboreham/go-loser
+# github.com/benbjohnson/immutable v0.4.0
+## explicit; go 1.18
+github.com/benbjohnson/immutable
 # github.com/beorn7/perks v1.0.1
 ## explicit; go 1.11
 github.com/beorn7/perks/quantile
@@ -483,7 +484,7 @@ github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1
 # github.com/cespare/xxhash v1.1.0
 ## explicit
 github.com/cespare/xxhash
-# github.com/cespare/xxhash/v2 v2.2.0
+# github.com/cespare/xxhash/v2 v2.3.0
 ## explicit; go 1.11
 github.com/cespare/xxhash/v2
 # github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe
@@ -502,12 +503,18 @@ github.com/cncf/xds/go/xds/type/v3
 # github.com/coder/quartz v0.1.0
 ## explicit; go 1.21.8
 github.com/coder/quartz
-# github.com/containerd/fifo v1.0.0
-## explicit; go 1.13
+# github.com/containerd/containerd v1.7.20
+## explicit; go 1.21
+github.com/containerd/containerd/pkg/userns
+# github.com/containerd/fifo v1.1.0
+## explicit; go 1.18
 github.com/containerd/fifo
 # github.com/containerd/log v0.1.0
 ## explicit; go 1.20
 github.com/containerd/log
+# github.com/coreos/etcd v3.3.27+incompatible
+## explicit
+github.com/coreos/etcd/pkg/fileutil
 # github.com/coreos/go-semver v0.3.0
 ## explicit
 github.com/coreos/go-semver/semver
@@ -515,11 +522,15 @@ github.com/coreos/go-semver/semver
 ## explicit
 github.com/coreos/go-systemd/activation
 github.com/coreos/go-systemd/internal/dlopen
+github.com/coreos/go-systemd/journal
 github.com/coreos/go-systemd/sdjournal
 # github.com/coreos/go-systemd/v22 v22.5.0
 ## explicit; go 1.12
 github.com/coreos/go-systemd/v22/activation
 github.com/coreos/go-systemd/v22/journal
+# github.com/coreos/pkg v0.0.0-20220810130054-c7d1c02cb6cf
+## explicit
+github.com/coreos/pkg/capnslog
 # github.com/cristalhq/hedgedhttp v0.9.1
 ## explicit; go 1.16
 github.com/cristalhq/hedgedhttp
@@ -545,14 +556,14 @@ github.com/digitalocean/godo/metrics
 # github.com/dimchansky/utfbom v1.1.1
 ## explicit
 github.com/dimchansky/utfbom
-# github.com/distribution/reference v0.5.0
+# github.com/distribution/reference v0.6.0
 ## explicit; go 1.20
 github.com/distribution/reference
 # github.com/dlclark/regexp2 v1.4.0
 ## explicit
 github.com/dlclark/regexp2
 github.com/dlclark/regexp2/syntax
-# github.com/docker/docker v25.0.3+incompatible
+# github.com/docker/docker v25.0.5+incompatible
 ## explicit
 github.com/docker/docker/api
 github.com/docker/docker/api/types
@@ -826,8 +837,8 @@ github.com/golang-jwt/jwt/v5
 # github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da
 ## explicit
 github.com/golang/groupcache/lru
-# github.com/golang/protobuf v1.5.3
-## explicit; go 1.9
+# github.com/golang/protobuf v1.5.4
+## explicit; go 1.17
 github.com/golang/protobuf/jsonpb
 github.com/golang/protobuf/proto
 github.com/golang/protobuf/ptypes
@@ -921,8 +932,8 @@ github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1
 github.com/gophercloud/gophercloud/openstack/identity/v3/tokens
 github.com/gophercloud/gophercloud/openstack/utils
 github.com/gophercloud/gophercloud/pagination
-# github.com/gorilla/mux v1.8.0
-## explicit; go 1.12
+# github.com/gorilla/mux v1.8.1
+## explicit; go 1.20
 github.com/gorilla/mux
 # github.com/gorilla/websocket v1.5.0
 ## explicit; go 1.12
@@ -1029,9 +1040,12 @@ github.com/hashicorp/go-hclog
 # github.com/hashicorp/go-immutable-radix v1.3.1
 ## explicit
 github.com/hashicorp/go-immutable-radix
-# github.com/hashicorp/go-msgpack v0.5.5
-## explicit
+# github.com/hashicorp/go-msgpack v1.1.5
+## explicit; go 1.13
 github.com/hashicorp/go-msgpack/codec
+# github.com/hashicorp/go-msgpack/v2 v2.1.1
+## explicit; go 1.19
+github.com/hashicorp/go-msgpack/v2/codec
 # github.com/hashicorp/go-multierror v1.1.1
 ## explicit; go 1.13
 github.com/hashicorp/go-multierror
@@ -1061,6 +1075,17 @@ github.com/hashicorp/golang-lru/v2/simplelru
 # github.com/hashicorp/memberlist v0.5.0 => github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe
 ## explicit; go 1.12
 github.com/hashicorp/memberlist
+# github.com/hashicorp/raft v1.7.0
+## explicit; go 1.20
+github.com/hashicorp/raft
+# github.com/hashicorp/raft-wal v0.4.1
+## explicit; go 1.18
+github.com/hashicorp/raft-wal
+github.com/hashicorp/raft-wal/fs
+github.com/hashicorp/raft-wal/metadb
+github.com/hashicorp/raft-wal/metrics
+github.com/hashicorp/raft-wal/segment
+github.com/hashicorp/raft-wal/types
 # github.com/hashicorp/serf v0.10.1
 ## explicit; go 1.12
 github.com/hashicorp/serf/coordinate
@@ -1271,8 +1296,8 @@ github.com/oklog/ulid
 # github.com/opencontainers/go-digest v1.0.0
 ## explicit; go 1.13
 github.com/opencontainers/go-digest
-# github.com/opencontainers/image-spec v1.0.2
-## explicit
+# github.com/opencontainers/image-spec v1.1.0
+## explicit; go 1.18
 github.com/opencontainers/image-spec/specs-go
 github.com/opencontainers/image-spec/specs-go/v1
 # github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e
@@ -1570,8 +1595,8 @@ github.com/yuin/gopher-lua/pm
 # github.com/yusufpapurcu/wmi v1.2.4
 ## explicit; go 1.16
 github.com/yusufpapurcu/wmi
-# go.etcd.io/bbolt v1.3.8
-## explicit; go 1.17
+# go.etcd.io/bbolt v1.3.10
+## explicit; go 1.21
 go.etcd.io/bbolt
 # go.etcd.io/etcd/api/v3 v3.5.4
 ## explicit; go 1.16
@@ -1806,7 +1831,6 @@ golang.org/x/text/unicode/norm
 golang.org/x/time/rate
 # golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d
 ## explicit; go 1.19
-golang.org/x/tools/cmd/stringer
 golang.org/x/tools/go/gcexportdata
 golang.org/x/tools/go/internal/packagesdriver
 golang.org/x/tools/go/packages
@@ -1916,6 +1940,7 @@ google.golang.org/grpc/encoding/gzip
 google.golang.org/grpc/encoding/proto
 google.golang.org/grpc/experimental
 google.golang.org/grpc/grpclog
+google.golang.org/grpc/health
 google.golang.org/grpc/health/grpc_health_v1
 google.golang.org/grpc/internal
 google.golang.org/grpc/internal/admin
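
Note for reviewers: the newly vendored google.golang.org/grpc/health package (registered in vendor/modules.txt above) is the standard gRPC health service. Below is a minimal sketch of how it is typically wired into a server; the listener address and the "logproto.Querier" service name are illustrative placeholders, not values configured anywhere in this diff.

// Hypothetical wiring sketch, not part of this change.
package main

import (
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/health"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	lis, err := net.Listen("tcp", "localhost:9095")
	if err != nil {
		panic(err)
	}
	s := grpc.NewServer()

	// NewServer starts with the empty-string (overall) service set to SERVING.
	hs := health.NewServer()
	healthpb.RegisterHealthServer(s, hs)

	// Flip a per-service status while its backing component is unavailable.
	hs.SetServingStatus("logproto.Querier", healthpb.HealthCheckResponse_NOT_SERVING)
	hs.SetServingStatus("logproto.Querier", healthpb.HealthCheckResponse_SERVING)

	_ = s.Serve(lis)
}

On the client side, the Watch-based checker in client.go only runs when the package is imported for its side effects (import _ "google.golang.org/grpc/health") and health checking is enabled through a healthCheckConfig entry in the channel's service config.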