diff --git a/.github/workflows/build-loki-binary.yml b/.github/workflows/build-loki-binary.yml index 3d26bb06556..03d884e5bc6 100644 --- a/.github/workflows/build-loki-binary.yml +++ b/.github/workflows/build-loki-binary.yml @@ -9,7 +9,7 @@ permissions: contents: read env: - GO_VERSION: "1.26.1" + GO_VERSION: "1.26.2" jobs: build: diff --git a/.github/workflows/images.yml b/.github/workflows/images.yml index f55000d4ed1..4e74326a739 100644 --- a/.github/workflows/images.yml +++ b/.github/workflows/images.yml @@ -10,7 +10,7 @@ "loki-canary-boringcrypto-image": "env": "BUILD_TIMEOUT": 60 - "GO_VERSION": "1.26.1" + "GO_VERSION": "1.26.2" "IMAGE_PREFIX": "grafana" "RELEASE_LIB_REF": "fb3b12ec3e6411bcb951a5ea08cc983c2f2735fc" "RELEASE_REPO": "grafana/loki" @@ -134,7 +134,7 @@ "loki-canary-image": "env": "BUILD_TIMEOUT": 60 - "GO_VERSION": "1.26.1" + "GO_VERSION": "1.26.2" "IMAGE_PREFIX": "grafana" "RELEASE_LIB_REF": "fb3b12ec3e6411bcb951a5ea08cc983c2f2735fc" "RELEASE_REPO": "grafana/loki" @@ -258,7 +258,7 @@ "loki-image": "env": "BUILD_TIMEOUT": 60 - "GO_VERSION": "1.26.1" + "GO_VERSION": "1.26.2" "IMAGE_PREFIX": "grafana" "RELEASE_LIB_REF": "fb3b12ec3e6411bcb951a5ea08cc983c2f2735fc" "RELEASE_REPO": "grafana/loki" @@ -382,7 +382,7 @@ "querytee-image": "env": "BUILD_TIMEOUT": 60 - "GO_VERSION": "1.26.1" + "GO_VERSION": "1.26.2" "IMAGE_PREFIX": "grafana" "RELEASE_LIB_REF": "fb3b12ec3e6411bcb951a5ea08cc983c2f2735fc" "RELEASE_REPO": "grafana/loki" diff --git a/.github/workflows/logql-correctness.yml b/.github/workflows/logql-correctness.yml index 5ec86c24adf..9ca85b20715 100644 --- a/.github/workflows/logql-correctness.yml +++ b/.github/workflows/logql-correctness.yml @@ -49,7 +49,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v4 with: - go-version: "1.26.1" + go-version: "1.26.2" # The metastore generates invalid filenames for Windows (with colons), # which get rejected by upload-artifact. 
We zip these files to avoid this @@ -96,7 +96,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v4 with: - go-version: "1.26.1" + go-version: "1.26.2" - name: Create results directory run: mkdir -p ./pkg/logql/bench/results diff --git a/.github/workflows/querytee-images.yml b/.github/workflows/querytee-images.yml index 50bd87691af..880d7f40ab5 100644 --- a/.github/workflows/querytee-images.yml +++ b/.github/workflows/querytee-images.yml @@ -17,7 +17,7 @@ permissions: env: BUILD_TIMEOUT: 60 IMAGE_PREFIX: grafana - GO_VERSION: "1.26.1" + GO_VERSION: "1.26.2" jobs: loki-query-tee-image: diff --git a/Makefile b/Makefile index b80c729f088..0bd5223d96b 100644 --- a/Makefile +++ b/Makefile @@ -26,7 +26,7 @@ DOCKER_INTERACTIVE_FLAGS := --tty --interactive endif # Ensure you run `make release-workflows` after changing this -GO_VERSION := 1.26.1 +GO_VERSION := 1.26.2 # Ensure you run `make IMAGE_TAG= build-image-push` after changing this BUILD_IMAGE_TAG := 0.35.1 diff --git a/clients/cmd/docker-driver/Dockerfile b/clients/cmd/docker-driver/Dockerfile index 8d119673e61..133a771241f 100644 --- a/clients/cmd/docker-driver/Dockerfile +++ b/clients/cmd/docker-driver/Dockerfile @@ -1,4 +1,4 @@ -ARG BUILD_IMAGE=grafana/loki-build-image:0.34.6 +ARG BUILD_IMAGE=grafana/loki-build-image:0.35.1 ARG GOARCH=amd64 # Directories in this file are referenced from the root of the project not this folder # This file is intended to be called from the root like so: @@ -11,13 +11,13 @@ WORKDIR /src/loki ARG GOARCH RUN make clean && make BUILD_IN_CONTAINER=false GOARCH=${GOARCH} clients/cmd/docker-driver/docker-driver -FROM alpine:3.23.3@sha256:25109184c71bdad752c8312a8623239686a9a2071e8825f20acb8f2198c3f659 AS temp +FROM alpine:3.23.4@sha256:5b10f432ef3da1b8d4c7eb6c487f2f5a8f096bc91145e68878dd4a5019afde11 AS temp ARG GOARCH RUN apk add --update --no-cache --arch=${GOARCH} ca-certificates tzdata -FROM --platform=linux/${GOARCH} 
alpine:3.23.3@sha256:25109184c71bdad752c8312a8623239686a9a2071e8825f20acb8f2198c3f659 +FROM --platform=linux/${GOARCH} alpine:3.23.4@sha256:5b10f432ef3da1b8d4c7eb6c487f2f5a8f096bc91145e68878dd4a5019afde11 COPY --from=temp /etc/ca-certificates.conf /etc/ca-certificates.conf COPY --from=temp /usr/share/ca-certificates /usr/share/ca-certificates diff --git a/clients/pkg/promtail/targets/azureeventhubs/parser.go b/clients/pkg/promtail/targets/azureeventhubs/parser.go index 0e8b3a4d1d3..6a3983a0463 100644 --- a/clients/pkg/promtail/targets/azureeventhubs/parser.go +++ b/clients/pkg/promtail/targets/azureeventhubs/parser.go @@ -16,6 +16,9 @@ import ( "github.com/grafana/loki/v3/pkg/logproto" ) +// errEntryDropped is returned when a log entry is dropped by relabeling rules. +var errEntryDropped = errors.New("entry dropped by relabeling") + type azureMonitorResourceLogs struct { Records []json.RawMessage `json:"records"` } @@ -143,6 +146,9 @@ func (e *messageParser) processRecords(labelSet model.LabelSet, relabels []*rela result := make([]api.Entry, 0, len(records)) for _, m := range records { entry, err := e.parseRecord(m, labelSet, relabels, useIncomingTimestamp, messageTime) + if errors.Is(err, errEntryDropped) { + continue + } if err != nil { return nil, err } @@ -170,6 +176,9 @@ func (e *messageParser) parseRecord(record []byte, labelSet model.LabelSet, rela } logLabels := e.getLabels(logRecord, relabelConfig) + if logLabels == nil { + return api.Entry{}, errEntryDropped + } ts := e.getTime(messageTime, useIncomingTimestamp, logRecord) return api.Entry{ diff --git a/cmd/logcli/Dockerfile b/cmd/logcli/Dockerfile index 9fd9f21e13a..d108954c145 100644 --- a/cmd/logcli/Dockerfile +++ b/cmd/logcli/Dockerfile @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.26 +ARG GO_VERSION=1.26.2 ARG IMAGE_TAG FROM golang:${GO_VERSION} AS build diff --git a/cmd/logql-analyzer/Dockerfile b/cmd/logql-analyzer/Dockerfile index 864df33b4ea..1fce880b702 100644 --- a/cmd/logql-analyzer/Dockerfile +++ 
b/cmd/logql-analyzer/Dockerfile @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.26 +ARG GO_VERSION=1.26.2 FROM golang:${GO_VERSION} AS build COPY . /src/loki diff --git a/cmd/loki-canary-boringcrypto/Dockerfile b/cmd/loki-canary-boringcrypto/Dockerfile index ea9eca4ef78..7335adf448c 100644 --- a/cmd/loki-canary-boringcrypto/Dockerfile +++ b/cmd/loki-canary-boringcrypto/Dockerfile @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.26 +ARG GO_VERSION=1.26.2 FROM golang:${GO_VERSION} as build ARG IMAGE_TAG diff --git a/cmd/loki-canary/Dockerfile b/cmd/loki-canary/Dockerfile index 29e6dd483bb..cfc431dbca2 100644 --- a/cmd/loki-canary/Dockerfile +++ b/cmd/loki-canary/Dockerfile @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.26.1 +ARG GO_VERSION=1.26.2 FROM golang:${GO_VERSION} AS build ARG IMAGE_TAG diff --git a/cmd/loki-canary/Dockerfile.cross b/cmd/loki-canary/Dockerfile.cross index c95568b858f..537533b8e42 100644 --- a/cmd/loki-canary/Dockerfile.cross +++ b/cmd/loki-canary/Dockerfile.cross @@ -1,5 +1,5 @@ -ARG BUILD_IMAGE=grafana/loki-build-image:0.34.8 -ARG GO_VERSION=1.26.1 +ARG BUILD_IMAGE=grafana/loki-build-image:0.35.1 +ARG GO_VERSION=1.26.2 # Directories in this file are referenced from the root of the project not this folder # This file is intended to be called from the root like so: # docker build -t grafana/promtail -f cmd/promtail/Dockerfile . 
diff --git a/cmd/loki/Dockerfile b/cmd/loki/Dockerfile index f386c6909f1..232e777e3e5 100644 --- a/cmd/loki/Dockerfile +++ b/cmd/loki/Dockerfile @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.26 +ARG GO_VERSION=1.26.2 # Go build stage FROM golang:${GO_VERSION} AS build diff --git a/cmd/loki/Dockerfile.cross b/cmd/loki/Dockerfile.cross index df4e1d9dc9d..48997bde3e2 100644 --- a/cmd/loki/Dockerfile.cross +++ b/cmd/loki/Dockerfile.cross @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.26 +ARG GO_VERSION=1.26.2 # Directories in this file are referenced from the root of the project not this folder # This file is intended to be called from the root like so: # docker build -t grafana/loki -f cmd/loki/Dockerfile . diff --git a/cmd/loki/Dockerfile.debug b/cmd/loki/Dockerfile.debug index bc6a80612db..fdd4f8ff250 100644 --- a/cmd/loki/Dockerfile.debug +++ b/cmd/loki/Dockerfile.debug @@ -1,5 +1,5 @@ ARG BUILD_IMAGE=grafana/loki-build-image:0.35.1 -ARG GO_VERSION=1.26 +ARG GO_VERSION=1.26.2 # Directories in this file are referenced from the root of the project not this folder # This file is intended to be called from the root like so: # docker build -t grafana/loki -f cmd/loki/Dockerfile.debug . diff --git a/cmd/migrate/Dockerfile b/cmd/migrate/Dockerfile index ae7bb802bd2..ce994a79f5a 100644 --- a/cmd/migrate/Dockerfile +++ b/cmd/migrate/Dockerfile @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.26 +ARG GO_VERSION=1.26.2 FROM golang:${GO_VERSION} AS build COPY . 
/src/loki WORKDIR /src/loki diff --git a/cmd/querytee/Dockerfile b/cmd/querytee/Dockerfile index c51a98a9954..a40ffdd2ad0 100644 --- a/cmd/querytee/Dockerfile +++ b/cmd/querytee/Dockerfile @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.26 +ARG GO_VERSION=1.26.2 FROM golang:${GO_VERSION} AS build ARG IMAGE_TAG diff --git a/flake.lock b/flake.lock index 8da90b5850f..22904571e35 100644 --- a/flake.lock +++ b/flake.lock @@ -20,11 +20,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1772047000, - "narHash": "sha256-7DaQVv4R97cii/Qdfy4tmDZMB2xxtyIvNGSwXBBhSmo=", + "lastModified": 1778003029, + "narHash": "sha256-q/nkKLDtHIyLjZpKhWk3cSK5IYsFqtMd6UtXF3ddjgA=", "owner": "nixos", "repo": "nixpkgs", - "rev": "1267bb4920d0fc06ea916734c11b0bf004bbe17e", + "rev": "0c88e1f2bdb93d5999019e99cb0e61e1fe2af4c5", "type": "github" }, "original": { @@ -36,11 +36,11 @@ }, "nixpkgs-unstable": { "locked": { - "lastModified": 1772433332, - "narHash": "sha256-izhTDFKsg6KeVBxJS9EblGeQ8y+O8eCa6RcW874vxEc=", + "lastModified": 1777954456, + "narHash": "sha256-hGdgeU2Nk87RAuZyYjyDjFL6LK7dAZN5RE9+hrDTkDU=", "owner": "nixos", "repo": "nixpkgs", - "rev": "cf59864ef8aa2e178cccedbe2c178185b0365705", + "rev": "549bd84d6279f9852cae6225e372cc67fb91a4c1", "type": "github" }, "original": { diff --git a/go.mod b/go.mod index bb92f633e98..77adaaf6bd4 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/grafana/loki/v3 -go 1.25.7 +go 1.26.2 ignore ./tools/dev @@ -65,7 +65,7 @@ require ( github.com/jmespath/go-jmespath v0.4.0 github.com/joncrlsn/dque v0.0.0-20211108142734-c2ef48c5192a github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.18.4 + github.com/klauspost/compress v1.18.5 github.com/klauspost/pgzip v1.2.6 github.com/leodido/go-syslog/v4 v4.3.0 github.com/mattn/go-ieproxy v0.0.12 @@ -75,7 +75,6 @@ require ( github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/oklog/run v1.2.0 - 
github.com/oklog/ulid v1.3.1 // indirect github.com/opentracing-contrib/go-grpc v0.1.2 // indirect github.com/opentracing-contrib/go-stdlib v1.1.1 // indirect github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b @@ -86,7 +85,7 @@ require ( github.com/prometheus/client_golang v1.23.2 github.com/prometheus/client_model v0.6.2 github.com/prometheus/common v0.67.5 - github.com/prometheus/prometheus v0.310.1-0.20260320085417-166d20151c0d + github.com/prometheus/prometheus v0.311.2-0.20260410083055-07c6232d159b github.com/redis/go-redis/v9 v9.18.0 github.com/segmentio/fasthash v1.0.3 github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c @@ -158,7 +157,7 @@ require ( github.com/twmb/franz-go/pkg/kmsg v1.12.0 github.com/twmb/franz-go/plugin/kotel v1.6.0 github.com/twmb/franz-go/plugin/kprom v1.2.1 - go.opentelemetry.io/collector/pdata v1.53.0 + go.opentelemetry.io/collector/pdata v1.54.0 go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.67.0 go.opentelemetry.io/otel/sdk v1.43.0 go.yaml.in/yaml/v3 v3.0.4 @@ -184,23 +183,22 @@ require ( github.com/andybalholm/brotli v1.2.0 // indirect github.com/apache/thrift v0.22.0 // indirect github.com/atotto/clipboard v0.1.4 // indirect - github.com/aws/aws-sdk-go v1.55.7 // indirect + github.com/aws/aws-sdk-go v1.55.8 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.8 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.22 // indirect - github.com/aws/aws-sdk-go-v2/service/ec2 v1.290.0 // indirect - github.com/aws/aws-sdk-go-v2/service/ecs v1.72.0 // indirect - github.com/aws/aws-sdk-go-v2/service/elasticache v1.51.9 // indirect + github.com/aws/aws-sdk-go-v2/service/ec2 v1.296.0 // indirect + github.com/aws/aws-sdk-go-v2/service/ecs v1.74.0 // indirect + github.com/aws/aws-sdk-go-v2/service/elasticache v1.51.12 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7 // indirect 
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.13 // indirect github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.20 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.21 // indirect - github.com/aws/aws-sdk-go-v2/service/kafka v1.48.0 // indirect - github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.11 // indirect - github.com/aws/aws-sdk-go-v2/service/rds v1.116.1 // indirect + github.com/aws/aws-sdk-go-v2/service/kafka v1.49.1 // indirect + github.com/aws/aws-sdk-go-v2/service/lightsail v1.51.0 // indirect + github.com/aws/aws-sdk-go-v2/service/rds v1.117.0 // indirect github.com/aws/aws-sdk-go-v2/service/signin v1.0.8 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.17 // indirect github.com/bahlo/generic-list-go v0.2.0 // indirect - github.com/basgys/goxml2json v1.1.1-0.20231018121955-e66ee54ceaad // indirect github.com/bits-and-blooms/bitset v1.24.4 // indirect github.com/cenkalti/backoff/v5 v5.0.3 // indirect github.com/charmbracelet/colorprofile v0.4.2 // indirect @@ -222,22 +220,22 @@ require ( github.com/go-ini/ini v1.67.0 // indirect github.com/go-jose/go-jose/v4 v4.1.4 // indirect github.com/go-ole/go-ole v1.3.0 // indirect - github.com/go-openapi/swag/cmdutils v0.25.4 // indirect - github.com/go-openapi/swag/conv v0.25.4 // indirect - github.com/go-openapi/swag/fileutils v0.25.4 // indirect - github.com/go-openapi/swag/jsonname v0.25.4 // indirect - github.com/go-openapi/swag/jsonutils v0.25.4 // indirect - github.com/go-openapi/swag/loading v0.25.4 // indirect - github.com/go-openapi/swag/mangling v0.25.4 // indirect - github.com/go-openapi/swag/netutils v0.25.4 // indirect - github.com/go-openapi/swag/stringutils v0.25.4 // indirect - github.com/go-openapi/swag/typeutils v0.25.4 // indirect - github.com/go-openapi/swag/yamlutils v0.25.4 // indirect + github.com/go-openapi/swag/cmdutils v0.25.5 // indirect + github.com/go-openapi/swag/conv v0.25.5 // indirect + 
github.com/go-openapi/swag/fileutils v0.25.5 // indirect + github.com/go-openapi/swag/jsonname v0.25.5 // indirect + github.com/go-openapi/swag/jsonutils v0.25.5 // indirect + github.com/go-openapi/swag/loading v0.25.5 // indirect + github.com/go-openapi/swag/mangling v0.25.5 // indirect + github.com/go-openapi/swag/netutils v0.25.5 // indirect + github.com/go-openapi/swag/stringutils v0.25.5 // indirect + github.com/go-openapi/swag/typeutils v0.25.5 // indirect + github.com/go-openapi/swag/yamlutils v0.25.5 // indirect github.com/go-viper/mapstructure/v2 v2.5.0 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/goccy/go-json v0.10.5 // indirect github.com/google/flatbuffers v25.12.19+incompatible // indirect - github.com/gophercloud/gophercloud/v2 v2.10.0 // indirect + github.com/gophercloud/gophercloud/v2 v2.11.1 // indirect github.com/gorilla/handlers v1.5.2 // indirect github.com/grafana/otel-profiling-go v0.5.1 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 // indirect @@ -251,7 +249,7 @@ require ( github.com/klauspost/crc32 v1.3.0 // indirect github.com/knadh/koanf/maps v0.1.2 // indirect github.com/knadh/koanf/providers/confmap v1.0.0 // indirect - github.com/knadh/koanf/v2 v2.3.2 // indirect + github.com/knadh/koanf/v2 v2.3.3 // indirect github.com/lucasb-eyer/go-colorful v1.3.0 // indirect github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3 // indirect github.com/mattn/go-runewidth v0.0.20 // indirect @@ -262,33 +260,33 @@ require ( github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 // indirect github.com/minio/crc64nvme v1.1.1 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/moby/api v1.54.0 // indirect + github.com/moby/moby/client v0.3.0 // indirect github.com/moby/sys/atomicwriter v0.1.0 // indirect github.com/moby/sys/sequential v0.6.0 // indirect github.com/moby/sys/userns v0.1.0 // indirect github.com/muesli/cancelreader v0.2.2 // indirect 
github.com/ncruces/go-strftime v1.0.0 // indirect github.com/ncw/swift v1.0.53 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.145.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.145.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.145.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.148.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.148.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.148.0 // indirect github.com/oschwald/maxminddb-golang/v2 v2.1.1 // indirect github.com/parquet-go/bitpack v1.0.0 // indirect github.com/parquet-go/jsonlite v1.0.0 // indirect - github.com/pb33f/jsonpath v0.8.1 // indirect - github.com/pb33f/libopenapi v0.34.0 // indirect - github.com/pb33f/libopenapi-validator v0.13.0 // indirect - github.com/pb33f/ordered-map/v2 v2.3.0 // indirect + github.com/pb33f/jsonpath v0.8.2 // indirect + github.com/pb33f/libopenapi v0.34.4 // indirect + github.com/pb33f/ordered-map/v2 v2.3.1 // indirect github.com/philhofer/fwd v1.2.0 // indirect github.com/pires/go-proxyproto v0.8.1 // indirect github.com/pkg/xattr v0.4.12 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect - github.com/prometheus/client_golang/exp v0.0.0-20260108101519-fb0838f53562 // indirect - github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect + github.com/prometheus/client_golang/exp v0.0.0-20260325093428-d8591d0db856 // indirect + github.com/puzpuzpuz/xsync/v4 v4.4.0 // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/sahilm/fuzzy v0.1.1 // indirect - 
github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect github.com/sercand/kuberesolver/v6 v6.0.1 // indirect github.com/sethvargo/go-retry v0.3.0 // indirect github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect @@ -302,14 +300,14 @@ require ( github.com/yusufpapurcu/wmi v1.2.4 // indirect github.com/zeebo/xxh3 v1.1.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect - go.opentelemetry.io/collector/component v1.51.0 // indirect - go.opentelemetry.io/collector/confmap v1.51.0 // indirect - go.opentelemetry.io/collector/confmap/xconfmap v0.145.0 // indirect - go.opentelemetry.io/collector/consumer v1.51.0 // indirect - go.opentelemetry.io/collector/featuregate v1.53.0 // indirect - go.opentelemetry.io/collector/internal/componentalias v0.145.0 // indirect - go.opentelemetry.io/collector/pipeline v1.51.0 // indirect - go.opentelemetry.io/collector/processor v1.51.0 // indirect + go.opentelemetry.io/collector/component v1.54.0 // indirect + go.opentelemetry.io/collector/confmap v1.54.0 // indirect + go.opentelemetry.io/collector/confmap/xconfmap v0.148.0 // indirect + go.opentelemetry.io/collector/consumer v1.54.0 // indirect + go.opentelemetry.io/collector/featuregate v1.54.0 // indirect + go.opentelemetry.io/collector/internal/componentalias v0.148.0 // indirect + go.opentelemetry.io/collector/pipeline v1.54.0 // indirect + go.opentelemetry.io/collector/processor v1.54.0 // indirect go.opentelemetry.io/contrib/bridges/prometheus v0.64.0 // indirect go.opentelemetry.io/contrib/detectors/gcp v1.39.0 // indirect go.opentelemetry.io/contrib/exporters/autoexport v0.64.0 // indirect @@ -321,7 +319,7 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.39.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.43.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 // indirect + 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.42.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0 // indirect go.opentelemetry.io/otel/exporters/prometheus v0.61.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.19.0 // indirect @@ -331,7 +329,7 @@ require ( go.opentelemetry.io/otel/sdk/log v0.19.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.43.0 // indirect go.opentelemetry.io/proto/otlp v1.10.0 // indirect - go.yaml.in/yaml/v2 v2.4.3 // indirect + go.yaml.in/yaml/v2 v2.4.4 // indirect go.yaml.in/yaml/v4 v4.0.0-rc.4 // indirect golang.org/x/exp v0.0.0-20260218203240-3dfff04db8fa // indirect golang.org/x/telemetry v0.0.0-20260311193753-579e4da9a98c // indirect @@ -387,7 +385,7 @@ require ( github.com/dennwc/varint v1.0.0 github.com/dgryski/go-metro v0.0.0-20250106013310-edb8663e5e33 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect - github.com/digitalocean/godo v1.175.0 // indirect + github.com/digitalocean/godo v1.178.0 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect github.com/distribution/reference v0.6.0 // indirect github.com/docker/go-connections v0.6.0 // indirect @@ -396,21 +394,21 @@ require ( github.com/dolthub/maphash v0.1.0 // indirect github.com/eapache/go-resiliency v1.7.0 // indirect github.com/eapache/queue v1.1.0 // indirect - github.com/edsrzf/mmap-go v1.2.0 // indirect + github.com/edsrzf/mmap-go v1.2.1-0.20241212181136-fad1cd13edbd // indirect github.com/emicklei/go-restful/v3 v3.12.2 // indirect github.com/envoyproxy/protoc-gen-validate v1.3.3 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/analysis v0.24.2 // indirect - github.com/go-openapi/errors v0.22.6 // indirect - github.com/go-openapi/jsonpointer v0.22.4 // indirect - github.com/go-openapi/jsonreference v0.21.4 // indirect - 
github.com/go-openapi/loads v0.23.2 // indirect - github.com/go-openapi/spec v0.22.3 // indirect - github.com/go-openapi/strfmt v0.25.0 // indirect - github.com/go-openapi/swag v0.25.4 // indirect - github.com/go-openapi/validate v0.25.1 // indirect + github.com/go-openapi/analysis v0.24.3 // indirect + github.com/go-openapi/errors v0.22.7 // indirect + github.com/go-openapi/jsonpointer v0.22.5 // indirect + github.com/go-openapi/jsonreference v0.21.5 // indirect + github.com/go-openapi/loads v0.23.3 // indirect + github.com/go-openapi/spec v0.22.4 // indirect + github.com/go-openapi/strfmt v0.26.1 // indirect + github.com/go-openapi/swag v0.25.5 // indirect + github.com/go-openapi/validate v0.25.2 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect github.com/go-playground/validator/v10 v10.28.0 // indirect @@ -421,7 +419,7 @@ require ( github.com/google/btree v1.1.3 // indirect github.com/google/gnostic-models v0.7.0 // indirect github.com/google/go-querystring v1.2.0 // indirect - github.com/google/pprof v0.0.0-20260202012954-cb029daf43ef // indirect + github.com/google/pprof v0.0.0-20260302011040-a15ffb7f9dcc // indirect github.com/google/s2a-go v0.1.9 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.14 // indirect github.com/googleapis/gax-go/v2 v2.18.0 // indirect @@ -482,7 +480,6 @@ require ( go.etcd.io/etcd/api/v3 v3.6.7 // indirect go.etcd.io/etcd/client/pkg/v3 v3.6.7 // indirect go.etcd.io/etcd/client/v3 v3.6.7 // indirect - go.mongodb.org/mongo-driver v1.17.9 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.65.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 @@ -500,8 +497,8 @@ require ( gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect - k8s.io/api v0.35.1 // 
indirect - k8s.io/client-go v0.35.1 // indirect + k8s.io/api v0.35.3 // indirect + k8s.io/client-go v0.35.3 // indirect k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect rsc.io/binaryregexp v0.2.0 // indirect sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect diff --git a/go.sum b/go.sum index 207979a89a3..c00cc6f3fdd 100644 --- a/go.sum +++ b/go.sum @@ -194,8 +194,8 @@ github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+ github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4= github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE= -github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ= +github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk= github.com/aws/aws-sdk-go-v2 v1.41.5 h1:dj5kopbwUsVUVFgO4Fi5BIT3t4WyqIDjGKCangnV/yY= github.com/aws/aws-sdk-go-v2 v1.41.5/go.mod h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.8 h1:eBMB84YGghSocM7PsjmmPffTa+1FBUeNvGvFou6V/4o= @@ -216,12 +216,12 @@ github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.22 h1:rWyie/PxDRIdhNf4DzRk0lvjVOq github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.22/go.mod h1:zd/JsJ4P7oGfUhXn1VyLqaRZwPmZwg44Jf2dS84Dm3Y= github.com/aws/aws-sdk-go-v2/service/dynamodb v1.56.2 h1:xi/ECwajy2mixviBD7bKAlGGSwzEaFKX2wIhrZt9NGw= github.com/aws/aws-sdk-go-v2/service/dynamodb v1.56.2/go.mod h1:dLREOeW66eVaaGIOi2ZlLHDgkR3nuJ02rd00j0YSlBE= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.290.0 h1:Ub4CvLWf8wEQ7/pEiqXM9tTsHXf2BokPLwbqEvrmAq0= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.290.0/go.mod 
h1:Uy+C+Sc58jozdoL1McQr8bDsEvNFx+/nBY+vpO1HVUY= -github.com/aws/aws-sdk-go-v2/service/ecs v1.72.0 h1:hggRKpv26DpYMOik3wWo1Ty5MkANoXhNobjfWpC3G4M= -github.com/aws/aws-sdk-go-v2/service/ecs v1.72.0/go.mod h1:pMlGFDpHoLTJOIZHGdJOAWmi+xeIlQXuFTuQxs1epYE= -github.com/aws/aws-sdk-go-v2/service/elasticache v1.51.9 h1:hTgZLyNoDWphZUtTtcvQh0LP6TZO0mtdSfZK/GObDLk= -github.com/aws/aws-sdk-go-v2/service/elasticache v1.51.9/go.mod h1:91RkIYy9ubykxB50XGYDsbljLZnrZ6rp/Urt4rZrbwQ= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.296.0 h1:98Miqj16un1WLNyM1RjVDhXYumhqZrQfAeG8i4jPG6o= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.296.0/go.mod h1:T6ndRfdhnXLIY5oKBHjYZDVj706los2zGdpThppquvA= +github.com/aws/aws-sdk-go-v2/service/ecs v1.74.0 h1:YS5TXaEvzDb+sV+wdQFUtuCAk0GeFR9Ai6HFdxpz6q8= +github.com/aws/aws-sdk-go-v2/service/ecs v1.74.0/go.mod h1:10kBgdaNJz0FO/+JWDUH+0rtSjkn5yafgavDDmmhFzs= +github.com/aws/aws-sdk-go-v2/service/elasticache v1.51.12 h1:S066ajzfPRCSW4lsSHOYglne6SNi2CHt1u5omzW1RBg= +github.com/aws/aws-sdk-go-v2/service/elasticache v1.51.12/go.mod h1:86SE4NcXxbxr8KTG3yOyDmd4HyiFmKl8TexXnhYJ+Bw= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7 h1:5EniKhLZe4xzL7a+fU3C2tfUN4nWIqlLesfrjkuPFTY= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7/go.mod h1:x0nZssQ3qZSnIcePWLvcoFisRXJzcTVvYpAAdYX8+GI= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.13 h1:JRaIgADQS/U6uXDqlPiefP32yXTda7Kqfx+LgspooZM= @@ -232,12 +232,12 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.21 h1:c31//R3x github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.21/go.mod h1:r6+pf23ouCB718FUxaqzZdbpYFyDtehyZcmP5KL9FkA= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.21 h1:ZlvrNcHSFFWURB8avufQq9gFsheUgjVD9536obIknfM= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.21/go.mod h1:cv3TNhVrssKR0O/xxLJVRfd2oazSnZnkUeTf6ctUwfQ= -github.com/aws/aws-sdk-go-v2/service/kafka v1.48.0 
h1:CKRWqysU9INeoi0nTI9gDzDAJk+GatzFduVYxT/wkrw= -github.com/aws/aws-sdk-go-v2/service/kafka v1.48.0/go.mod h1:tWnHS64fg5ydLHivFlCAtEh/1iMNzr56QsH3F+UTwD4= -github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.11 h1:VM5e5M39zRSs+aT0O9SoxHjUXqXxhbw3Yi0FdMQWPIc= -github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.11/go.mod h1:0jvzYPIQGCpnY/dmdaotTk2JH4QuBlnW0oeyrcGLWJ4= -github.com/aws/aws-sdk-go-v2/service/rds v1.116.1 h1:a5PMhM3lOcu2DKgvYGjhCDToKQnz9VEUo9iSc5+DsyA= -github.com/aws/aws-sdk-go-v2/service/rds v1.116.1/go.mod h1:bMaMwbVQ96bx42kDw/Ko+YiDyT/UCotPO+1RDp6lq7E= +github.com/aws/aws-sdk-go-v2/service/kafka v1.49.1 h1:BgBatWcQIFqF1l6KGHjv66V0d/ISnWrTwxDx/Jf6EJM= +github.com/aws/aws-sdk-go-v2/service/kafka v1.49.1/go.mod h1:pMpys+PlrN//vj8j5s0oOAMJjauj81VkHzIZxPVWOro= +github.com/aws/aws-sdk-go-v2/service/lightsail v1.51.0 h1:cg6PxzoIide2wiEyLfikOFN+XwHafwR8p5+L9U1E8dQ= +github.com/aws/aws-sdk-go-v2/service/lightsail v1.51.0/go.mod h1:YvX7hjUWecrKX8fBkbEncyddEW85xjNH+u5JHioITOw= +github.com/aws/aws-sdk-go-v2/service/rds v1.117.0 h1:T1Xe9sYxSUUQOvd1RsFeVk/IXFPdqSiN0atXu/Hy/8A= +github.com/aws/aws-sdk-go-v2/service/rds v1.117.0/go.mod h1:QbXW4coAMakHQhf1qhE0eVVCen9gwB/Kvn+HHHKhpGY= github.com/aws/aws-sdk-go-v2/service/s3 v1.97.3 h1:HwxWTbTrIHm5qY+CAEur0s/figc3qwvLWsNkF4RPToo= github.com/aws/aws-sdk-go-v2/service/s3 v1.97.3/go.mod h1:uoA43SdFwacedBfSgfFSjjCvYe8aYBS7EnU5GZ/YKMM= github.com/aws/aws-sdk-go-v2/service/signin v1.0.8 h1:0GFOLzEbOyZABS3PhYfBIx2rNBACYcKty+XGkTgw1ow= @@ -266,8 +266,6 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bitly/go-simplejson v0.5.1 h1:xgwPbetQScXt1gh9BmoJ6j9JMr3TElvuIyjR8pgdoow= -github.com/bitly/go-simplejson v0.5.1/go.mod 
h1:YOPVLzCfwK14b4Sff3oP1AmGhI9T9Vsg84etUnlyp+Q= github.com/bits-and-blooms/bitset v1.24.2/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bits-and-blooms/bitset v1.24.4 h1:95H15Og1clikBrKr/DuzMXkQzECs1M6hhoGXLwLQOZE= github.com/bits-and-blooms/bitset v1.24.4/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= @@ -346,8 +344,8 @@ github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pq github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo= github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU= -github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= -github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s= +github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= github.com/cristalhq/hedgedhttp v0.9.1 h1:g68L9cf8uUyQKQJwciD0A1Vgbsz+QgCjuB1I8FAsCDs= github.com/cristalhq/hedgedhttp v0.9.1/go.mod h1:XkqWU6qVMutbhW68NnzjWrGtH8NUx1UfYqGYtHVKIsI= github.com/d4l3k/messagediff v1.2.1 h1:ZcAIMYsUg0EAp9X+tt8/enBE/Q8Yd5kzPynLyKptt9U= @@ -362,8 +360,8 @@ github.com/dgryski/go-metro v0.0.0-20250106013310-edb8663e5e33 h1:ucRHb6/lvW/+mT github.com/dgryski/go-metro v0.0.0-20250106013310-edb8663e5e33/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/digitalocean/godo v1.175.0 h1:tpfwJFkBzpePxvvFazOn69TXctdxuFlOs7DMVXsI7oU= -github.com/digitalocean/godo v1.175.0/go.mod h1:xQsWpVCCbkDrWisHA72hPzPlnC+4W5w/McZY5ij9uvU= 
+github.com/digitalocean/godo v1.178.0 h1:+B4xGOaoFwwwpM7TKhoyGHdmFg5eF9zDB1YfOLvNJ2E= +github.com/digitalocean/godo v1.178.0/go.mod h1:xQsWpVCCbkDrWisHA72hPzPlnC+4W5w/McZY5ij9uvU= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= @@ -394,8 +392,8 @@ github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/ebitengine/purego v0.10.0 h1:QIw4xfpWT6GWTzaW5XEKy3HXoqrJGx1ijYHzTF0/ISU= github.com/ebitengine/purego v0.10.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= -github.com/edsrzf/mmap-go v1.2.0 h1:hXLYlkbaPzt1SaQk+anYwKSRNhufIDCchSPkUD6dD84= -github.com/edsrzf/mmap-go v1.2.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= +github.com/edsrzf/mmap-go v1.2.1-0.20241212181136-fad1cd13edbd h1:I4PrRZuNMeDP3VbFrak4QsqwO5tWkQf0tqrrr1L2DsU= +github.com/edsrzf/mmap-go v1.2.1-0.20241212181136-fad1cd13edbd/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= github.com/efficientgo/core v1.0.0-rc.3 h1:X6CdgycYWDcbYiJr1H1+lQGzx13o7bq3EUkbB9DsSPc= github.com/efficientgo/core v1.0.0-rc.3/go.mod h1:FfGdkzWarkuzOlY04VY+bGfb1lWrjaL6x/GLcQ4vJps= github.com/efficientgo/e2e v0.13.1-0.20220922081603-45de9fc588a8 h1:UFLc39BcUXahSNCLUrKjNGZABMUZaS4M74EZvTRnq3k= @@ -467,52 +465,52 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= -github.com/go-openapi/analysis v0.24.2 h1:6p7WXEuKy1llDgOH8FooVeO+Uq2za9qoAOq4ZN08B50= -github.com/go-openapi/analysis v0.24.2/go.mod 
h1:x27OOHKANE0lutg2ml4kzYLoHGMKgRm1Cj2ijVOjJuE= -github.com/go-openapi/errors v0.22.6 h1:eDxcf89O8odEnohIXwEjY1IB4ph5vmbUsBMsFNwXWPo= -github.com/go-openapi/errors v0.22.6/go.mod h1:z9S8ASTUqx7+CP1Q8dD8ewGH/1JWFFLX/2PmAYNQLgk= -github.com/go-openapi/jsonpointer v0.22.4 h1:dZtK82WlNpVLDW2jlA1YCiVJFVqkED1MegOUy9kR5T4= -github.com/go-openapi/jsonpointer v0.22.4/go.mod h1:elX9+UgznpFhgBuaMQ7iu4lvvX1nvNsesQ3oxmYTw80= -github.com/go-openapi/jsonreference v0.21.4 h1:24qaE2y9bx/q3uRK/qN+TDwbok1NhbSmGjjySRCHtC8= -github.com/go-openapi/jsonreference v0.21.4/go.mod h1:rIENPTjDbLpzQmQWCj5kKj3ZlmEh+EFVbz3RTUh30/4= -github.com/go-openapi/loads v0.23.2 h1:rJXAcP7g1+lWyBHC7iTY+WAF0rprtM+pm8Jxv1uQJp4= -github.com/go-openapi/loads v0.23.2/go.mod h1:IEVw1GfRt/P2Pplkelxzj9BYFajiWOtY2nHZNj4UnWY= -github.com/go-openapi/spec v0.22.3 h1:qRSmj6Smz2rEBxMnLRBMeBWxbbOvuOoElvSvObIgwQc= -github.com/go-openapi/spec v0.22.3/go.mod h1:iIImLODL2loCh3Vnox8TY2YWYJZjMAKYyLH2Mu8lOZs= -github.com/go-openapi/strfmt v0.25.0 h1:7R0RX7mbKLa9EYCTHRcCuIPcaqlyQiWNPTXwClK0saQ= -github.com/go-openapi/strfmt v0.25.0/go.mod h1:nNXct7OzbwrMY9+5tLX4I21pzcmE6ccMGXl3jFdPfn8= -github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU= -github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ= -github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4= -github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0= -github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4= -github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU= -github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y= -github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk= -github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI= 
-github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag= -github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA= -github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY= -github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo= -github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM= -github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s= -github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE= -github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48= -github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg= -github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0= -github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg= -github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8= -github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0= -github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw= -github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE= -github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw= -github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc= -github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4= -github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg= -github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls= 
-github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= -github.com/go-openapi/validate v0.25.1 h1:sSACUI6Jcnbo5IWqbYHgjibrhhmt3vR6lCzKZnmAgBw= -github.com/go-openapi/validate v0.25.1/go.mod h1:RMVyVFYte0gbSTaZ0N4KmTn6u/kClvAFp+mAVfS/DQc= +github.com/go-openapi/analysis v0.24.3 h1:a1hrvMr8X0Xt69KP5uVTu5jH62DscmDifrLzNglAayk= +github.com/go-openapi/analysis v0.24.3/go.mod h1:Nc+dWJ/FxZbhSow5Yh3ozg5CLJioB+XXT6MdLvJUsUw= +github.com/go-openapi/errors v0.22.7 h1:JLFBGC0Apwdzw3484MmBqspjPbwa2SHvpDm0u5aGhUA= +github.com/go-openapi/errors v0.22.7/go.mod h1://QW6SD9OsWtH6gHllUCddOXDL0tk0ZGNYHwsw4sW3w= +github.com/go-openapi/jsonpointer v0.22.5 h1:8on/0Yp4uTb9f4XvTrM2+1CPrV05QPZXu+rvu2o9jcA= +github.com/go-openapi/jsonpointer v0.22.5/go.mod h1:gyUR3sCvGSWchA2sUBJGluYMbe1zazrYWIkWPjjMUY0= +github.com/go-openapi/jsonreference v0.21.5 h1:6uCGVXU/aNF13AQNggxfysJ+5ZcU4nEAe+pJyVWRdiE= +github.com/go-openapi/jsonreference v0.21.5/go.mod h1:u25Bw85sX4E2jzFodh1FOKMTZLcfifd1Q+iKKOUxExw= +github.com/go-openapi/loads v0.23.3 h1:g5Xap1JfwKkUnZdn+S0L3SzBDpcTIYzZ5Qaag0YDkKQ= +github.com/go-openapi/loads v0.23.3/go.mod h1:NOH07zLajXo8y55hom0omlHWDVVvCwBM/S+csCK8LqA= +github.com/go-openapi/spec v0.22.4 h1:4pxGjipMKu0FzFiu/DPwN3CTBRlVM2yLf/YTWorYfDQ= +github.com/go-openapi/spec v0.22.4/go.mod h1:WQ6Ai0VPWMZgMT4XySjlRIE6GP1bGQOtEThn3gcWLtQ= +github.com/go-openapi/strfmt v0.26.1 h1:7zGCHji7zSYDC2tCXIusoxYQz/48jAf2q+sF6wXTG+c= +github.com/go-openapi/strfmt v0.26.1/go.mod h1:Zslk5VZPOISLwmWTMBIS7oiVFem1o1EI6zULY8Uer7Y= +github.com/go-openapi/swag v0.25.5 h1:pNkwbUEeGwMtcgxDr+2GBPAk4kT+kJ+AaB+TMKAg+TU= +github.com/go-openapi/swag v0.25.5/go.mod h1:B3RT6l8q7X803JRxa2e59tHOiZlX1t8viplOcs9CwTA= +github.com/go-openapi/swag/cmdutils v0.25.5 h1:yh5hHrpgsw4NwM9KAEtaDTXILYzdXh/I8Whhx9hKj7c= +github.com/go-openapi/swag/cmdutils v0.25.5/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0= +github.com/go-openapi/swag/conv v0.25.5 
h1:wAXBYEXJjoKwE5+vc9YHhpQOFj2JYBMF2DUi+tGu97g= +github.com/go-openapi/swag/conv v0.25.5/go.mod h1:CuJ1eWvh1c4ORKx7unQnFGyvBbNlRKbnRyAvDvzWA4k= +github.com/go-openapi/swag/fileutils v0.25.5 h1:B6JTdOcs2c0dBIs9HnkyTW+5gC+8NIhVBUwERkFhMWk= +github.com/go-openapi/swag/fileutils v0.25.5/go.mod h1:V3cT9UdMQIaH4WiTrUc9EPtVA4txS0TOmRURmhGF4kc= +github.com/go-openapi/swag/jsonname v0.25.5 h1:8p150i44rv/Drip4vWI3kGi9+4W9TdI3US3uUYSFhSo= +github.com/go-openapi/swag/jsonname v0.25.5/go.mod h1:jNqqikyiAK56uS7n8sLkdaNY/uq6+D2m2LANat09pKU= +github.com/go-openapi/swag/jsonutils v0.25.5 h1:XUZF8awQr75MXeC+/iaw5usY/iM7nXPDwdG3Jbl9vYo= +github.com/go-openapi/swag/jsonutils v0.25.5/go.mod h1:48FXUaz8YsDAA9s5AnaUvAmry1UcLcNVWUjY42XkrN4= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.5 h1:SX6sE4FrGb4sEnnxbFL/25yZBb5Hcg1inLeErd86Y1U= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.5/go.mod h1:/2KvOTrKWjVA5Xli3DZWdMCZDzz3uV/T7bXwrKWPquo= +github.com/go-openapi/swag/loading v0.25.5 h1:odQ/umlIZ1ZVRteI6ckSrvP6e2w9UTF5qgNdemJHjuU= +github.com/go-openapi/swag/loading v0.25.5/go.mod h1:I8A8RaaQ4DApxhPSWLNYWh9NvmX2YKMoB9nwvv6oW6g= +github.com/go-openapi/swag/mangling v0.25.5 h1:hyrnvbQRS7vKePQPHHDso+k6CGn5ZBs5232UqWZmJZw= +github.com/go-openapi/swag/mangling v0.25.5/go.mod h1:6hadXM/o312N/h98RwByLg088U61TPGiltQn71Iw0NY= +github.com/go-openapi/swag/netutils v0.25.5 h1:LZq2Xc2QI8+7838elRAaPCeqJnHODfSyOa7ZGfxDKlU= +github.com/go-openapi/swag/netutils v0.25.5/go.mod h1:lHbtmj4m57APG/8H7ZcMMSWzNqIQcu0RFiXrPUara14= +github.com/go-openapi/swag/stringutils v0.25.5 h1:NVkoDOA8YBgtAR/zvCx5rhJKtZF3IzXcDdwOsYzrB6M= +github.com/go-openapi/swag/stringutils v0.25.5/go.mod h1:PKK8EZdu4QJq8iezt17HM8RXnLAzY7gW0O1KKarrZII= +github.com/go-openapi/swag/typeutils v0.25.5 h1:EFJ+PCga2HfHGdo8s8VJXEVbeXRCYwzzr9u4rJk7L7E= +github.com/go-openapi/swag/typeutils v0.25.5/go.mod h1:itmFmScAYE1bSD8C4rS0W+0InZUBrB2xSPbWt6DLGuc= +github.com/go-openapi/swag/yamlutils v0.25.5 
h1:kASCIS+oIeoc55j28T4o8KwlV2S4ZLPT6G0iq2SSbVQ= +github.com/go-openapi/swag/yamlutils v0.25.5/go.mod h1:Gek1/SjjfbYvM+Iq4QGwa/2lEXde9n2j4a3wI3pNuOQ= +github.com/go-openapi/testify/enable/yaml/v2 v2.4.1 h1:NZOrZmIb6PTv5LTFxr5/mKV/FjbUzGE7E6gLz7vFoOQ= +github.com/go-openapi/testify/enable/yaml/v2 v2.4.1/go.mod h1:r7dwsujEHawapMsxA69i+XMGZrQ5tRauhLAjV/sxg3Q= +github.com/go-openapi/testify/v2 v2.4.1 h1:zB34HDKj4tHwyUQHrUkpV0Q0iXQ6dUCOQtIqn8hE6Iw= +github.com/go-openapi/testify/v2 v2.4.1/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= +github.com/go-openapi/validate v0.25.2 h1:12NsfLAwGegqbGWr2CnvT65X/Q2USJipmJ9b7xDJZz0= +github.com/go-openapi/validate v0.25.2/go.mod h1:Pgl1LpPPGFnZ+ys4/hTlDiRYQdI1ocKypgE+8Q8BLfY= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= @@ -521,8 +519,8 @@ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJn github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/validator/v10 v10.28.0 h1:Q7ibns33JjyW48gHkuFT91qX48KG0ktULL6FgHdG688= github.com/go-playground/validator/v10 v10.28.0/go.mod h1:GoI6I1SjPBh9p7ykNE/yj3fFYbyDOpwMn5KXd+m2hUU= -github.com/go-resty/resty/v2 v2.17.1 h1:x3aMpHK1YM9e4va/TMDRlusDDoZiQ+ViDu/WpA6xTM4= -github.com/go-resty/resty/v2 v2.17.1/go.mod h1:kCKZ3wWmwJaNc7S29BRtUhJwy7iqmn+2mLtQrOyQlVA= +github.com/go-resty/resty/v2 v2.17.2 h1:FQW5oHYcIlkCNrMD2lloGScxcHJ0gkjshV3qcQAyHQk= +github.com/go-resty/resty/v2 v2.17.2/go.mod h1:kCKZ3wWmwJaNc7S29BRtUhJwy7iqmn+2mLtQrOyQlVA= github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo= github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= github.com/go-stack/stack v1.8.0/go.mod 
h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -629,8 +627,8 @@ github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= -github.com/google/pprof v0.0.0-20260202012954-cb029daf43ef h1:xpF9fUHpoIrrjX24DURVKiwHcFpw19ndIs+FwTSMbno= -github.com/google/pprof v0.0.0-20260202012954-cb029daf43ef/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI= +github.com/google/pprof v0.0.0-20260302011040-a15ffb7f9dcc h1:VBbFa1lDYWEeV5FZKUiYKYT0VxCp9twUmmaq9eb8sXw= +github.com/google/pprof v0.0.0-20260302011040-a15ffb7f9dcc/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/renameio/v2 v2.0.2 h1:qKZs+tfn+arruZZhQ7TKC/ergJunuJicWS6gLDt/dGw= github.com/google/renameio/v2 v2.0.2/go.mod h1:OX+G6WHHpHq3NVj7cAOleLOwJfcQ1s3uUJQCrr78SWo= @@ -646,8 +644,8 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.18.0 h1:jxP5Uuo3bxm3M6gGtV94P4lliVetoCB4Wk2x8QA86LI= github.com/googleapis/gax-go/v2 v2.18.0/go.mod h1:uSzZN4a356eRG985CzJ3WfbFSpqkLTjsnhWGJR6EwrE= -github.com/gophercloud/gophercloud/v2 v2.10.0 h1:NRadC0aHNvy4iMoFXj5AFiPmut/Sj3hAPAo9B59VMGc= -github.com/gophercloud/gophercloud/v2 v2.10.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk= +github.com/gophercloud/gophercloud/v2 v2.11.1 h1:jCs4vLH8sJgRqrPzqVfWgl7uI6JnIIlsgeIRM0uHjxY= +github.com/gophercloud/gophercloud/v2 v2.11.1/go.mod h1:Rm0YvKQ4QYX2rY9XaDKnjRzSGwlG5ge4h6ABYnmkKQM= 
github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= @@ -724,8 +722,8 @@ github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iP github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= -github.com/hashicorp/nomad/api v0.0.0-20260220212019-daca79db0bd6 h1:QN/GwpGyiW8RdNcHGMA1xVnM8tJkAGNDR/BZ47XR+OU= -github.com/hashicorp/nomad/api v0.0.0-20260220212019-daca79db0bd6/go.mod h1:KkLNLU0Nyfh5jWsFoF/PsmMbKpRIAoIV4lmQoJWgKCk= +github.com/hashicorp/nomad/api v0.0.0-20260324203407-b27b0c2e019a h1:HGwfgBNl90YBiHdbzZ/+8aMxO1UL9B/yNTAXa8iB8z8= +github.com/hashicorp/nomad/api v0.0.0-20260324203407-b27b0c2e019a/go.mod h1:KkLNLU0Nyfh5jWsFoF/PsmMbKpRIAoIV4lmQoJWgKCk= github.com/hashicorp/serf v0.10.2 h1:m5IORhuNSjaxeljg5DeQVDlQyVkhRIjJDimbkCa8aAc= github.com/hashicorp/serf v0.10.2/go.mod h1:T1CmSGfSeGfnfNy/w0odXQUR1rfECGd2Qdsp84DjOiY= github.com/heroku/x v0.5.3 h1:lf4/Yg3j/e/QIgAUFmoVd0ELgQjUX9DUx+CULX2AWag= @@ -791,8 +789,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4= github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= -github.com/klauspost/compress v1.18.4 h1:RPhnKRAQ4Fh8zU2FY/6ZFDwTVTxgJ/EMydqSTzE9a2c= -github.com/klauspost/compress v1.18.4/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= +github.com/klauspost/compress v1.18.5 h1:/h1gH5Ce+VWNLSWqPzOVn6XBO+vJbCNGvjoaGBFW2IE= 
+github.com/klauspost/compress v1.18.5/go.mod h1:cwPg85FWrGar70rWktvGQj8/hthj3wpl0PGDogxkrSQ= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= @@ -804,8 +802,8 @@ github.com/knadh/koanf/maps v0.1.2 h1:RBfmAW5CnZT+PJ1CVc1QSJKf4Xu9kxfQgYVQSu8hpb github.com/knadh/koanf/maps v0.1.2/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= github.com/knadh/koanf/providers/confmap v1.0.0 h1:mHKLJTE7iXEys6deO5p6olAiZdG5zwp8Aebir+/EaRE= github.com/knadh/koanf/providers/confmap v1.0.0/go.mod h1:txHYHiI2hAtF0/0sCmcuol4IDcuQbKTybiB1nOcUo1A= -github.com/knadh/koanf/v2 v2.3.2 h1:Ee6tuzQYFwcZXQpc2MiVeC6qHMandf5SMUJJNoFp/c4= -github.com/knadh/koanf/v2 v2.3.2/go.mod h1:gRb40VRAbd4iJMYYD5IxZ6hfuopFcXBpc9bbQpZwo28= +github.com/knadh/koanf/v2 v2.3.3 h1:jLJC8XCRfLC7n4F+ZKKdBsbq1bfXTpuFhf4L7t94D94= +github.com/knadh/koanf/v2 v2.3.3/go.mod h1:gRb40VRAbd4iJMYYD5IxZ6hfuopFcXBpc9bbQpZwo28= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -829,8 +827,8 @@ github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b h1:11UHH39z1RhZ5dc4y4r/4koJo6IYFgTRMe/LlwRTEw0= github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg= -github.com/linode/linodego v1.65.0 h1:SdsuGD8VSsPWeShXpE7ihl5vec+fD3MgwhnfYC/rj7k= -github.com/linode/linodego v1.65.0/go.mod 
h1:tOFiTErdjkbVnV+4S0+NmIE9dqqZUEM2HsJaGu8wMh8= +github.com/linode/linodego v1.66.0 h1:rK8QJFaV53LWOEJvb/evhTg/dP5ElvtuZmx4iv4RJds= +github.com/linode/linodego v1.66.0/go.mod h1:12ykGs9qsvxE+OU3SXuW2w+DTruWF35FPlXC7gGk2tU= github.com/lucasb-eyer/go-colorful v1.3.0 h1:2/yBRLdWBZKrf7gB40FoiKfAWYQ0lqNcbuQwVHXptag= github.com/lucasb-eyer/go-colorful v1.3.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3 h1:PwQumkgq4/acIiZhtifTV5OUqqiP82UAl0h87xj/l9k= @@ -882,6 +880,10 @@ github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zx github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/moby/api v1.54.0 h1:7kbUgyiKcoBhm0UrWbdrMs7RX8dnwzURKVbZGy2GnL0= +github.com/moby/moby/api v1.54.0/go.mod h1:8mb+ReTlisw4pS6BRzCMts5M49W5M7bKt1cJy/YbAqc= +github.com/moby/moby/client v0.3.0 h1:UUGL5okry+Aomj3WhGt9Aigl3ZOxZGqR7XPo+RLPlKs= +github.com/moby/moby/client v0.3.0/go.mod h1:HJgFbJRvogDQjbM8fqc1MCEm4mIAGMLjXbgwoZp6jCQ= github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= @@ -919,8 +921,6 @@ github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/run v1.2.0 h1:O8x3yXwah4A73hJdlrwo/2X6J62gE5qTMusH0dvz60E= github.com/oklog/run v1.2.0/go.mod h1:mgDbKRSwPhJfesJ4PntqFUbKQRZ50NgmZTSPlFA0YFk= -github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= -github.com/oklog/ulid v1.3.1/go.mod 
h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/oklog/ulid/v2 v2.1.1 h1:suPZ4ARWLOJLegGFiZZ1dFAkqzhMjL3J1TzI+5wHz8s= github.com/oklog/ulid/v2 v2.1.1/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= @@ -929,12 +929,12 @@ github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.145.0 h1:0dYiJ7krIwaHFX6YLNDo/yawTZIu8X16tT/nwW1UTG8= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.145.0/go.mod h1:mhoa9lipcEH0heeKf6+xHzGUrCuAgImQv4/Qpmu0+Fk= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.145.0 h1:sB4yuYx45zig1ceQ+kmrEYy0xMZ+mGagwYIFtJkkU1w= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.145.0/go.mod h1:uLhceuH7ZtiVxk+B0MHI0vhJG2Y4aOzT/hrV6c5KjVU= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.145.0 h1:en86L47oOTsAkbDc5VEMF5cziXPBK2D4hqGRqLaJtCw= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.145.0/go.mod h1:osDRUOIfd7IiKkDvcE/VrPp9FFOPJmFp73RuvgOn5gE= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.148.0 h1:CiTjQE/Hh5xK2t56ogrDK4nl0+tJPNmASCs4zEYZ/xU= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.148.0/go.mod h1:WUFkzTiOpt7EYyL67gv1GOf3RD8qKWGtin3lY9LYzW4= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.148.0 h1:1TLg6YrS3Au6F7xw3ws2Njbwj13IMqPplvGFi+18fWs= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil 
v0.148.0/go.mod h1:P8hZEDIQk4REgUWyLhSVRHwTxK6KkifKfg36BmmQ/DI= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.148.0 h1:xgD/kNGp/wWY+bwY599Pc01OamYN17phRiTP934bM5Y= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.148.0/go.mod h1:ZK7wvaefla9lB3bAW0rNKt7IzRPcTRQoOFqr4sZy/XM= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= @@ -953,6 +953,8 @@ github.com/oschwald/geoip2-golang/v2 v2.1.0 h1:DjnLhNJu9WHwTrmoiQFvgmyJoczhdnm7L github.com/oschwald/geoip2-golang/v2 v2.1.0/go.mod h1:qdVmcPgrTJ4q2eP9tHq/yldMTdp2VMr33uVdFbHBiBc= github.com/oschwald/maxminddb-golang/v2 v2.1.1 h1:lA8FH0oOrM4u7mLvowq8IT6a3Q/qEnqRzLQn9eH5ojc= github.com/oschwald/maxminddb-golang/v2 v2.1.1/go.mod h1:PLdx6PR+siSIoXqqy7C7r3SB3KZnhxWr1Dp6g0Hacl8= +github.com/outscale/osc-sdk-go/v2 v2.32.0 h1:twcX7/YF32aowN0khwK3fPKXGujRi7oOCLLzWcLTX+M= +github.com/outscale/osc-sdk-go/v2 v2.32.0/go.mod h1:fl+1NvnHptNVE0N57dkDa+H4fyBhlrFaRA+lYiUT44s= github.com/ovh/go-ovh v1.9.0 h1:6K8VoL3BYjVV3In9tPJUdT7qMx9h0GExN9EXx1r2kKE= github.com/ovh/go-ovh v1.9.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= github.com/parquet-go/bitpack v1.0.0 h1:AUqzlKzPPXf2bCdjfj4sTeacrUwsT7NlcYDMUQxPcQA= @@ -963,14 +965,14 @@ github.com/parquet-go/parquet-go v0.29.0 h1:xXlPtFVR51jpSVzf+cgHnNIcb7Xet+iuvkbe github.com/parquet-go/parquet-go v0.29.0/go.mod h1:navtkAYr2LGoJVp141oXPlO/sxLvaOe3la2JEoD8+rg= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pb33f/jsonpath v0.8.1 h1:84C6QRyx6HcSm6PZnsMpcqYot3IsZ+m0n95+0NbBbvs= -github.com/pb33f/jsonpath v0.8.1/go.mod 
h1:zBV5LJW4OQOPatmQE2QdKpGQJvhDTlE5IEj6ASaRNTo= -github.com/pb33f/libopenapi v0.34.0 h1:jY8pf4yBHRObnNBrjuVDhVpgKjSUE8hLFpeoYtyQ/eo= -github.com/pb33f/libopenapi v0.34.0/go.mod h1:YOP20KzYe3mhE5301aQzJtzQ9MnvhABBGO7RMttA4V4= -github.com/pb33f/libopenapi-validator v0.13.0 h1:an3BxwklmLF4bxacudLV8Vysvw1krlAjpYoUfyJUgw8= -github.com/pb33f/libopenapi-validator v0.13.0/go.mod h1:YZQRDh+8xap/H0GM0cJsBrqqT+XLlMivA/qwqRLiidQ= -github.com/pb33f/ordered-map/v2 v2.3.0 h1:k2OhVEQkhTCQMhAicQ3Z6iInzoZNQ7L9MVomwKBZ5WQ= -github.com/pb33f/ordered-map/v2 v2.3.0/go.mod h1:oe5ue+6ZNhy7QN9cPZvPA23Hx0vMHnNVeMg4fGdCANw= +github.com/pb33f/jsonpath v0.8.2 h1:Ou4C7zjYClBm97dfZjDCjdZGusJoynv/vrtiEKNfj2Y= +github.com/pb33f/jsonpath v0.8.2/go.mod h1:zBV5LJW4OQOPatmQE2QdKpGQJvhDTlE5IEj6ASaRNTo= +github.com/pb33f/libopenapi v0.34.4 h1:BWWXA3U4SlsHEvfczk+DJHu2O38ktgKw+zBEYaDZ2uI= +github.com/pb33f/libopenapi v0.34.4/go.mod h1:MsDdUlQ1CdrIDO5v26JfgBxQs7kcaOUEpMP3EqU6bI4= +github.com/pb33f/libopenapi-validator v0.13.3 h1:5KW4Y/mMoQvt6d89rLiNmW1zSfln7Oua2A0BqPXpjro= +github.com/pb33f/libopenapi-validator v0.13.3/go.mod h1:X58CRsmj/7l0iXethEMfq3OJIzQ5hces7EbJ071z6WI= +github.com/pb33f/ordered-map/v2 v2.3.1 h1:5319HDO0aw4DA4gzi+zv4FXU9UlSs3xGZ40wcP1nBjY= +github.com/pb33f/ordered-map/v2 v2.3.1/go.mod h1:qxFQgd0PkVUtOMCkTapqotNgzRhMPL7VvaHKbd1HnmQ= github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/philhofer/fwd v1.2.0 h1:e6DnBTl7vGY+Gz322/ASL4Gyp1FspeMvx1RNDoToZuM= @@ -1007,8 +1009,8 @@ github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqr github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= github.com/prometheus/client_golang v1.23.2/go.mod 
h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= -github.com/prometheus/client_golang/exp v0.0.0-20260108101519-fb0838f53562 h1:vwqZvuobg82U0gcG2eVrFH27806bUbNr32SvfRbvdsg= -github.com/prometheus/client_golang/exp v0.0.0-20260108101519-fb0838f53562/go.mod h1:PmAYDB13uBFBG9qE1qxZZgZWhg7Rg6SfKM5DMK7hjyI= +github.com/prometheus/client_golang/exp v0.0.0-20260325093428-d8591d0db856 h1:1Y6bmpZb8peQCy1IpctnAhIFuyhrdtMaDnETChhSNns= +github.com/prometheus/client_golang/exp v0.0.0-20260325093428-d8591d0db856/go.mod h1:Vf0QcmVhGqpjLxZOaWrFSep86vchQtJmbztFaMM4f6Q= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -1037,12 +1039,12 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws= github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw= -github.com/prometheus/prometheus v0.310.1-0.20260320085417-166d20151c0d h1:6IKqApENq2a/7Cpx3MZmjZcYKwKVClxF+oyAqoIzHO4= -github.com/prometheus/prometheus v0.310.1-0.20260320085417-166d20151c0d/go.mod h1:inufIOTbzFkTjM5hLQJTih8pcstVJRNpISmqf+jTlVY= +github.com/prometheus/prometheus v0.311.2-0.20260410083055-07c6232d159b h1:tjxqNQlYTJzrQrY7HM2SbnxqzuE64vnvlSmSbAvBBDE= +github.com/prometheus/prometheus v0.311.2-0.20260410083055-07c6232d159b/go.mod h1:h4Ogksuo6VUZmnm6q/ruKTUzrg9Vvu6u/6O/rQ5xPMg= github.com/prometheus/sigv4 v0.4.1 h1:EIc3j+8NBea9u1iV6O5ZAN8uvPq2xOIUPcqCTivHuXs= github.com/prometheus/sigv4 v0.4.1/go.mod h1:eu+ZbRvsc5TPiHwqh77OWuCnWK73IdkETYY46P4dXOU= 
-github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg= -github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA= +github.com/puzpuzpuz/xsync/v4 v4.4.0 h1:vlSN6/CkEY0pY8KaB0yqo/pCLZvp9nhdbBdjipT4gWo= +github.com/puzpuzpuz/xsync/v4 v4.4.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo= github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 h1:bsUq1dX0N8AOIL7EB/X911+m4EHsnWEHeJ0c+3TTBrg= github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/redis/go-redis/v9 v9.18.0 h1:pMkxYPkEbMPwRdenAzUNyFNrDgHx9U+DrBabWNfSRQs= @@ -1101,8 +1103,8 @@ github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= -github.com/stackitcloud/stackit-sdk-go/core v0.21.1 h1:Y/PcAgM7DPYMNqum0MLv4n1mF9ieuevzcCIZYQfm3Ts= -github.com/stackitcloud/stackit-sdk-go/core v0.21.1/go.mod h1:osMglDby4csGZ5sIfhNyYq1bS1TxIdPY88+skE/kkmI= +github.com/stackitcloud/stackit-sdk-go/core v0.23.0 h1:zPrOhf3Xe47rKRs1fg/AqKYUiJJRYjdcv+3qsS50mEs= +github.com/stackitcloud/stackit-sdk-go/core v0.23.0/go.mod h1:osMglDby4csGZ5sIfhNyYq1bS1TxIdPY88+skE/kkmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -1159,8 +1161,8 @@ github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6 github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= 
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= -github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= -github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= +github.com/vultr/govultr/v3 v3.28.1 h1:KR3LhppYARlBujY7+dcrE7YKL0Yo9qXL+msxykKQrLI= +github.com/vultr/govultr/v3 v3.28.1/go.mod h1:2zyUw9yADQaGwKnwDesmIOlBNLrm7edsCfWHFJpWKf8= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= @@ -1198,8 +1200,6 @@ go.etcd.io/etcd/client/pkg/v3 v3.6.7 h1:vvzgyozz46q+TyeGBuFzVuI53/yd133CHceNb/Ah go.etcd.io/etcd/client/pkg/v3 v3.6.7/go.mod h1:2IVulJ3FZ/czIGl9T4lMF1uxzrhRahLqe+hSgy+Kh7Q= go.etcd.io/etcd/client/v3 v3.6.7 h1:9WqA5RpIBtdMxAy1ukXLAdtg2pAxNqW5NUoO2wQrE6U= go.etcd.io/etcd/client/v3 v3.6.7/go.mod h1:2XfROY56AXnUqGsvl+6k29wrwsSbEh1lAouQB1vHpeE= -go.mongodb.org/mongo-driver v1.17.9 h1:IexDdCuuNJ3BHrELgBlyaH9p60JXAvdzWR128q+U5tU= -go.mongodb.org/mongo-driver v1.17.9/go.mod h1:LlOhpH5NUEfhxcAwG0UEkMqwYcc4JU18gtCdGudk/tQ= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -1209,42 +1209,42 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= -go.opentelemetry.io/collector/component v1.51.0 h1:btNW76MCRmpsk0ARRT5wspDXF9tvdaLd3uBtYXIiQn0= -go.opentelemetry.io/collector/component v1.51.0/go.mod 
h1:Zlgwh4yTLDhJglOXqiyXZ7paepTvvoijfFjLqOr/Qww= -go.opentelemetry.io/collector/component/componentstatus v0.145.0 h1:EwUZfSaagdpRXnlrb0TqReJXXW2p9HWBU5YiIeXPCAE= -go.opentelemetry.io/collector/component/componentstatus v0.145.0/go.mod h1:OiYb8rT4FtSJPFSGCKYvOaajdueDUTJZncixGrmy5aM= -go.opentelemetry.io/collector/component/componenttest v0.145.0 h1:ryhRrXqQybGMhz7A7t32NC8BXAFcX2o1RetgPM7vw88= -go.opentelemetry.io/collector/component/componenttest v0.145.0/go.mod h1:5uStrhUdZ0Fw3se00CPmVaRtW8o9N8kKiY76OSCWFjQ= -go.opentelemetry.io/collector/confmap v1.51.0 h1:C9YlMNkIgzuauLpUz2F7DLlWwqAmkQKNcKj1XATVWuE= -go.opentelemetry.io/collector/confmap v1.51.0/go.mod h1:uWi4b9lHfvEC2poJ2I2vXwGUREVEQTcdUguOpfqdcHM= -go.opentelemetry.io/collector/confmap/xconfmap v0.145.0 h1:ngbyfh4+SKlA+osgsak3AxUNPxVxaJTmA0Sl7VfJzwY= -go.opentelemetry.io/collector/confmap/xconfmap v0.145.0/go.mod h1:zTSK+c76NAy/tI1R3xfZjdoI04D9EYDnzAHQQwl6AmA= -go.opentelemetry.io/collector/consumer v1.51.0 h1:Ex1x/k9VEEA2DOgt/eSc2Z9KTp0I6xBSruLmrYFfIFY= -go.opentelemetry.io/collector/consumer v1.51.0/go.mod h1:Erk6qdfVj+24QTrGCpurcrF+qdUlHkb4dgMy5wJxLvY= -go.opentelemetry.io/collector/consumer/consumertest v0.145.0 h1:3+uMwuMHoXMAU+Z6mwCRA3AxWeL7SujcAQwqqHJ1gCc= -go.opentelemetry.io/collector/consumer/consumertest v0.145.0/go.mod h1:IFc/FeaIHQClb8KK0aVn0tFDNMc+/MmfQ+aBT1cJNeo= -go.opentelemetry.io/collector/consumer/xconsumer v0.145.0 h1:9w7KKv9lVJoHvMLC6SUJHenU/KySdEgFJXbB4JQOEsk= -go.opentelemetry.io/collector/consumer/xconsumer v0.145.0/go.mod h1:SryDCLP2ZaFeZJtA2CSksJ0XvjH8k3LmlfXvy/kC7Wc= -go.opentelemetry.io/collector/featuregate v1.53.0 h1:cgjXdtl7jezWxq6V0eohe/JqjY4PBotZGb5+bTR2OJw= -go.opentelemetry.io/collector/featuregate v1.53.0/go.mod h1:PS7zY/zaCb28EqciePVwRHVhc3oKortTFXsi3I6ee4g= -go.opentelemetry.io/collector/internal/componentalias v0.145.0 h1:A9V5IiETzz8FCtjxjRM5gf7RE3sOtA1h8phmpQjXTZ4= -go.opentelemetry.io/collector/internal/componentalias v0.145.0/go.mod 
h1:sEKEAwAn45ZiXRk3T/vbkvetw14tIRd0CJIxcEx9SsQ= -go.opentelemetry.io/collector/internal/testutil v0.147.0 h1:DFlRxBRp23/sZnpTITK25yqe0d56yNvK+63IaWc6OsU= -go.opentelemetry.io/collector/internal/testutil v0.147.0/go.mod h1:Jkjs6rkqs973LqgZ0Fe3zrokQRKULYXPIf4HuqStiEE= -go.opentelemetry.io/collector/pdata v1.53.0 h1:DlYDbRwammEZaxDZHINx5v0n8SEOVNniPbi6FRTlVkA= -go.opentelemetry.io/collector/pdata v1.53.0/go.mod h1:LRSYGNjKXaUrZEwZv3Yl+8/zV2HmRGKXW62zB2bysms= -go.opentelemetry.io/collector/pdata/pprofile v0.145.0 h1:ASMKpoqokf8HhzjoeMKZf0K6UXLhufVwNXH0sSuUn5w= -go.opentelemetry.io/collector/pdata/pprofile v0.145.0/go.mod h1:a60GC7wQPhLAixWzKbbP51QLwwc+J0Cmp4SurOlhGUk= -go.opentelemetry.io/collector/pdata/testdata v0.145.0 h1:iFsxsCMtE3lnAc/5kZbhZHpRv1OMmM+O5ry46xdQHbg= -go.opentelemetry.io/collector/pdata/testdata v0.145.0/go.mod h1:0y2ERArdzqmYdJHdKLKue+AUubSEGlwK49F+23+Mbic= -go.opentelemetry.io/collector/pipeline v1.51.0 h1:GZBNW+aaOE+zufGzAkXy0OI7n1cqepEa5J+beaOpS2k= -go.opentelemetry.io/collector/pipeline v1.51.0/go.mod h1:xUrAqiebzYbrgxyoXSkk6/Y3oi5Sy3im2iCA51LwUAI= -go.opentelemetry.io/collector/processor v1.51.0 h1:PKpCzkLQmqaW08TOVh/zM0qx07Ihq+DR5J/OBkPiL9o= -go.opentelemetry.io/collector/processor v1.51.0/go.mod h1:rtIPFS+EFRAkG+CSwtjxs2IsIkuZStObvALeueD02XI= -go.opentelemetry.io/collector/processor/processortest v0.145.0 h1:RDGBmyZnHk7XVK/EdLt/8iPWj+QLStbbVi1nFTNR01s= -go.opentelemetry.io/collector/processor/processortest v0.145.0/go.mod h1:WAvxAzSojkdoZB915Z1lsVHCPDJBb2fepjJBjenrzjg= -go.opentelemetry.io/collector/processor/xprocessor v0.145.0 h1:DaIE7MxRlg0OL1o2P0GQZtmZeExAmVso3qWv8S0RLps= -go.opentelemetry.io/collector/processor/xprocessor v0.145.0/go.mod h1:kUwRyKBU/kjCmXodd+0z7CpvcP0A9G9/QL+MaJt4U2o= +go.opentelemetry.io/collector/component v1.54.0 h1:LvtX0Tzz18n44OrUFVk77N1FNsejfWJqztB28hrmDM8= +go.opentelemetry.io/collector/component v1.54.0/go.mod h1:yUMBYsySY/sDcXm8kOzEoZxt+JLdala6hxzSW0npOxY= +go.opentelemetry.io/collector/component/componentstatus 
v0.148.0 h1:sCGRaXNQolHFhPjrNJEwQ1WZOf96iL99tzm9GxuZsvg= +go.opentelemetry.io/collector/component/componentstatus v0.148.0/go.mod h1:yqg3SpGQc22W3wGICdnb+2kZVW9daBr3+LrGUCHkKfc= +go.opentelemetry.io/collector/component/componenttest v0.148.0 h1:tBXJWmy2X6KD8S0QU2YZa2zYBqP+IycSM4iOtwDD2pA= +go.opentelemetry.io/collector/component/componenttest v0.148.0/go.mod h1:1c1+6mZOmI0raoya5vA/X0F+fawEjNS6tCEs5xLATtA= +go.opentelemetry.io/collector/confmap v1.54.0 h1:RUoxQ4uAYHTI57GfHh61D00tTQsXm9T88ozrAiicByc= +go.opentelemetry.io/collector/confmap v1.54.0/go.mod h1:mQxG8bk0IWIt9gbWMvzE+cRkOuCuzbzkNGBq2YJ4wNM= +go.opentelemetry.io/collector/confmap/xconfmap v0.148.0 h1:UW8MX5VlKJf67x4Et7J9kPwP9Rv4VSmJ+UUpgRcb//c= +go.opentelemetry.io/collector/confmap/xconfmap v0.148.0/go.mod h1:4qTMr3V0uSXXac9wVs/UD5fIqRKw5yIl58+Vjsc6RHM= +go.opentelemetry.io/collector/consumer v1.54.0 h1:RGGtUN+GbkV1px3T6XdUHmgJ+ldJ1hAHdesFzW/wgL0= +go.opentelemetry.io/collector/consumer v1.54.0/go.mod h1:1PC6XINTL9DdT1bwvfMdHE72EB4RWU/WcPemUrhqKN8= +go.opentelemetry.io/collector/consumer/consumertest v0.148.0 h1:ms0HtWMj17tI1Yds0hSuUI5QYpNEqd11AAhwIoUY2HE= +go.opentelemetry.io/collector/consumer/consumertest v0.148.0/go.mod h1:wScw/OzKkf/ZzJn4ToI30OoI1kJiY16WNrcFToXSzK0= +go.opentelemetry.io/collector/consumer/xconsumer v0.148.0 h1:m3b9rY7CLD5Pcge6sSKHIT3OlcPN6xqYsdtVs9oJ528= +go.opentelemetry.io/collector/consumer/xconsumer v0.148.0/go.mod h1:bG+Wz6xmIBl/gHzq1sqvksWXqTLuTX17Wo//zIsdZpw= +go.opentelemetry.io/collector/featuregate v1.54.0 h1:ufo5Hy4Co9pcHVg24hyanm8qFG3TkkYbVyQXPVAbwDc= +go.opentelemetry.io/collector/featuregate v1.54.0/go.mod h1:PS7zY/zaCb28EqciePVwRHVhc3oKortTFXsi3I6ee4g= +go.opentelemetry.io/collector/internal/componentalias v0.148.0 h1:Y6MftNIZSzOr47TTj6A2z2UR3IwbeG46sAQshicGtDg= +go.opentelemetry.io/collector/internal/componentalias v0.148.0/go.mod h1:uwKzfehzwRgHxdHgFXYSBHNBeWSSqsqQYGWr5fk08G0= +go.opentelemetry.io/collector/internal/testutil v0.148.0 
h1:3Z9hperte3vSmbBTYeNndoEUICICrNz8hzx+v0FYXBQ= +go.opentelemetry.io/collector/internal/testutil v0.148.0/go.mod h1:Jkjs6rkqs973LqgZ0Fe3zrokQRKULYXPIf4HuqStiEE= +go.opentelemetry.io/collector/pdata v1.54.0 h1:3LharKb792cQ3VrUGxd3IcpWwfu3ST+GSTU382jVz1s= +go.opentelemetry.io/collector/pdata v1.54.0/go.mod h1:+MqC3VVOv/EX9YVFUo+mI4F0YmwJ+fXBYwjmu+mRiZ8= +go.opentelemetry.io/collector/pdata/pprofile v0.148.0 h1:MgrNZmqwhZGfiYwcKKtM/iXgTZqqvG5dUphriRXMZHU= +go.opentelemetry.io/collector/pdata/pprofile v0.148.0/go.mod h1:MTTMnZPqWX1S/rBDatU0W19udlycBkWuzVV5qnemHdc= +go.opentelemetry.io/collector/pdata/testdata v0.148.0 h1:yzakPuFgoKK8WcrlhyYHLMLA/kLScQKGsXkIgwieAQ8= +go.opentelemetry.io/collector/pdata/testdata v0.148.0/go.mod h1:2rFvxm8qwd3nlO90FtJw6ZGAjt+bLndxmQuJaMO9kfQ= +go.opentelemetry.io/collector/pipeline v1.54.0 h1:jYlCkdFLITVBdeB+IGS07zXWywEgvT3Ky46vdKKT+Ks= +go.opentelemetry.io/collector/pipeline v1.54.0/go.mod h1:RD90NG3Jbk965Xaqym3JyHkuol4uZJjQVUkD9ddXJIs= +go.opentelemetry.io/collector/processor v1.54.0 h1:zmHBFiEFmU9ZYuHhVP3lHIkbfy+ueapzGpTdXVMcWBg= +go.opentelemetry.io/collector/processor v1.54.0/go.mod h1:L0lA6DZ0VbrtQBg44cmYfSpRlgm4zxW1I6QfBnRizPw= +go.opentelemetry.io/collector/processor/processortest v0.148.0 h1:p0k59frZxy/Z4fXe82i5eOJv/UyOH75XhI8nFD1ZWCE= +go.opentelemetry.io/collector/processor/processortest v0.148.0/go.mod h1:E2Li2gnkUXgvApvGyEtn3Eq5KyzV05ljfbFRsZ7sTC4= +go.opentelemetry.io/collector/processor/xprocessor v0.148.0 h1:v7Qv6k2b2cvgGWuTO5KN5QYDLl1r5sznt7Le4Fhpa4c= +go.opentelemetry.io/collector/processor/xprocessor v0.148.0/go.mod h1:r7ADpSX2nf0rZR9STxh956Qw1740QOWMXLnEM/ZiaF8= go.opentelemetry.io/contrib/bridges/prometheus v0.64.0 h1:7TYhBCu6Xz6vDJGNtEslWZLuuX2IJ/aH50hBY4MVeUg= go.opentelemetry.io/contrib/bridges/prometheus v0.64.0/go.mod h1:tHQctZfAe7e4PBPGyt3kae6mQFXNpj+iiDJa3ithM50= go.opentelemetry.io/contrib/detectors/gcp v1.39.0 h1:kWRNZMsfBHZ+uHjiH4y7Etn2FK26LAGkNFw7RHv1DhE= @@ -1276,8 +1276,8 @@ 
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.43.0 h1:w1K go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.43.0/go.mod h1:HBy4BjzgVE8139ieRI75oXm3EcDN+6GhD88JT1Kjvxg= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 h1:88Y4s2C8oTui1LGM6bTWkw0ICGcOLCAI5l6zsD1j20k= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0/go.mod h1:Vl1/iaggsuRlrHf/hfPJPvVag77kKyvrLeD10kpMl+A= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 h1:DvJDOPmSWQHWywQS6lKL+pb8s3gBLOZUtw4N+mavW1I= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0/go.mod h1:EtekO9DEJb4/jRyN4v4Qjc2yA7AtfCBuz2FynRUWTXs= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.42.0 h1:zWWrB1U6nqhS/k6zYB74CjRpuiitRtLLi68VcgmOEto= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.42.0/go.mod h1:2qXPNBX1OVRC0IwOnfo1ljoid+RD0QK3443EaqVlsOU= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0 h1:3iZJKlCZufyRzPzlQhUIWVmfltrXuGyfjREgGP3UUjc= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0/go.mod h1:/G+nUPfhq2e+qiXMGxMwumDrP5jtzU+mWN7/sjT2rak= go.opentelemetry.io/otel/exporters/prometheus v0.61.0 h1:cCyZS4dr67d30uDyh8etKM2QyDsQ4zC9ds3bdbrVoD0= @@ -1307,12 +1307,12 @@ go.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09 go.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0= go.opentelemetry.io/proto/otlp v1.10.0 h1:IQRWgT5srOCYfiWnpqUYz9CVmbO8bFmKcwYxpuCSL2g= go.opentelemetry.io/proto/otlp v1.10.0/go.mod h1:/CV4QoCR/S9yaPj8utp3lvQPoqMtxXdzn7ozvvozVqk= -go.opentelemetry.io/proto/slim/otlp v1.9.0 h1:fPVMv8tP3TrsqlkH1HWYUpbCY9cAIemx184VGkS6vlE= -go.opentelemetry.io/proto/slim/otlp v1.9.0/go.mod h1:xXdeJJ90Gqyll+orzUkY4bOd2HECo5JofeoLpymVqdI= -go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.2.0 h1:o13nadWDNkH/quoDomDUClnQBpdQQ2Qqv0lQBjIXjE8= 
-go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.2.0/go.mod h1:Gyb6Xe7FTi/6xBHwMmngGoHqL0w29Y4eW8TGFzpefGA= -go.opentelemetry.io/proto/slim/otlp/profiles/v1development v0.2.0 h1:EiUYvtwu6PMrMHVjcPfnsG3v+ajPkbUeH+IL93+QYyk= -go.opentelemetry.io/proto/slim/otlp/profiles/v1development v0.2.0/go.mod h1:mUUHKFiN2SST3AhJ8XhJxEoeVW12oqfXog0Bo8W3Ec4= +go.opentelemetry.io/proto/slim/otlp v1.10.0 h1:iR97Vs/ZDR+y9TfuP9b1XBtdPWeC+OMslIBmhcLU7jM= +go.opentelemetry.io/proto/slim/otlp v1.10.0/go.mod h1:lV9250stpjYLPNA5viFabIgP2QlUGRT1GdTgAf8SIUk= +go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.3.0 h1:RUF5rO0hAlgiJt1fzQVzcVs3vZVNHIcMLgOgG4rWNcQ= +go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.3.0/go.mod h1:I89cynRj8y+383o7tEQVg2SVA6SRgDVIouWPUVXjx0U= +go.opentelemetry.io/proto/slim/otlp/profiles/v1development v0.3.0 h1:CQvJSldHRUN6Z8jsUeYv8J0lXRvygALXIzsmAeCcZE0= +go.opentelemetry.io/proto/slim/otlp/profiles/v1development v0.3.0/go.mod h1:xSQ+mEfJe/GjK1LXEyVOoSI1N9JV9ZI923X5kup43W4= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -1321,8 +1321,8 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= -go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v2 v2.4.4 h1:tuyd0P+2Ont/d6e2rl3be67goVK4R6deVxCUX5vyPaQ= +go.yaml.in/yaml/v2 v2.4.4/go.mod h1:gMZqIpDtDqOfM0uNfy0SkpRhvUryYH0Z6wdMYcacYXQ= go.yaml.in/yaml/v3 v3.0.4 
h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= go.yaml.in/yaml/v4 v4.0.0-rc.4 h1:UP4+v6fFrBIb1l934bDl//mmnoIZEDK0idg1+AIvX5U= @@ -1339,7 +1339,6 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4= golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA= @@ -1418,7 +1417,6 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0= golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1499,7 +1497,6 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= @@ -1510,7 +1507,6 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.41.0 h1:QCgPso/Q3RTJx2Th4bDLqML4W6iJiaXFq2/ftQF13YU= golang.org/x/term v0.41.0/go.mod h1:3pfBgksrReYfZ5lvYM0kSO0LIkAl4Yl2bXOkKP7Ec2A= @@ -1524,7 +1520,6 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.35.0 h1:JOVx6vVDFokkpaq1AEptVzLTpDe9KGpj5tR4/X+ybL8= golang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA= @@ -1714,8 +1709,8 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/v3 v3.5.1 
h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= -gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1723,12 +1718,12 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.35.1 h1:0PO/1FhlK/EQNVK5+txc4FuhQibV25VLSdLMmGpDE/Q= -k8s.io/api v0.35.1/go.mod h1:28uR9xlXWml9eT0uaGo6y71xK86JBELShLy4wR1XtxM= +k8s.io/api v0.35.3 h1:pA2fiBc6+N9PDf7SAiluKGEBuScsTzd2uYBkA5RzNWQ= +k8s.io/api v0.35.3/go.mod h1:9Y9tkBcFwKNq2sxwZTQh1Njh9qHl81D0As56tu42GA4= k8s.io/apimachinery v0.35.3 h1:MeaUwQCV3tjKP4bcwWGgZ/cp/vpsRnQzqO6J6tJyoF8= k8s.io/apimachinery v0.35.3/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns= -k8s.io/client-go v0.35.1 h1:+eSfZHwuo/I19PaSxqumjqZ9l5XiTEKbIaJ+j1wLcLM= -k8s.io/client-go v0.35.1/go.mod h1:1p1KxDt3a0ruRfc/pG4qT/3oHmUj1AhSHEcxNSGg+OA= +k8s.io/client-go v0.35.3 h1:s1lZbpN4uI6IxeTM2cpdtrwHcSOBML1ODNTCCfsP1pg= +k8s.io/client-go v0.35.3/go.mod h1:RzoXkc0mzpWIDvBrRnD+VlfXP+lRzqQjCmKtiwZ8Q9c= k8s.io/klog/v2 v2.140.0 h1:Tf+J3AH7xnUzZyVVXhTgGhEKnFqye14aadWv7bzXdzc= k8s.io/klog/v2 v2.140.0/go.mod h1:o+/RWfJ6PwpnFn7OyAG3QnO47BFsymfEfrz6XyYSSp0= k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE= @@ -1763,6 
+1758,8 @@ modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0= modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A= modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk= +pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= diff --git a/loki-build-image/Dockerfile b/loki-build-image/Dockerfile index 11b4f52c935..b4f47b5a849 100644 --- a/loki-build-image/Dockerfile +++ b/loki-build-image/Dockerfile @@ -4,7 +4,7 @@ # tag of the Docker image in `../.drone/drone.jsonnet` and run `make drone`. # See ../docs/sources/community/maintaining/release-loki-build-image.md for instructions # on how to publish a new build image. -ARG GO_VERSION=1.26.1 +ARG GO_VERSION=1.26.2 ARG GOLANG_BASE_IMAGE=golang:${GO_VERSION}-trixie # Install helm (https://helm.sh/) and helm-docs (https://github.com/norwoodj/helm-docs) for generating Helm Chart reference. 
@@ -17,7 +17,7 @@ RUN BIN=$([ "$TARGETARCH" = "arm64" ] && echo "helm-docs_Linux_arm64" || echo "h curl -L "https://github.com/norwoodj/helm-docs/releases/download/v1.14.2/$BIN.tar.gz" | tar zx && \ install -t /usr/local/bin helm-docs -FROM alpine:3.23.3@sha256:25109184c71bdad752c8312a8623239686a9a2071e8825f20acb8f2198c3f659 AS lychee +FROM alpine:3.23.4@sha256:5b10f432ef3da1b8d4c7eb6c487f2f5a8f096bc91145e68878dd4a5019afde11 AS lychee ARG TARGETARCH ARG LYCHEE_VER="0.7.0" RUN apk add --no-cache curl && \ @@ -26,18 +26,18 @@ RUN apk add --no-cache curl && \ mv /tmp/lychee /usr/bin/lychee && \ rm -rf "/tmp/linux-$TARGETARCH" /tmp/lychee-$LYCHEE_VER.tgz -FROM alpine:3.23.3@sha256:25109184c71bdad752c8312a8623239686a9a2071e8825f20acb8f2198c3f659 AS golangci +FROM alpine:3.23.4@sha256:5b10f432ef3da1b8d4c7eb6c487f2f5a8f096bc91145e68878dd4a5019afde11 AS golangci RUN apk add --no-cache curl && \ cd / && \ curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v2.11.4 -FROM alpine:3.23.3@sha256:25109184c71bdad752c8312a8623239686a9a2071e8825f20acb8f2198c3f659 AS buf +FROM alpine:3.23.4@sha256:5b10f432ef3da1b8d4c7eb6c487f2f5a8f096bc91145e68878dd4a5019afde11 AS buf ARG TARGETOS RUN apk add --no-cache curl && \ curl -sSL "https://github.com/bufbuild/buf/releases/download/v1.66.1/buf-$TARGETOS-$(uname -m)" -o "/usr/bin/buf" && \ chmod +x "/usr/bin/buf" -FROM alpine:3.23.3@sha256:25109184c71bdad752c8312a8623239686a9a2071e8825f20acb8f2198c3f659 AS docker +FROM alpine:3.23.4@sha256:5b10f432ef3da1b8d4c7eb6c487f2f5a8f096bc91145e68878dd4a5019afde11 AS docker RUN apk add --no-cache docker-cli docker-cli-buildx FROM ${GOLANG_BASE_IMAGE} AS drone diff --git a/loki-build-image/README.md b/loki-build-image/README.md index 9f6c1ee08a1..2e55955e9a0 100644 --- a/loki-build-image/README.md +++ b/loki-build-image/README.md @@ -4,7 +4,7 @@ ### 0.35.1 -- Update to Go 1.26.1 +- Update to Go 1.26.2 ### 0.35.0 diff --git a/operator/Dockerfile.cross 
b/operator/Dockerfile.cross index 6960920f475..d51b5e14be5 100644 --- a/operator/Dockerfile.cross +++ b/operator/Dockerfile.cross @@ -1,4 +1,4 @@ -ARG BUILD_IMAGE=grafana/loki-build-image:0.33.6 +ARG BUILD_IMAGE=grafana/loki-build-image:0.35.1 FROM golang:1.25.7-alpine as goenv RUN go env GOARCH > /goarch && \ diff --git a/pkg/ruler/base/notifier.go b/pkg/ruler/base/notifier.go index 5cab8b20d35..e1a71c43893 100644 --- a/pkg/ruler/base/notifier.go +++ b/pkg/ruler/base/notifier.go @@ -27,7 +27,7 @@ import ( // TODO: Instead of using the same metrics for all notifiers, // should we have separate metrics for each discovery.NewManager? var ( - sdMetrics map[string]discovery.DiscovererMetrics + sdMetrics *discovery.SDMetrics srvDNSregexp = regexp.MustCompile(`^_.+._.+`) ) diff --git a/pkg/ruler/base/storage.go b/pkg/ruler/base/storage.go index b4359ec51fc..8e95fb0037f 100644 --- a/pkg/ruler/base/storage.go +++ b/pkg/ruler/base/storage.go @@ -4,6 +4,8 @@ import ( "context" "flag" "fmt" + "io" + "log/slog" "github.com/go-kit/log" "github.com/pkg/errors" @@ -142,15 +144,19 @@ func NewRuleStore(ctx context.Context, cfg rulestore.Config, cfgProvider bucket. // default PromQL parser. This replaces direct use of newDefaultFileLoader(), // whose parser field is unexported and nil by default, causing panics. 
type defaultFileLoader struct { - p parser.Parser + p parser.Parser + noopLogger *slog.Logger } func newDefaultFileLoader() defaultFileLoader { - return defaultFileLoader{p: parser.NewParser(parser.Options{})} + return defaultFileLoader{ + p: parser.NewParser(parser.Options{}), + noopLogger: slog.New(slog.NewTextHandler(io.Discard, nil)), + } } func (fl defaultFileLoader) Load(identifier string, ignoreUnknownFields bool, nameValidationScheme model.ValidationScheme) (*rulefmt.RuleGroups, []error) { - return rulefmt.ParseFile(identifier, ignoreUnknownFields, nameValidationScheme, fl.p) + return rulefmt.ParseFile(identifier, ignoreUnknownFields, nameValidationScheme, fl.p, fl.noopLogger) } func (fl defaultFileLoader) Parse(query string) (parser.Expr, error) { diff --git a/pkg/ruler/rulestore/local/local_test.go b/pkg/ruler/rulestore/local/local_test.go index fc4af41b18f..a759bb1f056 100644 --- a/pkg/ruler/rulestore/local/local_test.go +++ b/pkg/ruler/rulestore/local/local_test.go @@ -2,6 +2,8 @@ package local import ( "context" + "io" + "log/slog" "os" "path" "testing" @@ -87,7 +89,8 @@ func TestClient_LoadAllRuleGroups(t *testing.T) { type testFileLoader struct{} func (testFileLoader) Load(identifier string, ignoreUnknownFields bool, nameValidationScheme model.ValidationScheme) (*rulefmt.RuleGroups, []error) { - return rulefmt.ParseFile(identifier, ignoreUnknownFields, nameValidationScheme, parser.NewParser(parser.Options{})) + noopLogger := slog.New(slog.NewTextHandler(io.Discard, nil)) + return rulefmt.ParseFile(identifier, ignoreUnknownFields, nameValidationScheme, parser.NewParser(parser.Options{}), noopLogger) } func (testFileLoader) Parse(query string) (parser.Expr, error) { diff --git a/tools/dev/loki-tsdb-storage-s3/dev.dockerfile b/tools/dev/loki-tsdb-storage-s3/dev.dockerfile index c40dd214ac1..405cd3f274b 100644 --- a/tools/dev/loki-tsdb-storage-s3/dev.dockerfile +++ b/tools/dev/loki-tsdb-storage-s3/dev.dockerfile @@ -2,7 +2,7 @@ FROM golang:1.24 ENV 
CGO_ENABLED=0 RUN go install github.com/go-delve/delve/cmd/dlv@v1.24.2 -FROM alpine:3.23.3@sha256:25109184c71bdad752c8312a8623239686a9a2071e8825f20acb8f2198c3f659 +FROM alpine:3.23.4@sha256:5b10f432ef3da1b8d4c7eb6c487f2f5a8f096bc91145e68878dd4a5019afde11 RUN mkdir /loki WORKDIR /loki diff --git a/tools/stream-generator/Dockerfile b/tools/stream-generator/Dockerfile index 49d065769d5..e2040e2bc9a 100644 --- a/tools/stream-generator/Dockerfile +++ b/tools/stream-generator/Dockerfile @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.26 +ARG GO_VERSION=1.26.2 # Go build stage FROM golang:${GO_VERSION} AS build diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/CHANGELOG.md index 41ed56689a9..2f5fdefadf6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/CHANGELOG.md @@ -1,3 +1,40 @@ +# v1.296.0 (2026-03-19) + +* **Feature**: Amazon EC2 Fleet instant mode now supports launching instances into Interruptible Capacity Reservations, enabling customers to use spare capacity shared by Capacity Reservation owners within their AWS Organization. + +# v1.295.0 (2026-03-18) + +* **Feature**: The DescribeInstanceTypes API now returns default connection tracking timeout values for TCP, UDP, and UDP stream via the new connectionTrackingConfiguration field on NetworkInfo. + +# v1.294.1 (2026-03-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.294.0 (2026-03-05) + +* **Feature**: Added metadata field to CapacityAllocation. + +# v1.293.1 (2026-03-03) + +* **Dependency Update**: Bump minimum Go version to 1.24 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.293.0 (2026-02-26) + +* **Feature**: Add c8id, m8id and hpc8a instance types. + +# v1.292.0 (2026-02-25) + +* **Feature**: Add support for EC2 Capacity Blocks in Local Zones. 
+ +# v1.291.0 (2026-02-24) + +* **Feature**: Adds httpTokensEnforced property to ModifyInstanceMetadataDefaults API. Set per account or manage organization-wide using declarative policies to prevent IMDSv1-enabled instance launch and block attempts to enable IMDSv1 on existing IMDSv2-only instances. + +# v1.290.1 (2026-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.290.0 (2026-02-17) * **Feature**: Add Operator field to CreatePlacementGroup and DescribePlacementGroup APIs. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateFleet.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateFleet.go index a96e0f3d619..41e02006a57 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateFleet.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateFleet.go @@ -83,6 +83,12 @@ type CreateFleetInput struct { // [EC2 Fleet health checks]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/manage-ec2-fleet.html#ec2-fleet-health-checks ReplaceUnhealthyInstances *bool + // Defines EC2 Fleet preferences for utilizing reserved capacity when + // DefaultTargetCapacityType is set to reserved-capacity . + // + // Supported only for fleets of type instant . + ReservedCapacityOptions *types.ReservedCapacityOptionsRequest + // Describes the configuration of Spot Instances in an EC2 Fleet. SpotOptions *types.SpotOptionsRequest diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlockOfferings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlockOfferings.go index bac56211ebc..1062adcf8f2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlockOfferings.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlockOfferings.go @@ -41,6 +41,12 @@ type DescribeCapacityBlockOfferingsInput struct { // This member is required. 
CapacityDurationHours *int32 + // Include all Availability Zones and Local Zones, regardless of your opt-in + // status. If you do not use this parameter, the results include available + // offerings from all Availability Zones in the Amazon Web Services Region and + // Local Zones you are opted into. + AllAvailabilityZones *bool + // Checks whether you have the required permissions for the action, without // actually making the request, and provides an error response. If you have the // required permissions, the error response is DryRunOperation . Otherwise, it is diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceMetadataDefaults.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceMetadataDefaults.go index 3476cd0ec38..b210ca7be6a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceMetadataDefaults.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceMetadataDefaults.go @@ -62,6 +62,14 @@ type ModifyInstanceMetadataDefaultsInput struct { // must use IMDSv2. HttpTokens types.MetadataDefaultHttpTokensState + // Specifies whether to enforce the requirement of IMDSv2 on an instance at the + // time of launch. When enforcement is enabled, the instance can't launch unless + // IMDSv2 ( HttpTokens ) is set to required . For more information, see [Enforce IMDSv2 at the account level] in the + // Amazon EC2 User Guide. + // + // [Enforce IMDSv2 at the account level]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-IMDS-new-instances.html#enforce-imdsv2-at-the-account-level + HttpTokensEnforced types.DefaultHttpTokensEnforcedState + // Enables or disables access to an instance's tags from the instance metadata. // For more information, see [View tags for your EC2 instances using instance metadata]in the Amazon EC2 User Guide. 
// diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/deserializers.go index a0506228cef..2deaf725436 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/deserializers.go @@ -78007,6 +78007,12 @@ func awsEc2query_deserializeDocumentCapacityAllocation(v **types.CapacityAllocat originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { + case strings.EqualFold("allocationMetadataList", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentCapacityAllocationMetadataList(&sv.AllocationMetadata, nodeDecoder); err != nil { + return err + } + case strings.EqualFold("allocationType", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -78051,6 +78057,136 @@ func awsEc2query_deserializeDocumentCapacityAllocation(v **types.CapacityAllocat return nil } +func awsEc2query_deserializeDocumentCapacityAllocationMetadataEntry(v **types.CapacityAllocationMetadataEntry, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.CapacityAllocationMetadataEntry + if *v == nil { + sv = &types.CapacityAllocationMetadataEntry{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("key", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Key = ptr.String(xtv) + } + + case strings.EqualFold("value", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Value = ptr.String(xtv) + } 
+ + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentCapacityAllocationMetadataList(v *[]types.CapacityAllocationMetadataEntry, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.CapacityAllocationMetadataEntry + if *v == nil { + sv = make([]types.CapacityAllocationMetadataEntry, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("item", t.Name.Local): + var col types.CapacityAllocationMetadataEntry + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsEc2query_deserializeDocumentCapacityAllocationMetadataEntry(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentCapacityAllocationMetadataListUnwrapped(v *[]types.CapacityAllocationMetadataEntry, decoder smithyxml.NodeDecoder) error { + var sv []types.CapacityAllocationMetadataEntry + if *v == nil { + sv = make([]types.CapacityAllocationMetadataEntry, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.CapacityAllocationMetadataEntry + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsEc2query_deserializeDocumentCapacityAllocationMetadataEntry(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} func awsEc2query_deserializeDocumentCapacityAllocations(v *[]types.CapacityAllocation, decoder 
smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -78494,6 +78630,19 @@ func awsEc2query_deserializeDocumentCapacityBlockExtension(v **types.CapacityBlo sv.UpfrontFee = ptr.String(xtv) } + case strings.EqualFold("zoneType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ZoneType = ptr.String(xtv) + } + default: // Do nothing and ignore the unexpected tag element err = decoder.Decoder.Skip() @@ -78706,6 +78855,19 @@ func awsEc2query_deserializeDocumentCapacityBlockExtensionOffering(v **types.Cap sv.UpfrontFee = ptr.String(xtv) } + case strings.EqualFold("zoneType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ZoneType = ptr.String(xtv) + } + default: // Do nothing and ignore the unexpected tag element err = decoder.Decoder.Skip() @@ -79071,6 +79233,19 @@ func awsEc2query_deserializeDocumentCapacityBlockOffering(v **types.CapacityBloc sv.UpfrontFee = ptr.String(xtv) } + case strings.EqualFold("zoneType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ZoneType = ptr.String(xtv) + } + default: // Do nothing and ignore the unexpected tag element err = decoder.Decoder.Skip() @@ -87039,6 +87214,93 @@ func awsEc2query_deserializeDocumentDedicatedHostIdListUnwrapped(v *[]string, de *v = sv return nil } +func awsEc2query_deserializeDocumentDefaultConnectionTrackingConfiguration(v **types.DefaultConnectionTrackingConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.DefaultConnectionTrackingConfiguration + if *v == nil { + sv = &types.DefaultConnectionTrackingConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + 
return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("defaultTcpEstablishedTimeout", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.DefaultTcpEstablishedTimeout = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("defaultUdpStreamTimeout", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.DefaultUdpStreamTimeout = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("defaultUdpTimeout", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.DefaultUdpTimeout = ptr.Int32(int32(i64)) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsEc2query_deserializeDocumentDeleteFleetError(v **types.DeleteFleetError, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -96258,6 +96520,12 @@ func awsEc2query_deserializeDocumentFleetData(v **types.FleetData, decoder smith sv.ReplaceUnhealthyInstances = ptr.Bool(xtv) } + case strings.EqualFold("reservedCapacityOptions", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentReservedCapacityOptions(&sv.ReservedCapacityOptions, nodeDecoder); err != nil { + return err + } + case strings.EqualFold("spotOptions", t.Name.Local): nodeDecoder := 
smithyxml.WrapNodeDecoder(decoder.Decoder, t) if err := awsEc2query_deserializeDocumentSpotOptions(&sv.SpotOptions, nodeDecoder); err != nil { @@ -107330,6 +107598,19 @@ func awsEc2query_deserializeDocumentInstanceMetadataDefaultsResponse(v **types.I sv.HttpTokens = types.HttpTokensState(xtv) } + case strings.EqualFold("httpTokensEnforced", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.HttpTokensEnforced = types.HttpTokensEnforcedState(xtv) + } + case strings.EqualFold("instanceMetadataTags", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -129888,6 +130169,12 @@ func awsEc2query_deserializeDocumentNetworkInfo(v **types.NetworkInfo, decoder s return err } + case strings.EqualFold("connectionTrackingConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentDefaultConnectionTrackingConfiguration(&sv.ConnectionTrackingConfiguration, nodeDecoder); err != nil { + return err + } + case strings.EqualFold("defaultNetworkCardIndex", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -140428,6 +140715,86 @@ func awsEc2query_deserializeDocumentReservationListUnwrapped(v *[]types.Reservat *v = sv return nil } +func awsEc2query_deserializeDocumentReservationTypeList(v *[]types.FleetReservationType, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.FleetReservationType + if *v == nil { + sv = make([]types.FleetReservationType, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + decoder = memberDecoder + switch { + case strings.EqualFold("item", t.Name.Local): + var col types.FleetReservationType + val, err := decoder.Value() + if err != 
nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + col = types.FleetReservationType(xtv) + } + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentReservationTypeListUnwrapped(v *[]types.FleetReservationType, decoder smithyxml.NodeDecoder) error { + var sv []types.FleetReservationType + if *v == nil { + sv = make([]types.FleetReservationType, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.FleetReservationType + t := decoder.StartEl + _ = t + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = types.FleetReservationType(xtv) + } + sv = append(sv, mv) + } + *v = sv + return nil +} func awsEc2query_deserializeDocumentReservationValue(v **types.ReservationValue, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -140503,6 +140870,48 @@ func awsEc2query_deserializeDocumentReservationValue(v **types.ReservationValue, return nil } +func awsEc2query_deserializeDocumentReservedCapacityOptions(v **types.ReservedCapacityOptions, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ReservedCapacityOptions + if *v == nil { + sv = &types.ReservedCapacityOptions{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("reservationTypeSet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentReservationTypeList(&sv.ReservationTypes, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the 
unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsEc2query_deserializeDocumentReservedInstanceReservationValue(v **types.ReservedInstanceReservationValue, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/generated.json index 0b7cebf252a..fe91ae53d6e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/generated.json @@ -777,7 +777,6 @@ "internal/endpoints/endpoints.go", "internal/endpoints/endpoints_test.go", "options.go", - "protocol_test.go", "serializers.go", "snapshot_test.go", "sra_operation_order_test.go", @@ -785,7 +784,7 @@ "types/types.go", "validators.go" ], - "go": "1.23", + "go": "1.24", "module": "github.com/aws/aws-sdk-go-v2/service/ec2", "unstable": false } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/go_module_metadata.go index 369efded1da..962a5f20c2d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/go_module_metadata.go @@ -3,4 +3,4 @@ package ec2 // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.290.0" +const goModuleVersion = "1.296.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/options.go index 3c934b00815..6bb1d113c65 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/options.go @@ -58,8 +58,7 @@ type Options struct { // the client option BaseEndpoint instead. 
EndpointResolver EndpointResolver - // Resolves the endpoint used for a particular service operation. This should be - // used over the deprecated EndpointResolver. + // Resolves the endpoint used for a particular service operation. EndpointResolverV2 EndpointResolverV2 // Signature Version 4 (SigV4) Signer diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/serializers.go index 482f61650e0..2009856744b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/serializers.go @@ -61523,6 +61523,33 @@ func awsEc2query_serializeDocumentReservationFleetInstanceSpecificationList(v [] return nil } +func awsEc2query_serializeDocumentReservationTypeListRequest(v []types.FleetReservationType, value query.Value) error { + if len(v) == 0 { + return nil + } + array := value.Array("ReservationType") + + for i := range v { + av := array.Value() + av.String(string(v[i])) + } + return nil +} + +func awsEc2query_serializeDocumentReservedCapacityOptionsRequest(v *types.ReservedCapacityOptionsRequest, value query.Value) error { + object := value.Object() + _ = object + + if v.ReservationTypes != nil { + objectKey := object.FlatKey("ReservationType") + if err := awsEc2query_serializeDocumentReservationTypeListRequest(v.ReservationTypes, objectKey); err != nil { + return err + } + } + + return nil +} + func awsEc2query_serializeDocumentReservedInstanceIdSet(v []string, value query.Value) error { if len(v) == 0 { return nil @@ -67340,6 +67367,13 @@ func awsEc2query_serializeOpDocumentCreateFleetInput(v *CreateFleetInput, value objectKey.Boolean(*v.ReplaceUnhealthyInstances) } + if v.ReservedCapacityOptions != nil { + objectKey := object.Key("ReservedCapacityOptions") + if err := awsEc2query_serializeDocumentReservedCapacityOptionsRequest(v.ReservedCapacityOptions, objectKey); err != nil { + return err + } + } + if v.SpotOptions != nil { 
objectKey := object.Key("SpotOptions") if err := awsEc2query_serializeDocumentSpotOptionsRequest(v.SpotOptions, objectKey); err != nil { @@ -73614,6 +73648,11 @@ func awsEc2query_serializeOpDocumentDescribeCapacityBlockOfferingsInput(v *Descr object := value.Object() _ = object + if v.AllAvailabilityZones != nil { + objectKey := object.Key("AllAvailabilityZones") + objectKey.Boolean(*v.AllAvailabilityZones) + } + if v.CapacityDurationHours != nil { objectKey := object.Key("CapacityDurationHours") objectKey.Integer(*v.CapacityDurationHours) @@ -84464,6 +84503,11 @@ func awsEc2query_serializeOpDocumentModifyInstanceMetadataDefaultsInput(v *Modif objectKey.String(string(v.HttpTokens)) } + if len(v.HttpTokensEnforced) > 0 { + objectKey := object.Key("HttpTokensEnforced") + objectKey.String(string(v.HttpTokensEnforced)) + } + if len(v.InstanceMetadataTags) > 0 { objectKey := object.Key("InstanceMetadataTags") objectKey.String(string(v.InstanceMetadataTags)) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/types/enums.go index 288fb25b2a6..36a76dd4342 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/types/enums.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/types/enums.go @@ -1800,6 +1800,28 @@ func (DatafeedSubscriptionState) Values() []DatafeedSubscriptionState { } } +type DefaultHttpTokensEnforcedState string + +// Enum values for DefaultHttpTokensEnforcedState +const ( + DefaultHttpTokensEnforcedStateDisabled DefaultHttpTokensEnforcedState = "disabled" + DefaultHttpTokensEnforcedStateEnabled DefaultHttpTokensEnforcedState = "enabled" + DefaultHttpTokensEnforcedStateNoPreference DefaultHttpTokensEnforcedState = "no-preference" +) + +// Values returns all known values for DefaultHttpTokensEnforcedState. Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. 
+// +// The ordering of this slice is not guaranteed to be stable across updates. +func (DefaultHttpTokensEnforcedState) Values() []DefaultHttpTokensEnforcedState { + return []DefaultHttpTokensEnforcedState{ + "disabled", + "enabled", + "no-preference", + } +} + type DefaultInstanceMetadataEndpointState string // Enum values for DefaultInstanceMetadataEndpointState @@ -1888,9 +1910,10 @@ type DefaultTargetCapacityType string // Enum values for DefaultTargetCapacityType const ( - DefaultTargetCapacityTypeSpot DefaultTargetCapacityType = "spot" - DefaultTargetCapacityTypeOnDemand DefaultTargetCapacityType = "on-demand" - DefaultTargetCapacityTypeCapacityBlock DefaultTargetCapacityType = "capacity-block" + DefaultTargetCapacityTypeSpot DefaultTargetCapacityType = "spot" + DefaultTargetCapacityTypeOnDemand DefaultTargetCapacityType = "on-demand" + DefaultTargetCapacityTypeCapacityBlock DefaultTargetCapacityType = "capacity-block" + DefaultTargetCapacityTypeReservedCapacity DefaultTargetCapacityType = "reserved-capacity" ) // Values returns all known values for DefaultTargetCapacityType. Note that this @@ -1902,6 +1925,7 @@ func (DefaultTargetCapacityType) Values() []DefaultTargetCapacityType { "spot", "on-demand", "capacity-block", + "reserved-capacity", } } @@ -2852,6 +2876,23 @@ func (FleetReplacementStrategy) Values() []FleetReplacementStrategy { } } +type FleetReservationType string + +// Enum values for FleetReservationType +const ( + FleetReservationTypeInterruptibleCapacityReservation FleetReservationType = "interruptible-capacity-reservation" +) + +// Values returns all known values for FleetReservationType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
+func (FleetReservationType) Values() []FleetReservationType { + return []FleetReservationType{ + "interruptible-capacity-reservation", + } +} + type FleetStateCode string // Enum values for FleetStateCode @@ -3184,6 +3225,25 @@ func (HostTenancy) Values() []HostTenancy { } } +type HttpTokensEnforcedState string + +// Enum values for HttpTokensEnforcedState +const ( + HttpTokensEnforcedStateDisabled HttpTokensEnforcedState = "disabled" + HttpTokensEnforcedStateEnabled HttpTokensEnforcedState = "enabled" +) + +// Values returns all known values for HttpTokensEnforcedState. Note that this can +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (HttpTokensEnforcedState) Values() []HttpTokensEnforcedState { + return []HttpTokensEnforcedState{ + "disabled", + "enabled", + } +} + type HttpTokensState string // Enum values for HttpTokensState @@ -3690,8 +3750,9 @@ type InstanceLifecycle string // Enum values for InstanceLifecycle const ( - InstanceLifecycleSpot InstanceLifecycle = "spot" - InstanceLifecycleOnDemand InstanceLifecycle = "on-demand" + InstanceLifecycleSpot InstanceLifecycle = "spot" + InstanceLifecycleOnDemand InstanceLifecycle = "on-demand" + InstanceLifecycleInterruptibleCapacityReservation InstanceLifecycle = "interruptible-capacity-reservation" ) // Values returns all known values for InstanceLifecycle. 
Note that this can be @@ -3702,6 +3763,7 @@ func (InstanceLifecycle) Values() []InstanceLifecycle { return []InstanceLifecycle{ "spot", "on-demand", + "interruptible-capacity-reservation", } } @@ -5082,6 +5144,33 @@ const ( InstanceTypeR8id96xlarge InstanceType = "r8id.96xlarge" InstanceTypeR8idMetal48xl InstanceType = "r8id.metal-48xl" InstanceTypeR8idMetal96xl InstanceType = "r8id.metal-96xl" + InstanceTypeC8idLarge InstanceType = "c8id.large" + InstanceTypeC8idXlarge InstanceType = "c8id.xlarge" + InstanceTypeC8id2xlarge InstanceType = "c8id.2xlarge" + InstanceTypeC8id4xlarge InstanceType = "c8id.4xlarge" + InstanceTypeC8id8xlarge InstanceType = "c8id.8xlarge" + InstanceTypeC8id12xlarge InstanceType = "c8id.12xlarge" + InstanceTypeC8id16xlarge InstanceType = "c8id.16xlarge" + InstanceTypeC8id24xlarge InstanceType = "c8id.24xlarge" + InstanceTypeC8id32xlarge InstanceType = "c8id.32xlarge" + InstanceTypeC8id48xlarge InstanceType = "c8id.48xlarge" + InstanceTypeC8id96xlarge InstanceType = "c8id.96xlarge" + InstanceTypeC8idMetal48xl InstanceType = "c8id.metal-48xl" + InstanceTypeC8idMetal96xl InstanceType = "c8id.metal-96xl" + InstanceTypeM8idLarge InstanceType = "m8id.large" + InstanceTypeM8idXlarge InstanceType = "m8id.xlarge" + InstanceTypeM8id2xlarge InstanceType = "m8id.2xlarge" + InstanceTypeM8id4xlarge InstanceType = "m8id.4xlarge" + InstanceTypeM8id8xlarge InstanceType = "m8id.8xlarge" + InstanceTypeM8id12xlarge InstanceType = "m8id.12xlarge" + InstanceTypeM8id16xlarge InstanceType = "m8id.16xlarge" + InstanceTypeM8id24xlarge InstanceType = "m8id.24xlarge" + InstanceTypeM8id32xlarge InstanceType = "m8id.32xlarge" + InstanceTypeM8id48xlarge InstanceType = "m8id.48xlarge" + InstanceTypeM8id96xlarge InstanceType = "m8id.96xlarge" + InstanceTypeM8idMetal48xl InstanceType = "m8id.metal-48xl" + InstanceTypeM8idMetal96xl InstanceType = "m8id.metal-96xl" + InstanceTypeHpc8a96xlarge InstanceType = "hpc8a.96xlarge" ) // Values returns all known values for 
InstanceType. Note that this can be @@ -6275,6 +6364,33 @@ func (InstanceType) Values() []InstanceType { "r8id.96xlarge", "r8id.metal-48xl", "r8id.metal-96xl", + "c8id.large", + "c8id.xlarge", + "c8id.2xlarge", + "c8id.4xlarge", + "c8id.8xlarge", + "c8id.12xlarge", + "c8id.16xlarge", + "c8id.24xlarge", + "c8id.32xlarge", + "c8id.48xlarge", + "c8id.96xlarge", + "c8id.metal-48xl", + "c8id.metal-96xl", + "m8id.large", + "m8id.xlarge", + "m8id.2xlarge", + "m8id.4xlarge", + "m8id.8xlarge", + "m8id.12xlarge", + "m8id.16xlarge", + "m8id.24xlarge", + "m8id.32xlarge", + "m8id.48xlarge", + "m8id.96xlarge", + "m8id.metal-48xl", + "m8id.metal-96xl", + "hpc8a.96xlarge", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/types/types.go index b65dd2bb372..616723b1849 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/types/types.go @@ -1363,6 +1363,10 @@ type CancelSpotFleetRequestsSuccessItem struct { // Information about instance capacity usage for a Capacity Reservation. type CapacityAllocation struct { + // Additional metadata associated with the capacity allocation. Each entry + // contains a key-value pair providing context about the allocation. + AllocationMetadata []CapacityAllocationMetadataEntry + // The usage type. used indicates that the instance capacity is in use by // instances that are running in the Capacity Reservation. AllocationType AllocationType @@ -1374,6 +1378,18 @@ type CapacityAllocation struct { noSmithyDocumentSerde } +// A key-value pair that provides additional metadata about a capacity allocation. +type CapacityAllocationMetadataEntry struct { + + // The key of the metadata entry. + Key *string + + // The value of the metadata entry. 
+ Value *string + + noSmithyDocumentSerde +} + // Reserve powerful GPU instances on a future date to support your short duration // machine learning (ML) workloads. Instances that run inside a Capacity Block are // automatically placed close together inside [Amazon EC2 UltraClusters], for low-latency, petabit-scale, @@ -1478,6 +1494,9 @@ type CapacityBlockExtension struct { // The total price to be paid up front. UpfrontFee *string + // The type of zone where the Capacity Block extension is located. + ZoneType *string + noSmithyDocumentSerde } @@ -1531,6 +1550,9 @@ type CapacityBlockExtensionOffering struct { // The total price of the Capacity Block extension offering, to be paid up front. UpfrontFee *string + // The type of zone where the Capacity Block extension offering is available. + ZoneType *string + noSmithyDocumentSerde } @@ -1580,6 +1602,9 @@ type CapacityBlockOffering struct { // The total price to be paid up front. UpfrontFee *string + // The type of zone where the Capacity Block offering is available. + ZoneType *string + noSmithyDocumentSerde } @@ -3331,8 +3356,8 @@ type CreateFleetError struct { // template. LaunchTemplateAndOverrides *LaunchTemplateAndOverridesResponse - // Indicates if the instance that could not be launched was a Spot Instance or - // On-Demand Instance. + // Indicates if the instance that could not be launched was a Spot, On-Demand, + // Capacity Block, or Interruptible Capacity Reservation instance. Lifecycle InstanceLifecycle noSmithyDocumentSerde @@ -3352,8 +3377,8 @@ type CreateFleetInstance struct { // template. LaunchTemplateAndOverrides *LaunchTemplateAndOverridesResponse - // Indicates if the instance that was launched is a Spot Instance or On-Demand - // Instance. + // Indicates if the instance that was launched is a Spot, On-Demand, Capacity + // Block, or Interruptible Capacity Reservation instance. Lifecycle InstanceLifecycle // The value is windows for Windows instances in an EC2 Fleet. 
Otherwise, the @@ -3816,6 +3841,26 @@ type DeclarativePoliciesReport struct { noSmithyDocumentSerde } +// Indicates default conntrack information for the instance type. For more +// information, see [Connection tracking timeouts]in the Amazon EC2 User Guide. +// +// [Connection tracking timeouts]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-connection-tracking.html#connection-tracking-timeouts +type DefaultConnectionTrackingConfiguration struct { + + // Default timeout (in seconds) for idle TCP connections in an established state. + DefaultTcpEstablishedTimeout *int32 + + // Default timeout (in seconds) for idle UDP flows classified as streams which + // have seen more than one request-response transaction. + DefaultUdpStreamTimeout *int32 + + // Default timeout (in seconds) for idle UDP flows that have seen traffic only in + // a single direction or a single request-response transaction. + DefaultUdpTimeout *int32 + + noSmithyDocumentSerde +} + // Describes an EC2 Fleet error. type DeleteFleetError struct { @@ -4078,8 +4123,8 @@ type DescribeFleetError struct { // template. LaunchTemplateAndOverrides *LaunchTemplateAndOverridesResponse - // Indicates if the instance that could not be launched was a Spot Instance or - // On-Demand Instance. + // Indicates if the instance that could not be launched was a Spot, On-Demand, + // Capacity Block, or Interruptible Capacity Reservation instance. Lifecycle InstanceLifecycle noSmithyDocumentSerde @@ -4099,8 +4144,8 @@ type DescribeFleetsInstances struct { // template. LaunchTemplateAndOverrides *LaunchTemplateAndOverridesResponse - // Indicates if the instance that was launched is a Spot Instance or On-Demand - // Instance. + // Indicates if the instance that was launched is a Spot, On-Demand, Capacity + // Block, or Interruptible Capacity Reservation instance. Lifecycle InstanceLifecycle // The value is windows for Windows instances in an EC2 Fleet. 
Otherwise, the @@ -6135,6 +6180,10 @@ type FleetData struct { // [EC2 Fleet health checks]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/manage-ec2-fleet.html#ec2-fleet-health-checks ReplaceUnhealthyInstances *bool + // Defines EC2 Fleet preferences for utilizing reserved capacity when + // DefaultTargetCapacityType is set to reserved-capacity . + ReservedCapacityOptions *ReservedCapacityOptions + // The configuration of Spot Instances in an EC2 Fleet. SpotOptions *SpotOptions @@ -8943,6 +8992,11 @@ type InstanceMetadataDefaultsResponse struct { // must use IMDSv2. HttpTokens HttpTokensState + // Indicates whether to enforce the requirement of IMDSv2 on an instance at the + // time of launch. When enforcement is enabled, the instance can't launch unless + // IMDSv2 ( HttpTokens ) is set to required . + HttpTokensEnforced HttpTokensEnforcedState + // Indicates whether access to instance tags from the instance metadata is enabled // or disabled. For more information, see [View tags for your EC2 instances using instance metadata]in the Amazon EC2 User Guide. // @@ -15631,6 +15685,9 @@ type NetworkInfo struct { // type, if supported. BandwidthWeightings []BandwidthWeightingType + // Indicates conntrack information for the instance type + ConnectionTrackingConfiguration *DefaultConnectionTrackingConfiguration + // The index of the default network card, starting at 0. DefaultNetworkCardIndex *int32 @@ -18251,6 +18308,37 @@ type ReservationValue struct { noSmithyDocumentSerde } +// Defines EC2 Fleet preferences for utilizing reserved capacity when +// DefaultTargetCapacityType is set to reserved-capacity . +type ReservedCapacityOptions struct { + + // The types of Capacity Reservations used for fulfilling the EC2 Fleet request. + ReservationTypes []FleetReservationType + + noSmithyDocumentSerde +} + +// Defines EC2 Fleet preferences for utilizing reserved capacity when +// DefaultTargetCapacityType is set to reserved-capacity . 
+// +// This configuration can only be used if the EC2 Fleet is of type instant . +// +// When you specify ReservedCapacityOptions , you must also set +// DefaultTargetCapacityType to reserved-capacity in the +// TargetCapacitySpecification . +// +// For more information about Interruptible Capacity Reservations, see [Launch instances into an Interruptible Capacity Reservation] in the +// Amazon EC2 User Guide. +// +// [Launch instances into an Interruptible Capacity Reservation]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-launch-instances-interruptible-cr-walkthrough.html +type ReservedCapacityOptionsRequest struct { + + // The types of Capacity Reservations to use for fulfilling the EC2 Fleet request. + ReservationTypes []FleetReservationType + + noSmithyDocumentSerde +} + // Describes the limit price of a Reserved Instance offering. type ReservedInstanceLimitPrice struct { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/CHANGELOG.md index ae70f397b90..656dadf8279 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/CHANGELOG.md @@ -1,3 +1,24 @@ +# v1.74.0 (2026-03-16) + +* **Feature**: Amazon ECS now supports configuring whether tags are propagated to the EC2 Instance Metadata Service (IMDS) for instances launched by the Managed Instances capacity provider. This gives customers control over tag visibility in IMDS when using ECS Managed Instances. 
+ +# v1.73.2 (2026-03-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.73.1 (2026-03-03) + +* **Dependency Update**: Bump minimum Go version to 1.24 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.73.0 (2026-02-26) + +* **Feature**: Adding support for Capacity Reservations for ECS Managed Instances by introducing a new "capacityOptionType" value of "RESERVED" and new field "capacityReservations" for CreateCapacityProvider and UpdateCapacityProvider APIs. + +# v1.72.1 (2026-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.72.0 (2026-02-20) * **Feature**: Migrated to Smithy. No functional changes diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/deserializers.go index 3bf829ca3c4..8905552b2ae 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/deserializers.go @@ -10097,6 +10097,55 @@ func awsAwsjson11_deserializeDocumentCapacityProviderStrategyItem(v **types.Capa return nil } +func awsAwsjson11_deserializeDocumentCapacityReservationRequest(v **types.CapacityReservationRequest, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CapacityReservationRequest + if *v == nil { + sv = &types.CapacityReservationRequest{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "reservationGroupArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ReservationGroupArn = ptr.String(jtv) + } + + case "reservationPreference": + if value != nil { + jtv, ok := value.(string) + if !ok { + 
return fmt.Errorf("expected CapacityReservationPreference to be of type string, got %T instead", value) + } + sv.ReservationPreference = types.CapacityReservationPreference(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeDocumentClientException(v **types.ClientException, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -15125,6 +15174,11 @@ func awsAwsjson11_deserializeDocumentInstanceLaunchTemplate(v **types.InstanceLa sv.CapacityOptionType = types.CapacityOptionType(jtv) } + case "capacityReservations": + if err := awsAwsjson11_deserializeDocumentCapacityReservationRequest(&sv.CapacityReservations, value); err != nil { + return err + } + case "ec2InstanceProfileArn": if value != nil { jtv, ok := value.(string) @@ -15143,6 +15197,15 @@ func awsAwsjson11_deserializeDocumentInstanceLaunchTemplate(v **types.InstanceLa sv.FipsEnabled = ptr.Bool(jtv) } + case "instanceMetadataTagsPropagation": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BoxedBoolean to be of type *bool, got %T instead", value) + } + sv.InstanceMetadataTagsPropagation = ptr.Bool(jtv) + } + case "instanceRequirements": if err := awsAwsjson11_deserializeDocumentInstanceRequirementsRequest(&sv.InstanceRequirements, value); err != nil { return err diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/generated.json index b81f2121a1c..baeb834df6d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/generated.json @@ -86,7 +86,6 @@ "internal/endpoints/endpoints.go", "internal/endpoints/endpoints_test.go", "options.go", - "protocol_test.go", "serializers.go", "snapshot_test.go", "sra_operation_order_test.go", @@ -95,7 +94,7 @@ "types/types.go", "validators.go" ], - "go": "1.23", + "go": "1.24", "module": 
"github.com/aws/aws-sdk-go-v2/service/ecs", "unstable": false } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/go_module_metadata.go index c87df633db6..9fcf911d852 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/go_module_metadata.go @@ -3,4 +3,4 @@ package ecs // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.72.0" +const goModuleVersion = "1.74.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/options.go index 51e9784a637..14ab29ad532 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/options.go @@ -58,8 +58,7 @@ type Options struct { // the client option BaseEndpoint instead. EndpointResolver EndpointResolver - // Resolves the endpoint used for a particular service operation. This should be - // used over the deprecated EndpointResolver. + // Resolves the endpoint used for a particular service operation. 
EndpointResolverV2 EndpointResolverV2 // Signature Version 4 (SigV4) Signer diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/serializers.go index 0a39efe90a5..db94442a71d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/serializers.go @@ -4270,6 +4270,23 @@ func awsAwsjson11_serializeDocumentCapacityProviderStrategyItem(v *types.Capacit return nil } +func awsAwsjson11_serializeDocumentCapacityReservationRequest(v *types.CapacityReservationRequest, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ReservationGroupArn != nil { + ok := object.Key("reservationGroupArn") + ok.String(*v.ReservationGroupArn) + } + + if len(v.ReservationPreference) > 0 { + ok := object.Key("reservationPreference") + ok.String(string(v.ReservationPreference)) + } + + return nil +} + func awsAwsjson11_serializeDocumentClusterConfiguration(v *types.ClusterConfiguration, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -5753,6 +5770,13 @@ func awsAwsjson11_serializeDocumentInstanceLaunchTemplate(v *types.InstanceLaunc ok.String(string(v.CapacityOptionType)) } + if v.CapacityReservations != nil { + ok := object.Key("capacityReservations") + if err := awsAwsjson11_serializeDocumentCapacityReservationRequest(v.CapacityReservations, ok); err != nil { + return err + } + } + if v.Ec2InstanceProfileArn != nil { ok := object.Key("ec2InstanceProfileArn") ok.String(*v.Ec2InstanceProfileArn) @@ -5763,6 +5787,11 @@ func awsAwsjson11_serializeDocumentInstanceLaunchTemplate(v *types.InstanceLaunc ok.Boolean(*v.FipsEnabled) } + if v.InstanceMetadataTagsPropagation != nil { + ok := object.Key("instanceMetadataTagsPropagation") + ok.Boolean(*v.InstanceMetadataTagsPropagation) + } + if v.InstanceRequirements != nil { ok := object.Key("instanceRequirements") if err := 
awsAwsjson11_serializeDocumentInstanceRequirementsRequest(v.InstanceRequirements, ok); err != nil { @@ -5796,11 +5825,23 @@ func awsAwsjson11_serializeDocumentInstanceLaunchTemplateUpdate(v *types.Instanc object := value.Object() defer object.Close() + if v.CapacityReservations != nil { + ok := object.Key("capacityReservations") + if err := awsAwsjson11_serializeDocumentCapacityReservationRequest(v.CapacityReservations, ok); err != nil { + return err + } + } + if v.Ec2InstanceProfileArn != nil { ok := object.Key("ec2InstanceProfileArn") ok.String(*v.Ec2InstanceProfileArn) } + if v.InstanceMetadataTagsPropagation != nil { + ok := object.Key("instanceMetadataTagsPropagation") + ok.Boolean(*v.InstanceMetadataTagsPropagation) + } + if v.InstanceRequirements != nil { ok := object.Key("instanceRequirements") if err := awsAwsjson11_serializeDocumentInstanceRequirementsRequest(v.InstanceRequirements, ok); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/types/enums.go index a6aed51ae11..0639ecd175b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/types/enums.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/types/enums.go @@ -240,6 +240,7 @@ type CapacityOptionType string const ( CapacityOptionTypeOnDemand CapacityOptionType = "ON_DEMAND" CapacityOptionTypeSpot CapacityOptionType = "SPOT" + CapacityOptionTypeReserved CapacityOptionType = "RESERVED" ) // Values returns all known values for CapacityOptionType. 
Note that this can be @@ -250,6 +251,7 @@ func (CapacityOptionType) Values() []CapacityOptionType { return []CapacityOptionType{ "ON_DEMAND", "SPOT", + "RESERVED", } } @@ -350,6 +352,28 @@ func (CapacityProviderUpdateStatus) Values() []CapacityProviderUpdateStatus { } } +type CapacityReservationPreference string + +// Enum values for CapacityReservationPreference +const ( + CapacityReservationPreferenceReservationsOnly CapacityReservationPreference = "RESERVATIONS_ONLY" + CapacityReservationPreferenceReservationsFirst CapacityReservationPreference = "RESERVATIONS_FIRST" + CapacityReservationPreferenceReservationsExcluded CapacityReservationPreference = "RESERVATIONS_EXCLUDED" +) + +// Values returns all known values for CapacityReservationPreference. Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (CapacityReservationPreference) Values() []CapacityReservationPreference { + return []CapacityReservationPreference{ + "RESERVATIONS_ONLY", + "RESERVATIONS_FIRST", + "RESERVATIONS_EXCLUDED", + } +} + type ClusterField string // Enum values for ClusterField diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/types/types.go index f15b8432d92..4b07014da26 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/types/types.go @@ -469,6 +469,32 @@ type CapacityProviderStrategyItem struct { noSmithyDocumentSerde } +// The Capacity Reservation configurations to be used when using the RESERVED +// capacity option type. +type CapacityReservationRequest struct { + + // The ARN of the Capacity Reservation resource group in which to run the instance. + ReservationGroupArn *string + + // The preference on when capacity reservations should be used. 
+ // + // Valid values are: + // + // - RESERVATIONS_ONLY - Exclusively launch instances into capacity reservations + // that match the instance requirements configured for the capacity provider. If + // none exist, instances will fail to provision. + // + // - RESERVATIONS_FIRST - Prefer to launch instances into a capacity reservation + // if any exist that match the instance requirements configured for the capacity + // provider. If none exist, fall back to launching instances On-Demand. + // + // - RESERVATIONS_EXCLUDED - Avoid using capacity reservations and launch + // exclusively On-Demand. + ReservationPreference CapacityReservationPreference + + noSmithyDocumentSerde +} + // A regional grouping of one or more container instances where you can run task // requests. Each account receives a default cluster the first time you use the // Amazon ECS service, but you may also create other clusters. Clusters may contain @@ -3370,8 +3396,9 @@ type InstanceLaunchTemplate struct { // This member is required. NetworkConfiguration *ManagedInstancesNetworkConfiguration - // The capacity option type. This determines whether Amazon ECS launches On-Demand - // or Spot Instances for your managed instance capacity provider. + // The capacity option type. This determines whether Amazon ECS launches + // On-Demand, Spot or Capacity Reservation Instances for your managed instance + // capacity provider. // // Valid values are: // @@ -3382,6 +3409,10 @@ type InstanceLaunchTemplate struct { // cost. Spot Instances can be interrupted by Amazon EC2 with a two-minute // notification when the capacity is needed back. // + // - RESERVED - Launches Instances using Amazon EC2 Capacity Reservations. + // Capacity Reservations allow you to reserve compute capacity for Amazon EC2 + // instances in a specific Availability Zone. 
+ // // The default is On-Demand // // For more information about Amazon EC2 capacity options, see [Instance purchasing options] in the Amazon EC2 @@ -3390,6 +3421,15 @@ type InstanceLaunchTemplate struct { // [Instance purchasing options]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-purchasing-options.html CapacityOptionType CapacityOptionType + // Capacity reservation specifications. You can specify: + // + // - Capacity reservation preference + // + // - Reservation resource group to be used for targeted capacity reservations + // + // Amazon ECS will launch instances according to the specified criteria. + CapacityReservations *CapacityReservationRequest + // Determines whether to enable FIPS 140-2 validated cryptographic modules on EC2 // instances launched by the capacity provider. If true , instances use // FIPS-compliant cryptographic algorithms and modules for enhanced security @@ -3399,6 +3439,22 @@ type InstanceLaunchTemplate struct { // regions and FIPS disabled in other regions. FipsEnabled *bool + // Determines whether tags are propagated to the instance metadata service (IMDS) + // for Amazon EC2 instances launched by the Managed Instances capacity provider. + // When enabled, all tags associated with the instance are available through the + // instance metadata service. When disabled, tags are not propagated to IMDS. + // + // Disable this setting if your tags contain characters that are not compatible + // with IMDS, such as / . IMDS requires tag keys to match the pattern + // [0-9a-zA-Z\-_+=,.@:]{1,255} . + // + // The default value is true . + // + // For more information, see [Work with instance tags in instance metadata] in the Amazon EC2 User Guide. + // + // [Work with instance tags in instance metadata]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS + InstanceMetadataTagsPropagation *bool + // The instance requirements. 
You can specify: // // - The instance types @@ -3436,6 +3492,11 @@ type InstanceLaunchTemplate struct { // [Store instance launch parameters in Amazon EC2 launch templates]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html type InstanceLaunchTemplateUpdate struct { + // The updated capacity reservations specifications for Amazon ECS Managed + // Instances. Changes to capacity reservations settings apply to new instances + // launched after the update. + CapacityReservations *CapacityReservationRequest + // The updated Amazon Resource Name (ARN) of the instance profile. The new // instance profile must have the necessary permissions for your tasks. // @@ -3444,6 +3505,22 @@ type InstanceLaunchTemplateUpdate struct { // [Amazon ECS instance profile for Managed Instances]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/managed-instances-instance-profile.html Ec2InstanceProfileArn *string + // Determines whether tags are propagated to the instance metadata service (IMDS) + // for Amazon EC2 instances launched by the Managed Instances capacity provider. + // When enabled, all tags associated with the instance are available through the + // instance metadata service. When disabled, tags are not propagated to IMDS. + // + // Disable this setting if your tags contain characters that are not compatible + // with IMDS, such as / . IMDS requires tag keys to match the pattern + // [0-9a-zA-Z\-_+=,.@:]{1,255} . + // + // The default value is true . + // + // For more information, see [Work with instance tags in instance metadata] in the Amazon EC2 User Guide. + // + // [Work with instance tags in instance metadata]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS + InstanceMetadataTagsPropagation *bool + // The updated instance requirements for attribute-based instance type selection. // Changes to instance requirements affect which instance types Amazon ECS selects // for new instances. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/elasticache/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/elasticache/CHANGELOG.md index 3079119b142..f515a80ed1f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/elasticache/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/elasticache/CHANGELOG.md @@ -1,3 +1,16 @@ +# v1.51.12 (2026-03-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.51.11 (2026-03-03) + +* **Dependency Update**: Bump minimum Go version to 1.24 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.51.10 (2026-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.51.9 (2026-01-09) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/elasticache/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/elasticache/generated.json index 1e1666fbcad..953c576fb9a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/elasticache/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/elasticache/generated.json @@ -93,7 +93,6 @@ "internal/endpoints/endpoints.go", "internal/endpoints/endpoints_test.go", "options.go", - "protocol_test.go", "serializers.go", "snapshot_test.go", "sra_operation_order_test.go", @@ -102,7 +101,7 @@ "types/types.go", "validators.go" ], - "go": "1.23", + "go": "1.24", "module": "github.com/aws/aws-sdk-go-v2/service/elasticache", "unstable": false } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/elasticache/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/elasticache/go_module_metadata.go index b4f82bfe1cd..d4b35ed3e3a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/elasticache/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/elasticache/go_module_metadata.go @@ -3,4 +3,4 @@ package elasticache // goModuleVersion is the tagged release for this module -const 
goModuleVersion = "1.51.9" +const goModuleVersion = "1.51.12" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/elasticache/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/elasticache/options.go index e939f232695..0e6a172275a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/elasticache/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/elasticache/options.go @@ -58,8 +58,7 @@ type Options struct { // the client option BaseEndpoint instead. EndpointResolver EndpointResolver - // Resolves the endpoint used for a particular service operation. This should be - // used over the deprecated EndpointResolver. + // Resolves the endpoint used for a particular service operation. EndpointResolverV2 EndpointResolverV2 // Signature Version 4 (SigV4) Signer diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kafka/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/kafka/CHANGELOG.md index aecc8e7d4a7..3a2bc883160 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kafka/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kafka/CHANGELOG.md @@ -1,3 +1,20 @@ +# v1.49.1 (2026-03-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.49.0 (2026-03-10) + +* **Feature**: Add dual stack endpoint to SDK + +# v1.48.2 (2026-03-03) + +* **Dependency Update**: Bump minimum Go version to 1.24 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.48.1 (2026-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.48.0 (2026-02-16) * **Feature**: Amazon MSK now supports dual-stack connectivity (IPv4 and IPv6) for existing MSK clusters. You can enable dual-stack on existing clusters by specifying the NetworkType parameter in updateConnectivity API. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kafka/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kafka/endpoints.go index a0415731b2e..4dd2ba33099 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kafka/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kafka/endpoints.go @@ -232,14 +232,6 @@ func bindRegion(region string) (*string, error) { // EndpointParameters provides the parameters that influence how endpoints are // resolved. type EndpointParameters struct { - // The AWS region used to dispatch the request. - // - // Parameter is - // required. - // - // AWS::Region - Region *string - // When true, use the dual-stack endpoint. If the configured endpoint does not // support dual-stack, dispatching the request MAY return an error. // @@ -266,6 +258,14 @@ type EndpointParameters struct { // // SDK::Endpoint Endpoint *string + + // The AWS region used to dispatch the request. + // + // Parameter is + // required. + // + // AWS::Region + Region *string } // ValidateRequired validates required parameters are set. 
@@ -364,6 +364,174 @@ func (r *resolver) ResolveEndpoint( if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { _PartitionResult := *exprVal _ = _PartitionResult + if _PartitionResult.Name == "aws" { + if _UseFIPS == false { + if _UseDualStack == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://kafka-api.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + } + if _PartitionResult.Name == "aws" { + if _UseFIPS == true { + if _UseDualStack == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://kafka-api-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + } + if _PartitionResult.Name == "aws-cn" { + if _UseFIPS == false { + if _UseDualStack == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://kafka-api.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + } + if _PartitionResult.Name == "aws-us-gov" { + if _UseFIPS == false { + if _UseDualStack == true { + uriString := func() string { + var out strings.Builder + 
out.WriteString("https://kafka-api.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + } + if _PartitionResult.Name == "aws-us-gov" { + if _UseFIPS == true { + if _UseDualStack == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://kafka-api.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + } + if _PartitionResult.Name == "aws-us-gov" { + if _UseFIPS == true { + if _UseDualStack == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://kafka.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + } + if _PartitionResult.Name == "aws-eusc" { + if _UseFIPS == false { + if _UseDualStack == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://kafka-api.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + 
Headers: http.Header{}, + }, nil + } + } + } if _UseFIPS == true { if _UseDualStack == true { if true == _PartitionResult.SupportsFIPS { @@ -392,13 +560,14 @@ func (r *resolver) ResolveEndpoint( } } if _UseFIPS == true { - if _PartitionResult.SupportsFIPS == true { - if _PartitionResult.Name == "aws-us-gov" { + if _UseDualStack == false { + if _PartitionResult.SupportsFIPS == true { uriString := func() string { var out strings.Builder - out.WriteString("https://kafka.") + out.WriteString("https://kafka-fips.") out.WriteString(_Region) - out.WriteString(".amazonaws.com") + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) return out.String() }() @@ -412,49 +581,33 @@ func (r *resolver) ResolveEndpoint( Headers: http.Header{}, }, nil } - uriString := func() string { - var out strings.Builder - out.WriteString("https://kafka-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil + return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS") } - return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS") } - if _UseDualStack == true { - if true == _PartitionResult.SupportsDualStack { - uriString := func() string { - var out strings.Builder - out.WriteString("https://kafka.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DualStackDnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } + if _UseFIPS == false { + if _UseDualStack == true { + if true == _PartitionResult.SupportsDualStack { + uriString := func() string { 
+ var out strings.Builder + out.WriteString("https://kafka.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack") } - return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack") } uriString := func() string { var out strings.Builder @@ -487,16 +640,15 @@ type endpointParamsBinder interface { func bindEndpointParams(ctx context.Context, input interface{}, options Options) (*EndpointParameters, error) { params := &EndpointParameters{} + params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) + params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) + params.Endpoint = options.BaseEndpoint region, err := bindRegion(options.Region) if err != nil { return nil, err } params.Region = region - params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) - params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) - params.Endpoint = options.BaseEndpoint - if b, ok := input.(endpointParamsBinder); ok { b.bindEndpointParams(params) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kafka/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/kafka/generated.json index 10fae59e442..768aa70306a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kafka/generated.json +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/kafka/generated.json @@ -77,7 +77,6 @@ "internal/endpoints/endpoints.go", "internal/endpoints/endpoints_test.go", "options.go", - "protocol_test.go", "serializers.go", "snapshot_test.go", "sra_operation_order_test.go", @@ -86,7 +85,7 @@ "types/types.go", "validators.go" ], - "go": "1.23", + "go": "1.24", "module": "github.com/aws/aws-sdk-go-v2/service/kafka", "unstable": false } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kafka/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kafka/go_module_metadata.go index a59b91a89ed..00662a0b681 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kafka/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kafka/go_module_metadata.go @@ -3,4 +3,4 @@ package kafka // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.48.0" +const goModuleVersion = "1.49.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kafka/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kafka/options.go index 0dbdf2435f6..0094b5c7122 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kafka/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kafka/options.go @@ -58,8 +58,7 @@ type Options struct { // the client option BaseEndpoint instead. EndpointResolver EndpointResolver - // Resolves the endpoint used for a particular service operation. This should be - // used over the deprecated EndpointResolver. + // Resolves the endpoint used for a particular service operation. 
EndpointResolverV2 EndpointResolverV2 // Signature Version 4 (SigV4) Signer diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/lightsail/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/lightsail/CHANGELOG.md index 78844c3f679..a08f9ae3f8f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/lightsail/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/lightsail/CHANGELOG.md @@ -1,3 +1,20 @@ +# v1.51.0 (2026-03-23) + +* **Feature**: Add support for tagging of ContactMethod resource type + +# v1.50.14 (2026-03-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.50.13 (2026-03-03) + +* **Dependency Update**: Bump minimum Go version to 1.24 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.50.12 (2026-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.50.11 (2026-01-09) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/lightsail/api_op_CreateContactMethod.go b/vendor/github.com/aws/aws-sdk-go-v2/service/lightsail/api_op_CreateContactMethod.go index e96be5d5446..4fa9f150e67 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/lightsail/api_op_CreateContactMethod.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/lightsail/api_op_CreateContactMethod.go @@ -80,6 +80,11 @@ type CreateContactMethodInput struct { // This member is required. Protocol types.ContactProtocol + // The tag keys and optional values to add to the contact method during create. + // + // Use the TagResource action to tag a resource after it's created. 
+ Tags []types.Tag + noSmithyDocumentSerde } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/lightsail/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/lightsail/deserializers.go index bbfb865b0d7..d91191fec7e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/lightsail/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/lightsail/deserializers.go @@ -24470,6 +24470,11 @@ func awsAwsjson11_deserializeDocumentContactMethod(v **types.ContactMethod, valu sv.SupportCode = ptr.String(jtv) } + case "tags": + if err := awsAwsjson11_deserializeDocumentTagList(&sv.Tags, value); err != nil { + return err + } + default: _, _ = key, value diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/lightsail/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/lightsail/generated.json index dd988b3d0a5..bb69097a826 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/lightsail/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/lightsail/generated.json @@ -179,7 +179,6 @@ "internal/endpoints/endpoints.go", "internal/endpoints/endpoints_test.go", "options.go", - "protocol_test.go", "serializers.go", "snapshot_test.go", "sra_operation_order_test.go", @@ -188,7 +187,7 @@ "types/types.go", "validators.go" ], - "go": "1.23", + "go": "1.24", "module": "github.com/aws/aws-sdk-go-v2/service/lightsail", "unstable": false } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/lightsail/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/lightsail/go_module_metadata.go index 91b9f88019c..370cd494030 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/lightsail/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/lightsail/go_module_metadata.go @@ -3,4 +3,4 @@ package lightsail // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.50.11" +const goModuleVersion = "1.51.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/lightsail/options.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/lightsail/options.go index 0485712d645..8b6cebd6049 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/lightsail/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/lightsail/options.go @@ -58,8 +58,7 @@ type Options struct { // the client option BaseEndpoint instead. EndpointResolver EndpointResolver - // Resolves the endpoint used for a particular service operation. This should be - // used over the deprecated EndpointResolver. + // Resolves the endpoint used for a particular service operation. EndpointResolverV2 EndpointResolverV2 // Signature Version 4 (SigV4) Signer diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/lightsail/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/lightsail/serializers.go index c84a9980a5f..75cd83d1ac7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/lightsail/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/lightsail/serializers.go @@ -11128,6 +11128,13 @@ func awsAwsjson11_serializeOpDocumentCreateContactMethodInput(v *CreateContactMe ok.String(string(v.Protocol)) } + if v.Tags != nil { + ok := object.Key("tags") + if err := awsAwsjson11_serializeDocumentTagList(v.Tags, ok); err != nil { + return err + } + } + return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/lightsail/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/lightsail/types/types.go index cf26005fe87..923d9dfd410 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/lightsail/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/lightsail/types/types.go @@ -1261,6 +1261,12 @@ type ContactMethod struct { // team to look up your Lightsail information more easily. SupportCode *string + // The tag keys and optional values for the resource. For more information about + // tags in Lightsail, see the [Amazon Lightsail Developer Guide]. 
+ // + // [Amazon Lightsail Developer Guide]: https://docs.aws.amazon.com/lightsail/latest/userguide/amazon-lightsail-tags + Tags []Tag + noSmithyDocumentSerde } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/rds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/rds/CHANGELOG.md index b976f64e49d..cc5e31b71b6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/rds/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/rds/CHANGELOG.md @@ -1,3 +1,16 @@ +# v1.117.0 (2026-03-24) + +* **Feature**: Adds support in Aurora PostgreSQL serverless databases for express configuration based creation through WithExpressConfiguration in CreateDbCluster API, and for restoring clusters using RestoreDBClusterToPointInTime and RestoreDBClusterFromSnapshot APIs. + +# v1.116.3 (2026-03-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.116.2 (2026-03-03) + +* **Dependency Update**: Bump minimum Go version to 1.24 +* **Dependency Update**: Updated to the latest SDK module versions + # v1.116.1 (2026-02-23) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/rds/api_op_CreateDBCluster.go b/vendor/github.com/aws/aws-sdk-go-v2/service/rds/api_op_CreateDBCluster.go index aaed84f3675..baea6ecbcfc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/rds/api_op_CreateDBCluster.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/rds/api_op_CreateDBCluster.go @@ -30,6 +30,11 @@ import ( // source. For more information about Multi-AZ DB clusters, see [Multi-AZ DB cluster deployments]in the Amazon RDS // User Guide. // +// You can use the WithExpressConfiguration parameter to create an Aurora DB +// Cluster with express configuration and create cluster in seconds. Express +// configuration provides a cluster with a writer instance and feature specific +// values set to all other input parameters of this API. 
+// // [CreateDBInstance]: https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html // [What is Amazon Aurora?]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html // [Multi-AZ DB cluster deployments]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html @@ -864,6 +869,13 @@ type CreateDBClusterInput struct { // Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters VpcSecurityGroupIds []string + // Specifies to create an Aurora DB Cluster with express configuration in seconds. + // Express configuration provides a cluster with a writer instance and feature + // specific values set to all other input parameters of this API. + // + // Valid for Cluster Type: Aurora DB clusters + WithExpressConfiguration *bool + // Used by the SDK's PresignURL autofill customization to specify the region the // of the client's request. destinationRegion *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/rds/api_op_RestoreDBClusterFromSnapshot.go b/vendor/github.com/aws/aws-sdk-go-v2/service/rds/api_op_RestoreDBClusterFromSnapshot.go index 4ae49612dd6..527793ef064 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/rds/api_op_RestoreDBClusterFromSnapshot.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/rds/api_op_RestoreDBClusterFromSnapshot.go @@ -17,6 +17,16 @@ import ( // configuration. If you don't specify a security group, the new DB cluster is // associated with the default security group. // +// You can use the EnableVPCNetworking and EnableInternetAccessGateway parameters +// together to restore an Aurora PostgreSQL cluster without VPC networking and with +// internet-based connectivity. These two parameters must always be specified +// together. Set EnableVPCNetworking to false to disable the VPC network interface +// (ENI) for the cluster. EnableInternetAccessGateway enables internet-based +// connectivity through an internet access gateway. 
IAM database authentication is +// required and must be enabled using EnableIAMDatabaseAuthentication . Once the +// cluster is restored, you need to modify the DB cluster to update +// MasterUserAuthenticationType to iam-db-auth . +// // This operation only restores the DB cluster, not the DB instances for that DB // cluster. You must invoke the CreateDBInstance operation to create DB instances // for the restored DB cluster, specifying the identifier of the restored DB @@ -244,9 +254,31 @@ type RestoreDBClusterFromSnapshotInput struct { // [IAM database authentication for MariaDB, MySQL, and PostgreSQL]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html EnableIAMDatabaseAuthentication *bool + // Specifies that the restored DB cluster should use internet-based connectivity + // through an internet access gateway. This allows clients to connect to the + // cluster over the internet without requiring a VPC. + // + // This parameter must be used together with EnableVPCNetworking set to false . + // When both parameters are specified, IAM database authentication is required. You + // must also specify EnableIAMDatabaseAuthentication . + // + // Valid for Cluster Type: Aurora PostgreSQL clusters + EnableInternetAccessGateway *bool + // Specifies whether to turn on Performance Insights for the DB cluster. EnablePerformanceInsights *bool + // Specifies whether to enable VPC networking for the restored DB cluster. Set + // this parameter to false to create a cluster without the VPC network interface + // (ENI). + // + // This parameter must be used together with EnableInternetAccessGateway . When + // both parameters are specified, IAM database authentication is required. You must + // also specify EnableIAMDatabaseAuthentication . + // + // Valid for Cluster Type: Aurora PostgreSQL clusters + EnableVPCNetworking *bool + // The life cycle type for this DB cluster. 
// // By default, this value is set to open-source-rds-extended-support , which diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/rds/api_op_RestoreDBClusterToPointInTime.go b/vendor/github.com/aws/aws-sdk-go-v2/service/rds/api_op_RestoreDBClusterToPointInTime.go index b2742a9a8b5..e2818672d66 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/rds/api_op_RestoreDBClusterToPointInTime.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/rds/api_op_RestoreDBClusterToPointInTime.go @@ -21,6 +21,16 @@ import ( // the original DB cluster. The AZ where RDS restores the DB cluster depends on the // AZs in the specified subnet group. // +// You can use the EnableVPCNetworking and EnableInternetAccessGateway parameters +// together to restore an Aurora PostgreSQL cluster without VPC networking and with +// internet-based connectivity. These two parameters must always be specified +// together. Set EnableVPCNetworking to false to disable the VPC network interface +// (ENI) for the cluster. EnableInternetAccessGateway enables internet-based +// connectivity through an internet access gateway. IAM database authentication is +// required and must be enabled using EnableIAMDatabaseAuthentication . Once the +// cluster is restored, you need to modify the DB cluster to update +// MasterUserAuthenticationType to iam-db-auth . +// // For Aurora, this operation only restores the DB cluster, not the DB instances // for that DB cluster. You must invoke the CreateDBInstance operation to create // DB instances for the restored DB cluster, specifying the identifier of the @@ -208,9 +218,31 @@ type RestoreDBClusterToPointInTimeInput struct { // [IAM database authentication for MariaDB, MySQL, and PostgreSQL]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html EnableIAMDatabaseAuthentication *bool + // Specifies that the restored DB cluster should use internet-based connectivity + // through an internet access gateway. 
This allows clients to connect to the + // cluster over the internet without requiring a VPC. + // + // This parameter must be used together with EnableVPCNetworking set to false . + // When both parameters are specified, IAM database authentication is required. You + // must also specify EnableIAMDatabaseAuthentication . + // + // Valid for Cluster Type: Aurora PostgreSQL clusters + EnableInternetAccessGateway *bool + // Specifies whether to turn on Performance Insights for the DB cluster. EnablePerformanceInsights *bool + // Specifies whether to enable VPC networking for the restored DB cluster. Set + // this parameter to false to create a cluster without the VPC network interface + // (ENI). + // + // This parameter must be used together with EnableInternetAccessGateway . When + // both parameters are specified, IAM database authentication is required. You must + // also specify EnableIAMDatabaseAuthentication . + // + // Valid for Cluster Type: Aurora PostgreSQL clusters + EnableVPCNetworking *bool + // The life cycle type for this DB cluster. 
// // By default, this value is set to open-source-rds-extended-support , which diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/rds/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/rds/deserializers.go index 797243a5b78..3fcf8a75341 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/rds/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/rds/deserializers.go @@ -29841,6 +29841,22 @@ func awsAwsquery_deserializeDocumentDBCluster(v **types.DBCluster, decoder smith sv.IAMDatabaseAuthenticationEnabled = ptr.Bool(xtv) } + case strings.EqualFold("InternetAccessGatewayEnabled", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected BooleanOptional to be of type *bool, got %T instead", val) + } + sv.InternetAccessGatewayEnabled = ptr.Bool(xtv) + } + case strings.EqualFold("IOOptimizedNextAllowedModificationTime", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -30286,6 +30302,22 @@ func awsAwsquery_deserializeDocumentDBCluster(v **types.DBCluster, decoder smith sv.UpgradeRolloutOrder = types.UpgradeRolloutOrder(xtv) } + case strings.EqualFold("VPCNetworkingEnabled", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected BooleanOptional to be of type *bool, got %T instead", val) + } + sv.VPCNetworkingEnabled = ptr.Bool(xtv) + } + case strings.EqualFold("VpcSecurityGroups", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) if err := awsAwsquery_deserializeDocumentVpcSecurityGroupMembershipList(&sv.VpcSecurityGroups, nodeDecoder); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/rds/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/rds/generated.json index 
e46658d0234..72ad01c509c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/rds/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/rds/generated.json @@ -187,7 +187,6 @@ "internal/endpoints/endpoints.go", "internal/endpoints/endpoints_test.go", "options.go", - "protocol_test.go", "serializers.go", "snapshot_test.go", "sra_operation_order_test.go", @@ -196,7 +195,7 @@ "types/types.go", "validators.go" ], - "go": "1.23", + "go": "1.24", "module": "github.com/aws/aws-sdk-go-v2/service/rds", "unstable": false } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/rds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/rds/go_module_metadata.go index 1bc632120bf..349b6d3ca9c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/rds/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/rds/go_module_metadata.go @@ -3,4 +3,4 @@ package rds // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.116.1" +const goModuleVersion = "1.117.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/rds/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/rds/options.go index 640007934c6..7b57a03c0c1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/rds/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/rds/options.go @@ -58,8 +58,7 @@ type Options struct { // the client option BaseEndpoint instead. EndpointResolver EndpointResolver - // Resolves the endpoint used for a particular service operation. This should be - // used over the deprecated EndpointResolver. + // Resolves the endpoint used for a particular service operation. 
EndpointResolverV2 EndpointResolverV2 // Signature Version 4 (SigV4) Signer diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/rds/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/rds/serializers.go index 37f2519388c..d12029dd10e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/rds/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/rds/serializers.go @@ -13102,6 +13102,11 @@ func awsAwsquery_serializeOpDocumentCreateDBClusterInput(v *CreateDBClusterInput } } + if v.WithExpressConfiguration != nil { + objectKey := object.Key("WithExpressConfiguration") + objectKey.Boolean(*v.WithExpressConfiguration) + } + return nil } @@ -18144,11 +18149,21 @@ func awsAwsquery_serializeOpDocumentRestoreDBClusterFromSnapshotInput(v *Restore objectKey.Boolean(*v.EnableIAMDatabaseAuthentication) } + if v.EnableInternetAccessGateway != nil { + objectKey := object.Key("EnableInternetAccessGateway") + objectKey.Boolean(*v.EnableInternetAccessGateway) + } + if v.EnablePerformanceInsights != nil { objectKey := object.Key("EnablePerformanceInsights") objectKey.Boolean(*v.EnablePerformanceInsights) } + if v.EnableVPCNetworking != nil { + objectKey := object.Key("EnableVPCNetworking") + objectKey.Boolean(*v.EnableVPCNetworking) + } + if v.Engine != nil { objectKey := object.Key("Engine") objectKey.String(*v.Engine) @@ -18345,11 +18360,21 @@ func awsAwsquery_serializeOpDocumentRestoreDBClusterToPointInTimeInput(v *Restor objectKey.Boolean(*v.EnableIAMDatabaseAuthentication) } + if v.EnableInternetAccessGateway != nil { + objectKey := object.Key("EnableInternetAccessGateway") + objectKey.Boolean(*v.EnableInternetAccessGateway) + } + if v.EnablePerformanceInsights != nil { objectKey := object.Key("EnablePerformanceInsights") objectKey.Boolean(*v.EnablePerformanceInsights) } + if v.EnableVPCNetworking != nil { + objectKey := object.Key("EnableVPCNetworking") + objectKey.Boolean(*v.EnableVPCNetworking) + } + if v.EngineLifecycleSupport != nil { objectKey := 
object.Key("EngineLifecycleSupport") objectKey.String(*v.EngineLifecycleSupport) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/rds/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/rds/types/types.go index b6729f824d0..181cbeea84d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/rds/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/rds/types/types.go @@ -942,6 +942,13 @@ type DBCluster struct { // This setting is only for Aurora DB clusters. IOOptimizedNextAllowedModificationTime *time.Time + // Indicates whether the DB cluster has internet-based connectivity enabled + // through an internet access gateway. + // + // This setting is applicable only for Aurora PostgreSQL clusters created through + // express configuration. + InternetAccessGatewayEnabled *bool + // The Provisioned IOPS (I/O operations per second) value. // // This setting is only for non-Aurora Multi-AZ DB clusters. @@ -1164,6 +1171,12 @@ type DBCluster struct { // - [last] - Usually reserved for production environments. UpgradeRolloutOrder UpgradeRolloutOrder + // Indicates whether the DB cluster uses VPC-based networking. + // + // This setting is applicable only for Aurora PostgreSQL clusters created through + // express configuration. + VPCNetworkingEnabled *bool + // The list of VPC security groups that the DB cluster belongs to. VpcSecurityGroups []VpcSecurityGroupMembership diff --git a/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go b/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go index dd950a286fb..b20f75e1624 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go @@ -1,8 +1,11 @@ +// Deprecated: aws-sdk-go is deprecated. Use aws-sdk-go-v2. +// See https://aws.amazon.com/blogs/developer/announcing-end-of-support-for-aws-sdk-for-go-v1-on-july-31-2025/. 
package bearer import ( - "github.com/aws/aws-sdk-go/aws" "time" + + "github.com/aws/aws-sdk-go/aws" ) // Token provides a type wrapping a bearer token and expiration metadata. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go index 99849c0e19c..8a31d481f1e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go @@ -1,4 +1,7 @@ // Package awserr represents API error interface accessors for the SDK. +// +// Deprecated: aws-sdk-go is deprecated. Use aws-sdk-go-v2. +// See https://aws.amazon.com/blogs/developer/announcing-end-of-support-for-aws-sdk-for-go-v1-on-july-31-2025/. package awserr // An Error wraps lower level errors with code, message and an original error. @@ -10,24 +13,23 @@ package awserr // // Example: // -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if awsErr, ok := err.(awserr.Error); ok { -// // Get error details -// log.Println("Error:", awsErr.Code(), awsErr.Message()) -// -// // Prints out full error message, including original error if there was one. -// log.Println("Error:", awsErr.Error()) -// -// // Get original error -// if origErr := awsErr.OrigErr(); origErr != nil { -// // operate on original error. -// } -// } else { -// fmt.Println(err.Error()) -// } -// } -// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Get error details +// log.Println("Error:", awsErr.Code(), awsErr.Message()) +// +// // Prints out full error message, including original error if there was one. +// log.Println("Error:", awsErr.Error()) +// +// // Get original error +// if origErr := awsErr.OrigErr(); origErr != nil { +// // operate on original error. +// } +// } else { +// fmt.Println(err.Error()) +// } +// } type Error interface { // Satisfy the generic error interface. 
error @@ -100,32 +102,31 @@ func NewBatchError(code, message string, errs []error) BatchedErrors { // // Example: // -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if reqerr, ok := err.(RequestFailure); ok { -// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) -// } else { -// log.Println("Error:", err.Error()) -// } -// } +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if reqerr, ok := err.(RequestFailure); ok { +// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) +// } else { +// log.Println("Error:", err.Error()) +// } +// } // // Combined with awserr.Error: // -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if awsErr, ok := err.(awserr.Error); ok { -// // Generic AWS Error with Code, Message, and original error (if any) -// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) -// -// if reqErr, ok := err.(awserr.RequestFailure); ok { -// // A service error occurred -// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) -// } -// } else { -// fmt.Println(err.Error()) -// } -// } -// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Generic AWS Error with Code, Message, and original error (if any) +// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) +// +// if reqErr, ok := err.(awserr.RequestFailure); ok { +// // A service error occurred +// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) +// } +// } else { +// fmt.Println(err.Error()) +// } +// } type RequestFailure interface { Error diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/doc.go new file mode 100644 index 00000000000..5ebbfcaeb5d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/doc.go @@ -0,0 +1,3 @@ +// Deprecated: aws-sdk-go is deprecated. Use aws-sdk-go-v2. 
+// See https://aws.amazon.com/blogs/developer/announcing-end-of-support-for-aws-sdk-for-go-v1-on-july-31-2025/. +package awsutil diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go index b147f103ce1..12b3f7ead0d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go @@ -1,3 +1,5 @@ +// Deprecated: aws-sdk-go is deprecated. Use aws-sdk-go-v2. +// See https://aws.amazon.com/blogs/developer/announcing-end-of-support-for-aws-sdk-for-go-v1-on-july-31-2025/. package client import ( diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go index a7530ebb389..972103eddd7 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go @@ -1,3 +1,5 @@ +// Deprecated: aws-sdk-go is deprecated. Use aws-sdk-go-v2. +// See https://aws.amazon.com/blogs/developer/announcing-end-of-support-for-aws-sdk-for-go-v1-on-july-31-2025/. package metadata // ClientInfo wraps immutable data from the client.Client structure. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go index 36a915efea8..fba8d74c726 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -1,3 +1,5 @@ +// Deprecated: aws-sdk-go is deprecated. Use aws-sdk-go-v2. +// See https://aws.amazon.com/blogs/developer/announcing-end-of-support-for-aws-sdk-for-go-v1-on-july-31-2025/. 
package corehandlers import ( diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go index a880a3de8fe..c561f4a6f06 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -14,38 +14,39 @@ // // Example of using the environment variable credentials. // -// creds := credentials.NewEnvCredentials() +// creds := credentials.NewEnvCredentials() // -// // Retrieve the credentials value -// credValue, err := creds.Get() -// if err != nil { -// // handle error -// } +// // Retrieve the credentials value +// credValue, err := creds.Get() +// if err != nil { +// // handle error +// } // // Example of forcing credentials to expire and be refreshed on the next Get(). // This may be helpful to proactively expire credentials and refresh them sooner // than they would naturally expire on their own. // -// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{}) -// creds.Expire() -// credsValue, err := creds.Get() -// // New credentials will be retrieved instead of from cache. +// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{}) +// creds.Expire() +// credsValue, err := creds.Get() +// // New credentials will be retrieved instead of from cache. // -// -// Custom Provider +// # Custom Provider // // Each Provider built into this package also provides a helper method to generate // a Credentials pointer setup with the provider. To use a custom Provider just // create a type which satisfies the Provider interface and pass it to the // NewCredentials method. 
// -// type MyProvider struct{} -// func (m *MyProvider) Retrieve() (Value, error) {...} -// func (m *MyProvider) IsExpired() bool {...} +// type MyProvider struct{} +// func (m *MyProvider) Retrieve() (Value, error) {...} +// func (m *MyProvider) IsExpired() bool {...} // -// creds := credentials.NewCredentials(&MyProvider{}) -// credValue, err := creds.Get() +// creds := credentials.NewCredentials(&MyProvider{}) +// credValue, err := creds.Get() // +// Deprecated: aws-sdk-go is deprecated. Use aws-sdk-go-v2. +// See https://aws.amazon.com/blogs/developer/announcing-end-of-support-for-aws-sdk-for-go-v1-on-july-31-2025/. package credentials import ( @@ -64,10 +65,10 @@ import ( // when making service API calls. For example, when accessing public // s3 buckets. // -// svc := s3.New(session.Must(session.NewSession(&aws.Config{ -// Credentials: credentials.AnonymousCredentials, -// }))) -// // Access public S3 buckets. +// svc := s3.New(session.Must(session.NewSession(&aws.Config{ +// Credentials: credentials.AnonymousCredentials, +// }))) +// // Access public S3 buckets. var AnonymousCredentials = NewStaticCredentials("", "", "") // A Value is the AWS credentials value for individual credential fields. @@ -150,10 +151,11 @@ func (p ErrorProvider) IsExpired() bool { // provider's struct. // // Example: -// type EC2RoleProvider struct { -// Expiry -// ... -// } +// +// type EC2RoleProvider struct { +// Expiry +// ... +// } type Expiry struct { // The date/time when to expire on expiration time.Time diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go index 25a66d1dda2..7636aa6aa68 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go @@ -3,7 +3,7 @@ // control options, and configuration for the CSM client. The client can be // controlled manually, or automatically via the SDK's Session configuration. 
// -// Enabling CSM client via SDK's Session configuration +// # Enabling CSM client via SDK's Session configuration // // The CSM client can be enabled automatically via SDK's Session configuration. // The SDK's session configuration enables the CSM client if the AWS_CSM_PORT @@ -12,39 +12,39 @@ // The configuration options for the CSM client via the SDK's session // configuration are: // -// * AWS_CSM_PORT= -// The port number the CSM agent will receive metrics on. +// - AWS_CSM_PORT= +// The port number the CSM agent will receive metrics on. // -// * AWS_CSM_HOST= -// The hostname, or IP address the CSM agent will receive metrics on. -// Without port number. +// - AWS_CSM_HOST= +// The hostname, or IP address the CSM agent will receive metrics on. +// Without port number. // -// Manually enabling the CSM client +// # Manually enabling the CSM client // // The CSM client can be started, paused, and resumed manually. The Start // function will enable the CSM client to publish metrics to the CSM agent. It // is safe to call Start concurrently, but if Start is called additional times // with different ClientID or address it will panic. // -// r, err := csm.Start("clientID", ":31000") -// if err != nil { -// panic(fmt.Errorf("failed starting CSM: %v", err)) -// } +// r, err := csm.Start("clientID", ":31000") +// if err != nil { +// panic(fmt.Errorf("failed starting CSM: %v", err)) +// } // // When controlling the CSM client manually, you must also inject its request // handlers into the SDK's Session configuration for the SDK's API clients to // publish metrics. // -// sess, err := session.NewSession(&aws.Config{}) -// if err != nil { -// panic(fmt.Errorf("failed loading session: %v", err)) -// } +// sess, err := session.NewSession(&aws.Config{}) +// if err != nil { +// panic(fmt.Errorf("failed loading session: %v", err)) +// } // -// // Add CSM client's metric publishing request handlers to the SDK's -// // Session Configuration. 
-// r.InjectHandlers(&sess.Handlers) +// // Add CSM client's metric publishing request handlers to the SDK's +// // Session Configuration. +// r.InjectHandlers(&sess.Handlers) // -// Controlling CSM client +// # Controlling CSM client // // Once the CSM client has been enabled the Get function will return a Reporter // value that you can use to pause and resume the metrics published to the CSM @@ -54,16 +54,19 @@ // The Pause method can be called to stop the CSM client publishing metrics to // the CSM agent. The Continue method will resume metric publishing. // -// // Get the CSM client Reporter. -// r := csm.Get() +// // Get the CSM client Reporter. +// r := csm.Get() // -// // Will pause monitoring -// r.Pause() -// resp, err = client.GetObject(&s3.GetObjectInput{ -// Bucket: aws.String("bucket"), -// Key: aws.String("key"), -// }) +// // Will pause monitoring +// r.Pause() +// resp, err = client.GetObject(&s3.GetObjectInput{ +// Bucket: aws.String("bucket"), +// Key: aws.String("key"), +// }) // -// // Resume monitoring -// r.Continue() +// // Resume monitoring +// r.Continue() +// +// Deprecated: aws-sdk-go is deprecated. Use aws-sdk-go-v2. +// See https://aws.amazon.com/blogs/developer/announcing-end-of-support-for-aws-sdk-for-go-v1-on-july-31-2025/. package csm diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go index 1ba80b57609..3675504c6b8 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -5,6 +5,9 @@ // instead. This package is useful when you need to reset the defaults // of a session or service client to the SDK defaults before setting // additional parameters. +// +// Deprecated: aws-sdk-go is deprecated. Use aws-sdk-go-v2. +// See https://aws.amazon.com/blogs/developer/announcing-end-of-support-for-aws-sdk-for-go-v1-on-july-31-2025/. 
package defaults import ( diff --git a/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/doc.go index 4fcb6161848..bf20b773b3b 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/doc.go @@ -1,7 +1,7 @@ // Package aws provides the core SDK's utilities and shared types. Use this package's // utilities to simplify setting and reading API operations parameters. // -// Value and Pointer Conversion Utilities +// # Value and Pointer Conversion Utilities // // This package includes a helper conversion utility for each scalar type the SDK's // API use. These utilities make getting a pointer of the scalar, and dereferencing @@ -16,33 +16,33 @@ // to get pointer of a literal string value, because getting the address of a // literal requires assigning the value to a variable first. // -// var strPtr *string +// var strPtr *string // -// // Without the SDK's conversion functions -// str := "my string" -// strPtr = &str +// // Without the SDK's conversion functions +// str := "my string" +// strPtr = &str // -// // With the SDK's conversion functions -// strPtr = aws.String("my string") +// // With the SDK's conversion functions +// strPtr = aws.String("my string") // -// // Convert *string to string value -// str = aws.StringValue(strPtr) +// // Convert *string to string value +// str = aws.StringValue(strPtr) // // In addition to scalars the aws package also includes conversion utilities for // map and slice for commonly types used in API parameters. The map and slice // conversion functions use similar naming pattern as the scalar conversion // functions. 
// -// var strPtrs []*string -// var strs []string = []string{"Go", "Gophers", "Go"} +// var strPtrs []*string +// var strs []string = []string{"Go", "Gophers", "Go"} // -// // Convert []string to []*string -// strPtrs = aws.StringSlice(strs) +// // Convert []string to []*string +// strPtrs = aws.StringSlice(strs) // -// // Convert []*string to []string -// strs = aws.StringValueSlice(strPtrs) +// // Convert []*string to []string +// strs = aws.StringValueSlice(strPtrs) // -// SDK Default HTTP Client +// # SDK Default HTTP Client // // The SDK will use the http.DefaultClient if a HTTP client is not provided to // the SDK's Session, or service client constructor. This means that if the @@ -53,4 +53,7 @@ // a custom HTTP Client to share explicitly through your application. You can // configure the SDK to use the custom HTTP Client by setting the HTTPClient // value of the SDK's Config type when creating a Session or service client. +// +// Deprecated: aws-sdk-go is deprecated. Use aws-sdk-go-v2. +// See https://aws.amazon.com/blogs/developer/announcing-end-of-support-for-aws-sdk-for-go-v1-on-july-31-2025/. package aws diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go index f4cc8751d04..5fcc35a1fce 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -9,6 +9,9 @@ // The endpoint of the EC2 IMDS client can be configured via the environment // variable, AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a // Session. See aws/session#Options.EC2IMDSEndpoint for more details. +// +// Deprecated: aws-sdk-go is deprecated. Use aws-sdk-go-v2. +// See https://aws.amazon.com/blogs/developer/announcing-end-of-support-for-aws-sdk-for-go-v1-on-july-31-2025/. 
package ec2metadata import ( diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go index 66dec6bebf0..7ba7aee50f3 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go @@ -62,4 +62,7 @@ // Region: aws.String("us-west-2"), // EndpointResolver: endpoints.ResolverFunc(myCustomResolver), // })) +// +// Deprecated: aws-sdk-go is deprecated. Use aws-sdk-go-v2. +// See https://aws.amazon.com/blogs/developer/announcing-end-of-support-for-aws-sdk-for-go-v1-on-july-31-2025/. package endpoints diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/request/doc.go new file mode 100644 index 00000000000..bd3c3441f06 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/doc.go @@ -0,0 +1,3 @@ +// Deprecated: aws-sdk-go is deprecated. Use aws-sdk-go-v2. +// See https://aws.amazon.com/blogs/developer/announcing-end-of-support-for-aws-sdk-for-go-v1-on-july-31-2025/. +package request diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go index ff3cc012ae3..f91389034a5 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -1,367 +1,366 @@ -/* -Package session provides configuration for the SDK's service clients. Sessions -can be shared across service clients that share the same base configuration. - -Sessions are safe to use concurrently as long as the Session is not being -modified. Sessions should be cached when possible, because creating a new -Session will load all configuration values from the environment, and config -files each time the Session is created. Sharing the Session value across all of -your service clients will ensure the configuration is loaded the fewest number -of times possible. 
- -Sessions options from Shared Config - -By default NewSession will only load credentials from the shared credentials -file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is -set to a truthy value the Session will be created from the configuration -values from the shared config (~/.aws/config) and shared credentials -(~/.aws/credentials) files. Using the NewSessionWithOptions with -SharedConfigState set to SharedConfigEnable will create the session as if the -AWS_SDK_LOAD_CONFIG environment variable was set. - -Credential and config loading order - -The Session will attempt to load configuration and credentials from the -environment, configuration files, and other credential sources. The order -configuration is loaded in is: - - * Environment Variables - * Shared Credentials file - * Shared Configuration file (if SharedConfig is enabled) - * EC2 Instance Metadata (credentials only) - -The Environment variables for credentials will have precedence over shared -config even if SharedConfig is enabled. To override this behavior, and use -shared config credentials instead specify the session.Options.Profile, (e.g. -when using credential_source=Environment to assume a role). - - sess, err := session.NewSessionWithOptions(session.Options{ - Profile: "myProfile", - }) - -Creating Sessions - -Creating a Session without additional options will load credentials region, and -profile loaded from the environment and shared config automatically. See, -"Environment Variables" section for information on environment variables used -by Session. - - // Create Session - sess, err := session.NewSession() - - -When creating Sessions optional aws.Config values can be passed in that will -override the default, or loaded, config values the Session is being created -with. This allows you to provide additional, or case based, configuration -as needed. 
- - // Create a Session with a custom region - sess, err := session.NewSession(&aws.Config{ - Region: aws.String("us-west-2"), - }) - -Use NewSessionWithOptions to provide additional configuration driving how the -Session's configuration will be loaded. Such as, specifying shared config -profile, or override the shared config state, (AWS_SDK_LOAD_CONFIG). - - // Equivalent to session.NewSession() - sess, err := session.NewSessionWithOptions(session.Options{ - // Options - }) - - sess, err := session.NewSessionWithOptions(session.Options{ - // Specify profile to load for the session's config - Profile: "profile_name", - - // Provide SDK Config options, such as Region. - Config: aws.Config{ - Region: aws.String("us-west-2"), - }, - - // Force enable Shared Config support - SharedConfigState: session.SharedConfigEnable, - }) - -Adding Handlers - -You can add handlers to a session to decorate API operation, (e.g. adding HTTP -headers). All clients that use the Session receive a copy of the Session's -handlers. For example, the following request handler added to the Session logs -every requests made. - - // Create a session, and add additional handlers for all service - // clients created with the Session to inherit. Adds logging handler. - sess := session.Must(session.NewSession()) - - sess.Handlers.Send.PushFront(func(r *request.Request) { - // Log every request made and its payload - logger.Printf("Request: %s/%s, Params: %s", - r.ClientInfo.ServiceName, r.Operation, r.Params) - }) - -Shared Config Fields - -By default the SDK will only load the shared credentials file's -(~/.aws/credentials) credentials values, and all other config is provided by -the environment variables, SDK defaults, and user provided aws.Config values. - -If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable -option is used to create the Session the full shared config values will be -loaded. This includes credentials, region, and support for assume role. 
In -addition the Session will load its configuration from both the shared config -file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both -files have the same format. - -If both config files are present the configuration from both files will be -read. The Session will be created from configuration values from the shared -credentials file (~/.aws/credentials) over those in the shared config file -(~/.aws/config). - -Credentials are the values the SDK uses to authenticating requests with AWS -Services. When specified in a file, both aws_access_key_id and -aws_secret_access_key must be provided together in the same file to be -considered valid. They will be ignored if both are not present. -aws_session_token is an optional field that can be provided in addition to the -other two fields. - - aws_access_key_id = AKID - aws_secret_access_key = SECRET - aws_session_token = TOKEN - - ; region only supported if SharedConfigEnabled. - region = us-east-1 - -Assume Role configuration - -The role_arn field allows you to configure the SDK to assume an IAM role using -a set of credentials from another source. Such as when paired with static -credentials, "profile_source", "credential_process", or "credential_source" -fields. If "role_arn" is provided, a source of credentials must also be -specified, such as "source_profile", "credential_source", or -"credential_process". - - role_arn = arn:aws:iam:::role/ - source_profile = profile_with_creds - external_id = 1234 - mfa_serial = - role_session_name = session_name - - -The SDK supports assuming a role with MFA token. If "mfa_serial" is set, you -must also set the Session Option.AssumeRoleTokenProvider. The Session will fail -to load if the AssumeRoleTokenProvider is not specified. - - sess := session.Must(session.NewSessionWithOptions(session.Options{ - AssumeRoleTokenProvider: stscreds.StdinTokenProvider, - })) - -To setup Assume Role outside of a session see the stscreds.AssumeRoleProvider -documentation. 
- -Environment Variables - -When a Session is created several environment variables can be set to adjust -how the SDK functions, and what configuration data it loads when creating -Sessions. All environment values are optional, but some values like credentials -require multiple of the values to set or the partial values will be ignored. -All environment variable values are strings unless otherwise noted. - -Environment configuration values. If set both Access Key ID and Secret Access -Key must be provided. Session Token and optionally also be provided, but is -not required. - - # Access Key ID - AWS_ACCESS_KEY_ID=AKID - AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. - - # Secret Access Key - AWS_SECRET_ACCESS_KEY=SECRET - AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. - - # Session Token - AWS_SESSION_TOKEN=TOKEN - -Region value will instruct the SDK where to make service API requests to. If is -not provided in the environment the region must be provided before a service -client request is made. - - AWS_REGION=us-east-1 - - # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, - # and AWS_REGION is not also set. - AWS_DEFAULT_REGION=us-east-1 - -Profile name the SDK should load use when loading shared config from the -configuration files. If not provided "default" will be used as the profile name. - - AWS_PROFILE=my_profile - - # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, - # and AWS_PROFILE is not also set. - AWS_DEFAULT_PROFILE=my_profile - -SDK load config instructs the SDK to load the shared config in addition to -shared credentials. This also expands the configuration loaded so the shared -credentials will have parity with the shared config file. This also enables -Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE -env values as well. 
- - AWS_SDK_LOAD_CONFIG=1 - -Custom Shared Config and Credential Files - -Shared credentials file path can be set to instruct the SDK to use an alternative -file for the shared credentials. If not set the file will be loaded from -$HOME/.aws/credentials on Linux/Unix based systems, and -%USERPROFILE%\.aws\credentials on Windows. - - AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials - -Shared config file path can be set to instruct the SDK to use an alternative -file for the shared config. If not set the file will be loaded from -$HOME/.aws/config on Linux/Unix based systems, and -%USERPROFILE%\.aws\config on Windows. - - AWS_CONFIG_FILE=$HOME/my_shared_config - -Custom CA Bundle - -Path to a custom Credentials Authority (CA) bundle PEM file that the SDK -will use instead of the default system's root CA bundle. Use this only -if you want to replace the CA bundle the SDK uses for TLS requests. - - AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle - -Enabling this option will attempt to merge the Transport into the SDK's HTTP -client. If the client's Transport is not a http.Transport an error will be -returned. If the Transport's TLS config is set this option will cause the SDK -to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file -contains multiple certificates all of them will be loaded. - -The Session option CustomCABundle is also available when creating sessions -to also enable this feature. CustomCABundle session option field has priority -over the AWS_CA_BUNDLE environment variable, and will be used if both are set. - -Setting a custom HTTPClient in the aws.Config options will override this setting. -To use this option and custom HTTP client, the HTTP client needs to be provided -when creating the session. Not the service client. 
- -Custom Client TLS Certificate - -The SDK supports the environment and session option being configured with -Client TLS certificates that are sent as a part of the client's TLS handshake -for client authentication. If used, both Cert and Key values are required. If -one is missing, or either fail to load the contents of the file an error will -be returned. - -HTTP Client's Transport concrete implementation must be a http.Transport -or creating the session will fail. - - AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key - AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert - -This can also be configured via the session.Options ClientTLSCert and ClientTLSKey. - - sess, err := session.NewSessionWithOptions(session.Options{ - ClientTLSCert: myCertFile, - ClientTLSKey: myKeyFile, - }) - -Custom EC2 IMDS Endpoint - -The endpoint of the EC2 IMDS client can be configured via the environment -variable, AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a -Session. See Options.EC2IMDSEndpoint for more details. - - AWS_EC2_METADATA_SERVICE_ENDPOINT=http://169.254.169.254 - -If using an URL with an IPv6 address literal, the IPv6 address -component must be enclosed in square brackets. - - AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1] - -The custom EC2 IMDS endpoint can also be specified via the Session options. - - sess, err := session.NewSessionWithOptions(session.Options{ - EC2MetadataEndpoint: "http://[::1]", - }) - -FIPS and DualStack Endpoints - -The SDK can be configured to resolve an endpoint with certain capabilities such as FIPS and DualStack. - -You can configure a FIPS endpoint using an environment variable, shared config ($HOME/.aws/config), -or programmatically. - -To configure a FIPS endpoint set the environment variable set the AWS_USE_FIPS_ENDPOINT to true or false to enable -or disable FIPS endpoint resolution. 
- - AWS_USE_FIPS_ENDPOINT=true - -To configure a FIPS endpoint using shared config, set use_fips_endpoint to true or false to enable -or disable FIPS endpoint resolution. - - [profile myprofile] - region=us-west-2 - use_fips_endpoint=true - -To configure a FIPS endpoint programmatically - - // Option 1: Configure it on a session for all clients - sess, err := session.NewSessionWithOptions(session.Options{ - UseFIPSEndpoint: endpoints.FIPSEndpointStateEnabled, - }) - if err != nil { - // handle error - } - - client := s3.New(sess) - - // Option 2: Configure it per client - sess, err := session.NewSession() - if err != nil { - // handle error - } - - client := s3.New(sess, &aws.Config{ - UseFIPSEndpoint: endpoints.FIPSEndpointStateEnabled, - }) - -You can configure a DualStack endpoint using an environment variable, shared config ($HOME/.aws/config), -or programmatically. - -To configure a DualStack endpoint set the environment variable set the AWS_USE_DUALSTACK_ENDPOINT to true or false to -enable or disable DualStack endpoint resolution. - - AWS_USE_DUALSTACK_ENDPOINT=true - -To configure a DualStack endpoint using shared config, set use_dualstack_endpoint to true or false to enable -or disable DualStack endpoint resolution. - - [profile myprofile] - region=us-west-2 - use_dualstack_endpoint=true - -To configure a DualStack endpoint programmatically - - // Option 1: Configure it on a session for all clients - sess, err := session.NewSessionWithOptions(session.Options{ - UseDualStackEndpoint: endpoints.DualStackEndpointStateEnabled, - }) - if err != nil { - // handle error - } - - client := s3.New(sess) - - // Option 2: Configure it per client - sess, err := session.NewSession() - if err != nil { - // handle error - } - - client := s3.New(sess, &aws.Config{ - UseDualStackEndpoint: endpoints.DualStackEndpointStateEnabled, - }) -*/ +// Package session provides configuration for the SDK's service clients. 
Sessions +// can be shared across service clients that share the same base configuration. +// +// Sessions are safe to use concurrently as long as the Session is not being +// modified. Sessions should be cached when possible, because creating a new +// Session will load all configuration values from the environment, and config +// files each time the Session is created. Sharing the Session value across all of +// your service clients will ensure the configuration is loaded the fewest number +// of times possible. +// +// # Sessions options from Shared Config +// +// By default NewSession will only load credentials from the shared credentials +// file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is +// set to a truthy value the Session will be created from the configuration +// values from the shared config (~/.aws/config) and shared credentials +// (~/.aws/credentials) files. Using the NewSessionWithOptions with +// SharedConfigState set to SharedConfigEnable will create the session as if the +// AWS_SDK_LOAD_CONFIG environment variable was set. +// +// # Credential and config loading order +// +// The Session will attempt to load configuration and credentials from the +// environment, configuration files, and other credential sources. The order +// configuration is loaded in is: +// +// - Environment Variables +// - Shared Credentials file +// - Shared Configuration file (if SharedConfig is enabled) +// - EC2 Instance Metadata (credentials only) +// +// The Environment variables for credentials will have precedence over shared +// config even if SharedConfig is enabled. To override this behavior, and use +// shared config credentials instead specify the session.Options.Profile, (e.g. +// when using credential_source=Environment to assume a role). 
+// +// sess, err := session.NewSessionWithOptions(session.Options{ +// Profile: "myProfile", +// }) +// +// # Creating Sessions +// +// Creating a Session without additional options will load credentials region, and +// profile loaded from the environment and shared config automatically. See, +// "Environment Variables" section for information on environment variables used +// by Session. +// +// // Create Session +// sess, err := session.NewSession() +// +// When creating Sessions optional aws.Config values can be passed in that will +// override the default, or loaded, config values the Session is being created +// with. This allows you to provide additional, or case based, configuration +// as needed. +// +// // Create a Session with a custom region +// sess, err := session.NewSession(&aws.Config{ +// Region: aws.String("us-west-2"), +// }) +// +// Use NewSessionWithOptions to provide additional configuration driving how the +// Session's configuration will be loaded. Such as, specifying shared config +// profile, or override the shared config state, (AWS_SDK_LOAD_CONFIG). +// +// // Equivalent to session.NewSession() +// sess, err := session.NewSessionWithOptions(session.Options{ +// // Options +// }) +// +// sess, err := session.NewSessionWithOptions(session.Options{ +// // Specify profile to load for the session's config +// Profile: "profile_name", +// +// // Provide SDK Config options, such as Region. +// Config: aws.Config{ +// Region: aws.String("us-west-2"), +// }, +// +// // Force enable Shared Config support +// SharedConfigState: session.SharedConfigEnable, +// }) +// +// # Adding Handlers +// +// You can add handlers to a session to decorate API operation, (e.g. adding HTTP +// headers). All clients that use the Session receive a copy of the Session's +// handlers. For example, the following request handler added to the Session logs +// every requests made. 
+// +// // Create a session, and add additional handlers for all service +// // clients created with the Session to inherit. Adds logging handler. +// sess := session.Must(session.NewSession()) +// +// sess.Handlers.Send.PushFront(func(r *request.Request) { +// // Log every request made and its payload +// logger.Printf("Request: %s/%s, Params: %s", +// r.ClientInfo.ServiceName, r.Operation, r.Params) +// }) +// +// # Shared Config Fields +// +// By default the SDK will only load the shared credentials file's +// (~/.aws/credentials) credentials values, and all other config is provided by +// the environment variables, SDK defaults, and user provided aws.Config values. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable +// option is used to create the Session the full shared config values will be +// loaded. This includes credentials, region, and support for assume role. In +// addition the Session will load its configuration from both the shared config +// file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both +// files have the same format. +// +// If both config files are present the configuration from both files will be +// read. The Session will be created from configuration values from the shared +// credentials file (~/.aws/credentials) over those in the shared config file +// (~/.aws/config). +// +// Credentials are the values the SDK uses to authenticating requests with AWS +// Services. When specified in a file, both aws_access_key_id and +// aws_secret_access_key must be provided together in the same file to be +// considered valid. They will be ignored if both are not present. +// aws_session_token is an optional field that can be provided in addition to the +// other two fields. +// +// aws_access_key_id = AKID +// aws_secret_access_key = SECRET +// aws_session_token = TOKEN +// +// ; region only supported if SharedConfigEnabled. 
+// region = us-east-1 +// +// # Assume Role configuration +// +// The role_arn field allows you to configure the SDK to assume an IAM role using +// a set of credentials from another source. Such as when paired with static +// credentials, "profile_source", "credential_process", or "credential_source" +// fields. If "role_arn" is provided, a source of credentials must also be +// specified, such as "source_profile", "credential_source", or +// "credential_process". +// +// role_arn = arn:aws:iam:::role/ +// source_profile = profile_with_creds +// external_id = 1234 +// mfa_serial = +// role_session_name = session_name +// +// The SDK supports assuming a role with MFA token. If "mfa_serial" is set, you +// must also set the Session Option.AssumeRoleTokenProvider. The Session will fail +// to load if the AssumeRoleTokenProvider is not specified. +// +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// AssumeRoleTokenProvider: stscreds.StdinTokenProvider, +// })) +// +// To setup Assume Role outside of a session see the stscreds.AssumeRoleProvider +// documentation. +// +// # Environment Variables +// +// When a Session is created several environment variables can be set to adjust +// how the SDK functions, and what configuration data it loads when creating +// Sessions. All environment values are optional, but some values like credentials +// require multiple of the values to set or the partial values will be ignored. +// All environment variable values are strings unless otherwise noted. +// +// Environment configuration values. If set both Access Key ID and Secret Access +// Key must be provided. Session Token and optionally also be provided, but is +// not required. +// +// # Access Key ID +// AWS_ACCESS_KEY_ID=AKID +// AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. +// +// # Secret Access Key +// AWS_SECRET_ACCESS_KEY=SECRET +// AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. 
+// +// # Session Token +// AWS_SESSION_TOKEN=TOKEN +// +// Region value will instruct the SDK where to make service API requests to. If is +// not provided in the environment the region must be provided before a service +// client request is made. +// +// AWS_REGION=us-east-1 +// +// # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, +// # and AWS_REGION is not also set. +// AWS_DEFAULT_REGION=us-east-1 +// +// Profile name the SDK should load use when loading shared config from the +// configuration files. If not provided "default" will be used as the profile name. +// +// AWS_PROFILE=my_profile +// +// # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, +// # and AWS_PROFILE is not also set. +// AWS_DEFAULT_PROFILE=my_profile +// +// SDK load config instructs the SDK to load the shared config in addition to +// shared credentials. This also expands the configuration loaded so the shared +// credentials will have parity with the shared config file. This also enables +// Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE +// env values as well. +// +// AWS_SDK_LOAD_CONFIG=1 +// +// # Custom Shared Config and Credential Files +// +// Shared credentials file path can be set to instruct the SDK to use an alternative +// file for the shared credentials. If not set the file will be loaded from +// $HOME/.aws/credentials on Linux/Unix based systems, and +// %USERPROFILE%\.aws\credentials on Windows. +// +// AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials +// +// Shared config file path can be set to instruct the SDK to use an alternative +// file for the shared config. If not set the file will be loaded from +// $HOME/.aws/config on Linux/Unix based systems, and +// %USERPROFILE%\.aws\config on Windows. 
+// +// AWS_CONFIG_FILE=$HOME/my_shared_config +// +// # Custom CA Bundle +// +// Path to a custom Credentials Authority (CA) bundle PEM file that the SDK +// will use instead of the default system's root CA bundle. Use this only +// if you want to replace the CA bundle the SDK uses for TLS requests. +// +// AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle +// +// Enabling this option will attempt to merge the Transport into the SDK's HTTP +// client. If the client's Transport is not a http.Transport an error will be +// returned. If the Transport's TLS config is set this option will cause the SDK +// to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file +// contains multiple certificates all of them will be loaded. +// +// The Session option CustomCABundle is also available when creating sessions +// to also enable this feature. CustomCABundle session option field has priority +// over the AWS_CA_BUNDLE environment variable, and will be used if both are set. +// +// Setting a custom HTTPClient in the aws.Config options will override this setting. +// To use this option and custom HTTP client, the HTTP client needs to be provided +// when creating the session. Not the service client. +// +// # Custom Client TLS Certificate +// +// The SDK supports the environment and session option being configured with +// Client TLS certificates that are sent as a part of the client's TLS handshake +// for client authentication. If used, both Cert and Key values are required. If +// one is missing, or either fail to load the contents of the file an error will +// be returned. +// +// HTTP Client's Transport concrete implementation must be a http.Transport +// or creating the session will fail. +// +// AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key +// AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert +// +// This can also be configured via the session.Options ClientTLSCert and ClientTLSKey. 
+// +// sess, err := session.NewSessionWithOptions(session.Options{ +// ClientTLSCert: myCertFile, +// ClientTLSKey: myKeyFile, +// }) +// +// # Custom EC2 IMDS Endpoint +// +// The endpoint of the EC2 IMDS client can be configured via the environment +// variable, AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a +// Session. See Options.EC2IMDSEndpoint for more details. +// +// AWS_EC2_METADATA_SERVICE_ENDPOINT=http://169.254.169.254 +// +// If using an URL with an IPv6 address literal, the IPv6 address +// component must be enclosed in square brackets. +// +// AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1] +// +// The custom EC2 IMDS endpoint can also be specified via the Session options. +// +// sess, err := session.NewSessionWithOptions(session.Options{ +// EC2MetadataEndpoint: "http://[::1]", +// }) +// +// # FIPS and DualStack Endpoints +// +// The SDK can be configured to resolve an endpoint with certain capabilities such as FIPS and DualStack. +// +// You can configure a FIPS endpoint using an environment variable, shared config ($HOME/.aws/config), +// or programmatically. +// +// To configure a FIPS endpoint set the environment variable set the AWS_USE_FIPS_ENDPOINT to true or false to enable +// or disable FIPS endpoint resolution. +// +// AWS_USE_FIPS_ENDPOINT=true +// +// To configure a FIPS endpoint using shared config, set use_fips_endpoint to true or false to enable +// or disable FIPS endpoint resolution. 
+// +// [profile myprofile] +// region=us-west-2 +// use_fips_endpoint=true +// +// To configure a FIPS endpoint programmatically +// +// // Option 1: Configure it on a session for all clients +// sess, err := session.NewSessionWithOptions(session.Options{ +// UseFIPSEndpoint: endpoints.FIPSEndpointStateEnabled, +// }) +// if err != nil { +// // handle error +// } +// +// client := s3.New(sess) +// +// // Option 2: Configure it per client +// sess, err := session.NewSession() +// if err != nil { +// // handle error +// } +// +// client := s3.New(sess, &aws.Config{ +// UseFIPSEndpoint: endpoints.FIPSEndpointStateEnabled, +// }) +// +// You can configure a DualStack endpoint using an environment variable, shared config ($HOME/.aws/config), +// or programmatically. +// +// To configure a DualStack endpoint set the environment variable set the AWS_USE_DUALSTACK_ENDPOINT to true or false to +// enable or disable DualStack endpoint resolution. +// +// AWS_USE_DUALSTACK_ENDPOINT=true +// +// To configure a DualStack endpoint using shared config, set use_dualstack_endpoint to true or false to enable +// or disable DualStack endpoint resolution. +// +// [profile myprofile] +// region=us-west-2 +// use_dualstack_endpoint=true +// +// To configure a DualStack endpoint programmatically +// +// // Option 1: Configure it on a session for all clients +// sess, err := session.NewSessionWithOptions(session.Options{ +// UseDualStackEndpoint: endpoints.DualStackEndpointStateEnabled, +// }) +// if err != nil { +// // handle error +// } +// +// client := s3.New(sess) +// +// // Option 2: Configure it per client +// sess, err := session.NewSession() +// if err != nil { +// // handle error +// } +// +// client := s3.New(sess, &aws.Config{ +// UseDualStackEndpoint: endpoints.DualStackEndpointStateEnabled, +// }) +// +// Deprecated: aws-sdk-go is deprecated. Use aws-sdk-go-v2. +// See https://aws.amazon.com/blogs/developer/announcing-end-of-support-for-aws-sdk-for-go-v1-on-july-31-2025/. 
package session diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go index b542df93156..3370f03e813 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -52,6 +52,9 @@ // // Test `TestStandaloneSign` provides a complete example of using the signer // outside of the SDK and pre-escaping the URI path. +// +// Deprecated: aws-sdk-go is deprecated. Use aws-sdk-go-v2. +// See https://aws.amazon.com/blogs/developer/announcing-end-of-support-for-aws-sdk-for-go-v1-on-july-31-2025/. package v4 import ( diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index e72db4de1e4..4116bc3f18d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.55.7" +const SDKVersion = "1.55.8" diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go index 15e61a32282..91049220b7f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go @@ -42,4 +42,7 @@ // See the AWS Single Sign-On client SSO for more // information on creating client for this service. // https://docs.aws.amazon.com/sdk-for-go/api/service/sso/#New +// +// Deprecated: aws-sdk-go is deprecated. Use aws-sdk-go-v2. +// See https://aws.amazon.com/blogs/developer/announcing-end-of-support-for-aws-sdk-for-go-v1-on-july-31-2025/. 
package sso diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go index 083568c616f..1285e91e874 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go @@ -64,4 +64,7 @@ // See the AWS SSO OIDC client SSOOIDC for more // information on creating client for this service. // https://docs.aws.amazon.com/sdk-for-go/api/service/ssooidc/#New +// +// Deprecated: aws-sdk-go is deprecated. Use aws-sdk-go-v2. +// See https://aws.amazon.com/blogs/developer/announcing-end-of-support-for-aws-sdk-for-go-v1-on-july-31-2025/. package ssooidc diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go index ea1d9eb0ccf..d1a66b56086 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -28,4 +28,7 @@ // See the AWS Security Token Service client STS for more // information on creating client for this service. // https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New +// +// Deprecated: aws-sdk-go is deprecated. Use aws-sdk-go-v2. +// See https://aws.amazon.com/blogs/developer/announcing-end-of-support-for-aws-sdk-for-go-v1-on-july-31-2025/. 
package sts diff --git a/vendor/github.com/basgys/goxml2json/.gitignore b/vendor/github.com/basgys/goxml2json/.gitignore deleted file mode 100644 index 6bfad54227c..00000000000 --- a/vendor/github.com/basgys/goxml2json/.gitignore +++ /dev/null @@ -1,25 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof -/.tags diff --git a/vendor/github.com/basgys/goxml2json/LICENSE b/vendor/github.com/basgys/goxml2json/LICENSE deleted file mode 100644 index dc5a2e3ebe6..00000000000 --- a/vendor/github.com/basgys/goxml2json/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Bastien Gysler - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/vendor/github.com/basgys/goxml2json/README.md b/vendor/github.com/basgys/goxml2json/README.md deleted file mode 100644 index 0abdfcda4a2..00000000000 --- a/vendor/github.com/basgys/goxml2json/README.md +++ /dev/null @@ -1,107 +0,0 @@ -# goxml2json [![CircleCI](https://circleci.com/gh/basgys/goxml2json.svg?style=svg)](https://circleci.com/gh/basgys/goxml2json) - -Go package that converts XML to JSON - -### Install - - go get -u github.com/basgys/goxml2json - -### Importing - - import github.com/basgys/goxml2json - -### Usage - -**Code example** - -```go - package main - - import ( - "fmt" - "strings" - - xj "github.com/basgys/goxml2json" - ) - - func main() { - // xml is an io.Reader - xml := strings.NewReader(`world`) - json, err := xj.Convert(xml) - if err != nil { - panic("That's embarrassing...") - } - - fmt.Println(json.String()) - // {"hello": "world"} - } - -``` - -**Input** - -```xml - - - - bar - -``` - -**Output** - -```json - { - "osm": { - "-version": 0.6, - "-generator": "CGImap 0.0.2", - "bounds": { - "-minlat": "54.0889580", - "-minlon": "12.2487570", - "-maxlat": "54.0913900", - "-maxlon": "12.2524800" - }, - "foo": "bar" - } - } -``` - -**With type conversion** - -```go - package main - - import ( - "fmt" - "strings" - - xj "github.com/basgys/goxml2json" - ) - - func main() { - // xml is an io.Reader - xml := strings.NewReader(`19.95`) - json, err := xj.Convert(xml, xj.WithTypeConverter(xj.Float)) - if err != nil { - panic("That's embarrassing...") - } - - fmt.Println(json.String()) - // {"price": 19.95} - } -``` - -### Contributing -Feel free to contribute to this project if you want to fix/extend/improve it. 
- -### Contributors - - - [DirectX](https://github.com/directx) - - [powerslacker](https://github.com/powerslacker) - - [samuelhug](https://github.com/samuelhug) - -### TODO - - * Categorise errors - * Option to prettify the JSON output - * Benchmark diff --git a/vendor/github.com/basgys/goxml2json/converter.go b/vendor/github.com/basgys/goxml2json/converter.go deleted file mode 100644 index a1311ab8053..00000000000 --- a/vendor/github.com/basgys/goxml2json/converter.go +++ /dev/null @@ -1,26 +0,0 @@ -package xml2json - -import ( - "bytes" - "io" -) - -// Convert converts the given XML document to JSON -func Convert(r io.Reader, ps ...plugin) (*bytes.Buffer, error) { - // Decode XML document - root := &Node{} - err := NewDecoder(r, ps...).Decode(root) - if err != nil { - return nil, err - } - - // Then encode it in JSON - buf := new(bytes.Buffer) - e := NewEncoder(buf, ps...) - err = e.Encode(root) - if err != nil { - return nil, err - } - - return buf, nil -} diff --git a/vendor/github.com/basgys/goxml2json/decoder.go b/vendor/github.com/basgys/goxml2json/decoder.go deleted file mode 100644 index a45079f47c7..00000000000 --- a/vendor/github.com/basgys/goxml2json/decoder.go +++ /dev/null @@ -1,155 +0,0 @@ -package xml2json - -import ( - "encoding/xml" - "io" - "unicode" - - "golang.org/x/net/html/charset" -) - -const ( - attrPrefix = "-" - contentPrefix = "#" -) - -// A Decoder reads and decodes XML objects from an input stream. 
-type Decoder struct { - r io.Reader - err error - attributePrefix string - contentPrefix string - excludeAttrs map[string]bool - formatters []nodeFormatter -} - -type element struct { - parent *element - n *Node - label string -} - -func (dec *Decoder) SetAttributePrefix(prefix string) { - dec.attributePrefix = prefix -} - -func (dec *Decoder) SetContentPrefix(prefix string) { - dec.contentPrefix = prefix -} - -func (dec *Decoder) AddFormatters(formatters []nodeFormatter) { - dec.formatters = formatters -} - -func (dec *Decoder) ExcludeAttributes(attrs []string) { - for _, attr := range attrs { - dec.excludeAttrs[attr] = true - } -} - -func (dec *Decoder) DecodeWithCustomPrefixes(root *Node, contentPrefix string, attributePrefix string) error { - dec.contentPrefix = contentPrefix - dec.attributePrefix = attributePrefix - return dec.Decode(root) -} - -// NewDecoder returns a new decoder that reads from r. -func NewDecoder(r io.Reader, plugins ...plugin) *Decoder { - d := &Decoder{r: r, contentPrefix: contentPrefix, attributePrefix: attrPrefix, excludeAttrs: map[string]bool{}} - for _, p := range plugins { - d = p.AddToDecoder(d) - } - return d -} - -// Decode reads the next JSON-encoded value from its -// input and stores it in the value pointed to by v. 
-func (dec *Decoder) Decode(root *Node) error { - xmlDec := xml.NewDecoder(dec.r) - - // That will convert the charset if the provided XML is non-UTF-8 - xmlDec.CharsetReader = charset.NewReaderLabel - - // Create first element from the root node - elem := &element{ - parent: nil, - n: root, - } - - for { - t, _ := xmlDec.Token() - if t == nil { - break - } - - switch se := t.(type) { - case xml.StartElement: - // Build new a new current element and link it to its parent - elem = &element{ - parent: elem, - n: &Node{}, - label: se.Name.Local, - } - - // Extract attributes as children - for _, a := range se.Attr { - if _, ok := dec.excludeAttrs[a.Name.Local]; ok { - continue - } - elem.n.AddChild(dec.attributePrefix+a.Name.Local, &Node{Data: a.Value}) - } - case xml.CharData: - // Extract XML data (if any) - elem.n.Data = trimNonGraphic(string(xml.CharData(se))) - case xml.EndElement: - // And add it to its parent list - if elem.parent != nil { - elem.parent.n.AddChild(elem.label, elem.n) - } - - // Then change the current element to its parent - elem = elem.parent - } - } - - for _, formatter := range dec.formatters { - formatter.Format(root) - } - - return nil -} - -// trimNonGraphic returns a slice of the string s, with all leading and trailing -// non graphic characters and spaces removed. -// -// Graphic characters include letters, marks, numbers, punctuation, symbols, -// and spaces, from categories L, M, N, P, S, Zs. -// Spacing characters are set by category Z and property Pattern_White_Space. 
-func trimNonGraphic(s string) string { - if s == "" { - return s - } - - var first *int - var last int - for i, r := range []rune(s) { - if !unicode.IsGraphic(r) || unicode.IsSpace(r) { - continue - } - - if first == nil { - f := i // copy i - first = &f - last = i - } else { - last = i - } - } - - // If first is nil, it means there are no graphic characters - if first == nil { - return "" - } - - return string([]rune(s)[*first : last+1]) -} diff --git a/vendor/github.com/basgys/goxml2json/doc.go b/vendor/github.com/basgys/goxml2json/doc.go deleted file mode 100644 index 8a68bd30fb6..00000000000 --- a/vendor/github.com/basgys/goxml2json/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package xml2json is an XML to JSON converter -package xml2json diff --git a/vendor/github.com/basgys/goxml2json/encoder.go b/vendor/github.com/basgys/goxml2json/encoder.go deleted file mode 100644 index 61fafc57f35..00000000000 --- a/vendor/github.com/basgys/goxml2json/encoder.go +++ /dev/null @@ -1,191 +0,0 @@ -package xml2json - -import ( - "bytes" - "io" - "unicode/utf8" -) - -// An Encoder writes JSON objects to an output stream. -type Encoder struct { - w io.Writer - err error - contentPrefix string - attributePrefix string - tc encoderTypeConverter -} - -// NewEncoder returns a new encoder that writes to w. -func NewEncoder(w io.Writer, plugins ...plugin) *Encoder { - e := &Encoder{w: w, contentPrefix: contentPrefix, attributePrefix: attrPrefix} - for _, p := range plugins { - e = p.AddToEncoder(e) - } - return e -} - -// Encode writes the JSON encoding of v to the stream -func (enc *Encoder) Encode(root *Node) error { - if enc.err != nil { - return enc.err - } - if root == nil { - return nil - } - - enc.err = enc.format(root, 0) - - // Terminate each value with a newline. - // This makes the output look a little nicer - // when debugging, and some kind of space - // is required if the encoded value was a number, - // so that the reader knows there aren't more - // digits coming. 
- enc.write("\n") - - return enc.err -} - -func (enc *Encoder) format(n *Node, lvl int) error { - if n.IsComplex() { - enc.write("{") - - // Add data as an additional attibute (if any) - if len(n.Data) > 0 { - enc.write("\"") - enc.write(enc.contentPrefix) - enc.write("content") - enc.write("\": ") - enc.write(sanitiseString(n.Data)) - enc.write(", ") - } - - i := 0 - tot := len(n.Children) - for label, children := range n.Children { - enc.write("\"") - enc.write(label) - enc.write("\": ") - - if n.ChildrenAlwaysAsArray || len(children) > 1 { - // Array - enc.write("[") - for j, c := range children { - enc.format(c, lvl+1) - - if j < len(children)-1 { - enc.write(", ") - } - } - enc.write("]") - } else { - // Map - enc.format(children[0], lvl+1) - } - - if i < tot-1 { - enc.write(", ") - } - i++ - } - - enc.write("}") - } else { - s := sanitiseString(n.Data) - if enc.tc == nil { - // do nothing - } else { - s = enc.tc.Convert(s) - } - enc.write(s) - - } - - return nil -} - -func (enc *Encoder) write(s string) { - enc.w.Write([]byte(s)) -} - -// https://golang.org/src/encoding/json/encode.go?s=5584:5627#L788 -var hex = "0123456789abcdef" - -func sanitiseString(s string) string { - var buf bytes.Buffer - - buf.WriteByte('"') - - start := 0 - for i := 0; i < len(s); { - if b := s[i]; b < utf8.RuneSelf { - if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { - i++ - continue - } - if start < i { - buf.WriteString(s[start:i]) - } - switch b { - case '\\', '"': - buf.WriteByte('\\') - buf.WriteByte(b) - case '\n': - buf.WriteByte('\\') - buf.WriteByte('n') - case '\r': - buf.WriteByte('\\') - buf.WriteByte('r') - case '\t': - buf.WriteByte('\\') - buf.WriteByte('t') - default: - // This encodes bytes < 0x20 except for \n and \r, - // as well as <, > and &. The latter are escaped because they - // can lead to security holes when user-controlled strings - // are rendered into JSON and served to some browsers. 
- buf.WriteString(`\u00`) - buf.WriteByte(hex[b>>4]) - buf.WriteByte(hex[b&0xF]) - } - i++ - start = i - continue - } - c, size := utf8.DecodeRuneInString(s[i:]) - if c == utf8.RuneError && size == 1 { - if start < i { - buf.WriteString(s[start:i]) - } - buf.WriteString(`\ufffd`) - i += size - start = i - continue - } - // U+2028 is LINE SEPARATOR. - // U+2029 is PARAGRAPH SEPARATOR. - // They are both technically valid characters in JSON strings, - // but don't work in JSONP, which has to be evaluated as JavaScript, - // and can lead to security holes there. It is valid JSON to - // escape them, so we do so unconditionally. - // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. - if c == '\u2028' || c == '\u2029' { - if start < i { - buf.WriteString(s[start:i]) - } - buf.WriteString(`\u202`) - buf.WriteByte(hex[c&0xF]) - i += size - start = i - continue - } - i += size - } - if start < len(s) { - buf.WriteString(s[start:]) - } - - buf.WriteByte('"') - - return buf.String() -} diff --git a/vendor/github.com/basgys/goxml2json/jstype.go b/vendor/github.com/basgys/goxml2json/jstype.go deleted file mode 100644 index c0c08cb5f0a..00000000000 --- a/vendor/github.com/basgys/goxml2json/jstype.go +++ /dev/null @@ -1,74 +0,0 @@ -package xml2json - -import ( - "strconv" - "strings" -) - -// https://cswr.github.io/JsonSchema/spec/basic_types/ -// JSType is a JavaScript extracted from a string -type JSType int - -const ( - Bool JSType = iota - Int - Float - String - Null -) - -// Str2JSType extract a JavaScript type from a string -func Str2JSType(s string) JSType { - var ( - output JSType - ) - s = strings.TrimSpace(s) // santize the given string - switch { - case isBool(s): - output = Bool - case isFloat(s): - output = Float - case isInt(s): - output = Int - case isNull(s): - output = Null - default: - output = String // if all alternatives have been eliminated, the input is a string - } - return output -} - -func isBool(s string) bool { - return s == 
"true" || s == "false" -} - -func isFloat(s string) bool { - var output = false - if strings.Contains(s, ".") { - _, err := strconv.ParseFloat(s, 64) - if err == nil { // the string successfully converts to a decimal - output = true - } - } - return output -} - -func isInt(s string) bool { - var output = false - if len(s) >= 1 { - _, err := strconv.Atoi(s) - if err == nil { // the string successfully converts to an int - if s != "0" && s[0] == '0' { - // if the first rune is '0' and there is more than 1 rune, then the input is most likely a float or intended to be - // a string value -- such as in the case of a guid, or an international phone number - } else { - output = true - } - } - } - return output -} - -func isNull(s string) bool { - return s == "null" -} diff --git a/vendor/github.com/basgys/goxml2json/plugins.go b/vendor/github.com/basgys/goxml2json/plugins.go deleted file mode 100644 index 60137f05db6..00000000000 --- a/vendor/github.com/basgys/goxml2json/plugins.go +++ /dev/null @@ -1,161 +0,0 @@ -package xml2json - -import ( - "strings" -) - -type ( - // an plugin is added to an encoder or/and to an decoder to allow custom functionality at runtime - plugin interface { - AddToEncoder(*Encoder) *Encoder - AddToDecoder(*Decoder) *Decoder - } - // a type converter overides the default string sanitization for encoding json - encoderTypeConverter interface { - Convert(string) string - } - // customTypeConverter converts strings to JSON types using a best guess approach, only parses the JSON types given - // when initialized via WithTypeConverter - customTypeConverter struct { - parseTypes []JSType - } - - attrPrefixer string - contentPrefixer string - - excluder []string - - nodesFormatter struct { - list []nodeFormatter - } - nodeFormatter struct { - path string - plugin nodePlugin - } - - nodePlugin interface { - AddTo(*Node) - } - - arrayFormatter struct{} -) - -// WithTypeConverter allows customized js type conversion behavior by passing in the desired 
JSTypes -func WithTypeConverter(ts ...JSType) *customTypeConverter { - return &customTypeConverter{parseTypes: ts} -} - -func (tc *customTypeConverter) parseAsString(t JSType) bool { - if t == String { - return true - } - for i := 0; i < len(tc.parseTypes); i++ { - if tc.parseTypes[i] == t { - return false - } - } - return true -} - -// Adds the type converter to the encoder -func (tc *customTypeConverter) AddToEncoder(e *Encoder) *Encoder { - e.tc = tc - return e -} - -func (tc *customTypeConverter) AddToDecoder(d *Decoder) *Decoder { - return d -} - -func (tc *customTypeConverter) Convert(s string) string { - // remove quotes if they exists - if strings.HasPrefix(s, `"`) && strings.HasSuffix(s, `"`) { - s = s[1 : len(s)-1] - } - jsType := Str2JSType(s) - if tc.parseAsString(jsType) { - // add the quotes removed at the start of this func - s = `"` + s + `"` - } - return s -} - -// WithAttrPrefix appends the given prefix to the json output of xml attribute fields to preserve namespaces -func WithAttrPrefix(prefix string) *attrPrefixer { - ap := attrPrefixer(prefix) - return &ap -} - -func (a *attrPrefixer) AddToEncoder(e *Encoder) *Encoder { - e.attributePrefix = string((*a)) - return e -} - -func (a *attrPrefixer) AddToDecoder(d *Decoder) *Decoder { - d.attributePrefix = string((*a)) - return d -} - -// WithContentPrefix appends the given prefix to the json output of xml content fields to preserve namespaces -func WithContentPrefix(prefix string) *contentPrefixer { - c := contentPrefixer(prefix) - return &c -} - -func (c *contentPrefixer) AddToEncoder(e *Encoder) *Encoder { - e.contentPrefix = string((*c)) - return e -} - -func (c *contentPrefixer) AddToDecoder(d *Decoder) *Decoder { - d.contentPrefix = string((*c)) - return d -} - -// ExcludeAttributes excludes some xml attributes, for example, xmlns:xsi, xsi:noNamespaceSchemaLocation -func ExcludeAttributes(attrs []string) *excluder { - ex := excluder(attrs) - return &ex -} - -func (ex *excluder) AddToEncoder(e 
*Encoder) *Encoder { - return e -} - -func (ex *excluder) AddToDecoder(d *Decoder) *Decoder { - d.ExcludeAttributes([]string((*ex))) - return d -} - -// WithNodes formats specific nodes -func WithNodes(n ...nodeFormatter) *nodesFormatter { - return &nodesFormatter{list: n} -} - -func (nf *nodesFormatter) AddToEncoder(e *Encoder) *Encoder { - return e -} - -func (nf *nodesFormatter) AddToDecoder(d *Decoder) *Decoder { - d.AddFormatters(nf.list) - return d -} - -func NodePlugin(path string, plugin nodePlugin) nodeFormatter { - return nodeFormatter{path: path, plugin: plugin} -} - -func (nf *nodeFormatter) Format(node *Node) { - child := node.GetChild(nf.path) - if child != nil { - nf.plugin.AddTo(child) - } -} - -func ToArray() *arrayFormatter { - return &arrayFormatter{} -} - -func (af *arrayFormatter) AddTo(n *Node) { - n.ChildrenAlwaysAsArray = true -} diff --git a/vendor/github.com/basgys/goxml2json/struct.go b/vendor/github.com/basgys/goxml2json/struct.go deleted file mode 100644 index 350e1ac724f..00000000000 --- a/vendor/github.com/basgys/goxml2json/struct.go +++ /dev/null @@ -1,47 +0,0 @@ -package xml2json - -import ( - "strings" -) - -// Node is a data element on a tree -type Node struct { - Children map[string]Nodes - Data string - ChildrenAlwaysAsArray bool -} - -// Nodes is a list of nodes -type Nodes []*Node - -// AddChild appends a node to the list of children -func (n *Node) AddChild(s string, c *Node) { - // Lazy lazy - if n.Children == nil { - n.Children = map[string]Nodes{} - } - - n.Children[s] = append(n.Children[s], c) -} - -// IsComplex returns whether it is a complex type (has children) -func (n *Node) IsComplex() bool { - return len(n.Children) > 0 -} - -// GetChild returns child by path if exists. 
Path looks like "grandparent.parent.child.grandchild" -func (n *Node) GetChild(path string) *Node { - result := n - names := strings.Split(path, ".") - for _, name := range names { - children, exists := result.Children[name] - if !exists { - return nil - } - if len(children) == 0 { - return nil - } - result = children[0] - } - return result -} diff --git a/vendor/github.com/digitalocean/godo/CHANGELOG.md b/vendor/github.com/digitalocean/godo/CHANGELOG.md index 41fd1e2aab8..83e0afbf492 100644 --- a/vendor/github.com/digitalocean/godo/CHANGELOG.md +++ b/vendor/github.com/digitalocean/godo/CHANGELOG.md @@ -1,5 +1,26 @@ # Change Log +## [1.178.0] - 2026-03-16 + +- #969 - @d-honeybadger - add support for DOKS SSO toggles +- #968 - @Rachana888 - Add DI get GPUModelConfig +- #967 - @Rachana888 - Add DI sizes/pricing +- #966 - @Rachana888 - add DI token management operations (CreateToken, ListTokens, RevokeToken) +- #964 - @Rachana888 - Add List operation for Dedicated Inference Accelerators +- #963 - @Rachana888 - Add List operation for Dedicated Inference +- #962 - @Rachana888 - Add Dedicated Inference Delete operation +- #961 - @Rachana888 - Add Dedicated Inference Update operation +- #958 - @Rachana888 - Add Dedicated Inference Create and Get operations + +## [1.177.0] - 2026-03-11 + +- #959 - @blesswinsamuel - Add ListEvents, CancelEvent, and GetEventLogs APIs for App Platform +- #960 - @ZachEddy - apps: Add secure_header.remove_header to app spec definition + +## [1.176.0] - 2026-02-26 + +- #953 - @kamleshsahu - Add dbaas metrics client + ## [1.175.0] - 2026-02-12 - #952 - @bentranter - security: add cspm scanning functionality for public preview diff --git a/vendor/github.com/digitalocean/godo/apps.gen.go b/vendor/github.com/digitalocean/godo/apps.gen.go index 148028cdf41..bf18d3e2a72 100644 --- a/vendor/github.com/digitalocean/godo/apps.gen.go +++ b/vendor/github.com/digitalocean/godo/apps.gen.go @@ -382,6 +382,8 @@ type AppSecureHeaderSpec struct { Key string 
`json:"key,omitempty"` // The value of the header to set. Value string `json:"value,omitempty"` + // Remove the header from incoming requests before forwarding to the app. + RemoveHeader bool `json:"remove_header,omitempty"` } // AppInstance struct for AppInstance @@ -756,6 +758,14 @@ type AutoscalerActionScaleChange struct { To int64 `json:"to,omitempty"` } +// AutoscalingEventComponentScaleChange struct for AutoscalingEventComponentScaleChange +type AutoscalingEventComponentScaleChange struct { + From int64 `json:"from,omitempty"` + To int64 `json:"to,omitempty"` + // The metric that triggered the scale change while scaling up. Known values are "cpu", "requests_per_second", "request_duration". For inactivity sleep, "scale_from_zero" and "scale_to_zero" are used. + TriggeringMetric string `json:"triggering_metric,omitempty"` +} + // BitbucketSourceSpec struct for BitbucketSourceSpec type BitbucketSourceSpec struct { Repo string `json:"repo,omitempty"` @@ -1203,6 +1213,45 @@ type AppDomainValidation struct { TXTValue string `json:"txt_value,omitempty"` } +// Event struct for Event +type Event struct { + ID string `json:"id,omitempty"` + Type EventType `json:"type,omitempty"` + CreatedAt time.Time `json:"created_at,omitempty"` + DeploymentID string `json:"deployment_id,omitempty"` + Deployment *Deployment `json:"deployment,omitempty"` + Autoscaling *EventAutoscalingEvent `json:"autoscaling,omitempty"` +} + +// EventAutoscalingEvent struct for EventAutoscalingEvent +type EventAutoscalingEvent struct { + Phase EventAutoscalingEventPhase `json:"phase,omitempty"` + Components map[string]AutoscalingEventComponentScaleChange `json:"components,omitempty"` +} + +// EventAutoscalingEventPhase the model 'EventAutoscalingEventPhase' +type EventAutoscalingEventPhase string + +// List of EventAutoscalingEventPhase +const ( + EVENTAUTOSCALINGEVENTPHASE_Unknown EventAutoscalingEventPhase = "UNKNOWN" + EVENTAUTOSCALINGEVENTPHASE_Pending EventAutoscalingEventPhase = "PENDING" + 
EVENTAUTOSCALINGEVENTPHASE_InProgress EventAutoscalingEventPhase = "IN_PROGRESS" + EVENTAUTOSCALINGEVENTPHASE_Succeeded EventAutoscalingEventPhase = "SUCCEEDED" + EVENTAUTOSCALINGEVENTPHASE_Failed EventAutoscalingEventPhase = "FAILED" + EVENTAUTOSCALINGEVENTPHASE_Canceled EventAutoscalingEventPhase = "CANCELED" +) + +// EventType the model 'EventType' +type EventType string + +// List of EventType +const ( + EVENTTYPE_Unknown EventType = "UNKNOWN" + EVENTTYPE_Deployment EventType = "DEPLOYMENT" + EVENTTYPE_Autoscaling EventType = "AUTOSCALING" +) + // FunctionsComponentHealth struct for FunctionsComponentHealth type FunctionsComponentHealth struct { Name string `json:"name,omitempty"` diff --git a/vendor/github.com/digitalocean/godo/apps.go b/vendor/github.com/digitalocean/godo/apps.go index e1fa86d56d0..f35246155d0 100644 --- a/vendor/github.com/digitalocean/godo/apps.go +++ b/vendor/github.com/digitalocean/godo/apps.go @@ -24,6 +24,8 @@ const ( AppLogTypeRun AppLogType = "RUN" // AppLogTypeRunRestarted represents logs of crashed/restarted instances during runtime. AppLogTypeRunRestarted AppLogType = "RUN_RESTARTED" + // AppLogTypeAutoscaleEvent represents logs of an autoscaling event. 
+ AppLogTypeAutoscaleEvent AppLogType = "AUTOSCALE_EVENT" ) // AppsService is an interface for interfacing with the App Platform endpoints @@ -81,6 +83,11 @@ type AppsService interface { GetJobInvocation(ctx context.Context, appID string, jobInvocationId string, opts *GetJobInvocationOptions) (*JobInvocation, *Response, error) GetJobInvocationLogs(ctx context.Context, appID, jobInvocationId string, opts *GetJobInvocationLogsOptions) (*AppLogs, *Response, error) CancelJobInvocation(ctx context.Context, appID, jobInvocationID string, opts *CancelJobInvocationOptions) (*JobInvocation, *Response, error) + + ListEvents(ctx context.Context, appID string, opts *ListEventsOptions) ([]*Event, *Response, error) + GetEvent(ctx context.Context, appID, eventID string) (*Event, *Response, error) + CancelEvent(ctx context.Context, appID, eventID string) (*Event, *Response, error) + GetEventLogs(ctx context.Context, appID, eventID string, opts *GetEventLogsOptions) (*AppLogs, *Response, error) } // AppLogs represent app logs. @@ -137,6 +144,22 @@ type CancelJobInvocationOptions struct { JobName string `url:"job_name,omitempty"` } +// ListEventsOptions specifies the optional parameters to the ListEvents method. +type ListEventsOptions struct { + Page int `url:"page,omitempty"` + PerPage int `url:"per_page,omitempty"` + // EventTypes filters events by type (e.g. DEPLOYMENT, AUTOSCALING). + EventTypes []string `url:"event_types,omitempty"` + // DeploymentTypes filters deployment events by deployment cause type. + DeploymentTypes []string `url:"deployment_types,omitempty"` +} + +// GetEventLogsOptions specifies the optional parameters to the GetEventLogs method. +type GetEventLogsOptions struct { + Follow bool + TailLines int +} + // DeploymentCreateRequest represents a request to create a deployment. 
type DeploymentCreateRequest struct { ForceBuild bool `json:"force_build"` @@ -199,6 +222,16 @@ type jobInvocationsRoot struct { Meta *Meta `json:"meta"` } +type eventRoot struct { + Event *Event `json:"event,omitempty"` +} + +type eventsRoot struct { + Events []*Event `json:"events"` + Links *Links `json:"links"` + Meta *Meta `json:"meta"` +} + type appTierRoot struct { Tier *AppTier `json:"tier"` } @@ -546,6 +579,96 @@ func (s *AppsServiceOp) CancelJobInvocation(ctx context.Context, appID string, j return root.JobInvocation, resp, nil } +// ListEvents lists all events for a given app. +func (s *AppsServiceOp) ListEvents(ctx context.Context, appID string, opts *ListEventsOptions) ([]*Event, *Response, error) { + path := fmt.Sprintf("%s/%s/events", appsBasePath, appID) + + path, err := addOptions(path, opts) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(eventsRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + if l := root.Links; l != nil { + resp.Links = l + } + + if m := root.Meta; m != nil { + resp.Meta = m + } + return root.Events, resp, nil +} + +// GetEvent retrieves a single event for an app. +func (s *AppsServiceOp) GetEvent(ctx context.Context, appID, eventID string) (*Event, *Response, error) { + url := fmt.Sprintf("%s/%s/events/%s", appsBasePath, appID, eventID) + + req, err := s.client.NewRequest(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, nil, err + } + + root := new(eventRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Event, resp, nil +} + +// CancelEvent cancels an in-progress autoscaling event. 
+func (s *AppsServiceOp) CancelEvent(ctx context.Context, appID, eventID string) (*Event, *Response, error) { + url := fmt.Sprintf("%s/%s/events/%s/cancel", appsBasePath, appID, eventID) + + req, err := s.client.NewRequest(ctx, http.MethodPost, url, nil) + if err != nil { + return nil, nil, err + } + + root := new(eventRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Event, resp, nil +} + +// GetEventLogs retrieves logs for an autoscaling event. +func (s *AppsServiceOp) GetEventLogs(ctx context.Context, appID, eventID string, opts *GetEventLogsOptions) (*AppLogs, *Response, error) { + url := fmt.Sprintf("%s/%s/events/%s/logs?type=%s", appsBasePath, appID, eventID, AppLogTypeAutoscaleEvent) + + if opts != nil { + if opts.Follow { + url += fmt.Sprintf("&follow=%t", opts.Follow) + } + if opts.TailLines > 0 { + url += fmt.Sprintf("&tail_lines=%d", opts.TailLines) + } + } + + req, err := s.client.NewRequest(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, nil, err + } + + logs := new(AppLogs) + resp, err := s.client.Do(ctx, req, logs) + if err != nil { + return nil, resp, err + } + return logs, resp, nil +} + // GetLogs retrieves app logs. func (s *AppsServiceOp) GetLogs(ctx context.Context, appID, deploymentID, component string, logType AppLogType, follow bool, tailLines int) (*AppLogs, *Response, error) { var url string diff --git a/vendor/github.com/digitalocean/godo/apps_accessors.go b/vendor/github.com/digitalocean/godo/apps_accessors.go index ec5b7a304f0..d105e8a9c53 100644 --- a/vendor/github.com/digitalocean/godo/apps_accessors.go +++ b/vendor/github.com/digitalocean/godo/apps_accessors.go @@ -2637,6 +2637,30 @@ func (a *AutoscalerActionScaleChange) GetTo() int64 { return a.To } +// GetFrom returns the From field. +func (a *AutoscalingEventComponentScaleChange) GetFrom() int64 { + if a == nil { + return 0 + } + return a.From +} + +// GetTo returns the To field. 
+func (a *AutoscalingEventComponentScaleChange) GetTo() int64 { + if a == nil { + return 0 + } + return a.To +} + +// GetTriggeringMetric returns the TriggeringMetric field. +func (a *AutoscalingEventComponentScaleChange) GetTriggeringMetric() string { + if a == nil { + return "" + } + return a.TriggeringMetric +} + // GetBranch returns the Branch field. func (b *BitbucketSourceSpec) GetBranch() string { if b == nil { @@ -3693,6 +3717,70 @@ func (d *DetectResponseServerlessPackage) GetName() string { return d.Name } +// GetAutoscaling returns the Autoscaling field. +func (e *Event) GetAutoscaling() *EventAutoscalingEvent { + if e == nil { + return nil + } + return e.Autoscaling +} + +// GetCreatedAt returns the CreatedAt field. +func (e *Event) GetCreatedAt() time.Time { + if e == nil { + return time.Time{} + } + return e.CreatedAt +} + +// GetDeployment returns the Deployment field. +func (e *Event) GetDeployment() *Deployment { + if e == nil { + return nil + } + return e.Deployment +} + +// GetDeploymentID returns the DeploymentID field. +func (e *Event) GetDeploymentID() string { + if e == nil { + return "" + } + return e.DeploymentID +} + +// GetID returns the ID field. +func (e *Event) GetID() string { + if e == nil { + return "" + } + return e.ID +} + +// GetType returns the Type field. +func (e *Event) GetType() EventType { + if e == nil { + return "" + } + return e.Type +} + +// GetComponents returns the Components field. +func (e *EventAutoscalingEvent) GetComponents() map[string]AutoscalingEventComponentScaleChange { + if e == nil { + return nil + } + return e.Components +} + +// GetPhase returns the Phase field. +func (e *EventAutoscalingEvent) GetPhase() EventAutoscalingEventPhase { + if e == nil { + return "" + } + return e.Phase +} + // GetFunctionsComponentHealthMetrics returns the FunctionsComponentHealthMetrics field. 
func (f *FunctionsComponentHealth) GetFunctionsComponentHealthMetrics() []*FunctionsComponentHealthMetrics { if f == nil { diff --git a/vendor/github.com/digitalocean/godo/dedicated_inference.go b/vendor/github.com/digitalocean/godo/dedicated_inference.go new file mode 100644 index 00000000000..20bacb31e00 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/dedicated_inference.go @@ -0,0 +1,490 @@ +package godo + +import ( + "context" + "fmt" + "net/http" + "time" +) + +const dedicatedInferenceBasePath = "/v2/dedicated-inferences" + +// DedicatedInferenceService is an interface for managing Dedicated Inference with the DigitalOcean API. +type DedicatedInferenceService interface { + Create(context.Context, *DedicatedInferenceCreateRequest) (*DedicatedInference, *DedicatedInferenceToken, *Response, error) + Get(context.Context, string) (*DedicatedInference, *Response, error) + List(context.Context, *DedicatedInferenceListOptions) ([]DedicatedInferenceListItem, *Response, error) + Delete(context.Context, string) (*Response, error) + Update(context.Context, string, *DedicatedInferenceUpdateRequest) (*DedicatedInference, *Response, error) + ListAccelerators(context.Context, string, *DedicatedInferenceListAcceleratorsOptions) ([]DedicatedInferenceAcceleratorInfo, *Response, error) + CreateToken(context.Context, string, *DedicatedInferenceTokenCreateRequest) (*DedicatedInferenceToken, *Response, error) + ListTokens(context.Context, string, *ListOptions) ([]DedicatedInferenceToken, *Response, error) + RevokeToken(context.Context, string, string) (*Response, error) + GetSizes(context.Context) (*DedicatedInferenceSizesResponse, *Response, error) + GetGPUModelConfig(context.Context) (*DedicatedInferenceGPUModelConfigResponse, *Response, error) +} + +// DedicatedInferenceServiceOp handles communication with Dedicated Inference methods of the DigitalOcean API. 
+type DedicatedInferenceServiceOp struct { + client *Client +} + +var _ DedicatedInferenceService = &DedicatedInferenceServiceOp{} + +// DedicatedInferenceCreateRequest represents a request to create a Dedicated Inference. +type DedicatedInferenceCreateRequest struct { + Spec *DedicatedInferenceSpecRequest `json:"spec"` + Secrets *DedicatedInferenceSecrets `json:"secrets,omitempty"` +} + +// DedicatedInferenceSpecRequest represents the deployment specification in a create/update request. +type DedicatedInferenceSpecRequest struct { + Version int `json:"version"` + Name string `json:"name"` + Region string `json:"region"` + EnablePublicEndpoint bool `json:"enable_public_endpoint"` + VPC *DedicatedInferenceVPCRequest `json:"vpc"` + ModelDeployments []*DedicatedInferenceModelRequest `json:"model_deployments"` +} + +// DedicatedInferenceVPCRequest represents the VPC configuration in a request. +type DedicatedInferenceVPCRequest struct { + UUID string `json:"uuid"` +} + +// DedicatedInferenceModelRequest represents a model deployment in a request. +type DedicatedInferenceModelRequest struct { + ModelID string `json:"model_id,omitempty"` + ModelSlug string `json:"model_slug"` + ModelProvider string `json:"model_provider"` + WorkloadConfig *DedicatedInferenceWorkloadConfig `json:"workload_config,omitempty"` + Accelerators []*DedicatedInferenceAcceleratorRequest `json:"accelerators"` +} + +// DedicatedInferenceWorkloadConfig represents workload-specific configuration. +type DedicatedInferenceWorkloadConfig struct{} + +// DedicatedInferenceAcceleratorRequest represents an accelerator in a request. +type DedicatedInferenceAcceleratorRequest struct { + AcceleratorSlug string `json:"accelerator_slug"` + Scale uint64 `json:"scale"` + Type string `json:"type"` +} + +// DedicatedInferenceSecrets represents secrets for external model providers. 
+type DedicatedInferenceSecrets struct { + HuggingFaceToken string `json:"hugging_face_token,omitempty"` +} + +// DedicatedInferenceListOptions specifies optional parameters for listing Dedicated Inferences. +type DedicatedInferenceListOptions struct { + Region string `url:"region,omitempty"` + Name string `url:"name,omitempty"` + ListOptions +} + +// DedicatedInferenceListAcceleratorsOptions specifies optional parameters for listing accelerators. +type DedicatedInferenceListAcceleratorsOptions struct { + Slug string `url:"slug,omitempty"` + ListOptions +} + +// DedicatedInferenceUpdateRequest represents a request to update a Dedicated Inference. +type DedicatedInferenceUpdateRequest struct { + Spec *DedicatedInferenceSpecRequest `json:"spec"` + Secrets *DedicatedInferenceSecrets `json:"secrets,omitempty"` +} + +// DedicatedInferenceTokenCreateRequest represents a request to create an auth token. +type DedicatedInferenceTokenCreateRequest struct { + Name string `json:"name"` +} + +// -- Response types (what the API returns) -- + +// DedicatedInferenceListItem represents a Dedicated Inference item in a list response. +type DedicatedInferenceListItem struct { + ID string `json:"id"` + Name string `json:"name"` + Region string `json:"region"` + Status string `json:"status"` + VPCUUID string `json:"vpc_uuid"` + Endpoints *DedicatedInferenceEndpoints `json:"endpoints,omitempty"` + CreatedAt time.Time `json:"created_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` +} + +// DedicatedInferenceAcceleratorInfo represents an accelerator in a list accelerators response. +type DedicatedInferenceAcceleratorInfo struct { + ID string `json:"id"` + Name string `json:"name"` + Slug string `json:"slug"` + Status string `json:"status"` + CreatedAt time.Time `json:"created_at"` +} + +// DedicatedInference represents a Dedicated Inference resource returned by the API. 
+type DedicatedInference struct { + ID string `json:"id"` + Name string `json:"name"` + Region string `json:"region"` + Status string `json:"status"` + VPCUUID string `json:"vpc_uuid"` + Endpoints *DedicatedInferenceEndpoints `json:"endpoints,omitempty"` + DeploymentSpec *DedicatedInferenceDeployment `json:"spec,omitempty"` + PendingDeploymentSpec *DedicatedInferenceDeployment `json:"pending_deployment_spec,omitempty"` + CreatedAt time.Time `json:"created_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` +} + +func (d DedicatedInference) String() string { + return Stringify(d) +} + +// DedicatedInferenceEndpoints represents the endpoints for a Dedicated Inference. +type DedicatedInferenceEndpoints struct { + PublicEndpointFQDN string `json:"public_endpoint_fqdn,omitempty"` + PrivateEndpointFQDN string `json:"private_endpoint_fqdn,omitempty"` +} + +// DedicatedInferenceDeployment represents a deployment spec in the API response. +type DedicatedInferenceDeployment struct { + Version uint64 `json:"version"` + ID string `json:"id"` + DedicatedInferenceID string `json:"dedicated_inference_id"` + State string `json:"state"` + EnablePublicEndpoint bool `json:"enable_public_endpoint"` + VPCConfig *DedicatedInferenceVPCConfig `json:"vpc_config,omitempty"` + ModelDeployments []*DedicatedInferenceModelDeployment `json:"model_deployments"` + CreatedAt time.Time `json:"created_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` +} + +// DedicatedInferenceVPCConfig represents the VPC config in an API response. +type DedicatedInferenceVPCConfig struct { + VPCUUID string `json:"vpc_uuid"` +} + +// DedicatedInferenceModelDeployment represents a model deployment in an API response. 
+type DedicatedInferenceModelDeployment struct { + ModelID string `json:"model_id"` + ModelSlug string `json:"model_slug"` + ModelProvider string `json:"model_provider"` + Accelerators []*DedicatedInferenceAccelerator `json:"accelerators"` +} + +// DedicatedInferenceAccelerator represents an accelerator in an API response. +type DedicatedInferenceAccelerator struct { + AcceleratorID string `json:"accelerator_id"` + AcceleratorSlug string `json:"accelerator_slug"` + State string `json:"state"` + Type string `json:"type"` + Scale uint64 `json:"scale"` +} + +// DedicatedInferenceToken represents an auth token returned on create. +type DedicatedInferenceToken struct { + ID string `json:"id"` + Name string `json:"name"` + Value string `json:"value,omitempty"` + CreatedAt time.Time `json:"created_at"` +} + +func (t DedicatedInferenceToken) String() string { + return Stringify(t) +} + +// DedicatedInferenceSizesResponse represents the response from GetSizes. +type DedicatedInferenceSizesResponse struct { + EnabledRegions []string `json:"enabled_regions"` + Sizes []*DedicatedInferenceSize `json:"sizes"` +} + +// DedicatedInferenceSize represents a GPU size with pricing information. +type DedicatedInferenceSize struct { + GPUSlug string `json:"gpu_slug"` + PricePerHour string `json:"price_per_hour"` + Regions []string `json:"regions"` + Currency string `json:"currency"` + CPU uint32 `json:"cpu"` + Memory uint32 `json:"memory"` + GPU *DedicatedInferenceSizeGPU `json:"gpu"` + SizeCategory *DedicatedInferenceSizeCategory `json:"size_category"` + Disks []*DedicatedInferenceSizeDisk `json:"disks"` +} + +// DedicatedInferenceSizeGPU represents GPU details in a size. +type DedicatedInferenceSizeGPU struct { + Count uint32 `json:"count"` + VramGb uint32 `json:"vram_gb"` + Slug string `json:"slug"` +} + +// DedicatedInferenceSizeCategory represents the category of a size. 
+type DedicatedInferenceSizeCategory struct { + Name string `json:"name"` + FleetName string `json:"fleet_name"` +} + +// DedicatedInferenceSizeDisk represents a disk in a size. +type DedicatedInferenceSizeDisk struct { + Type string `json:"type"` + SizeGb uint64 `json:"size_gb"` +} + +// DedicatedInferenceGPUModelConfigResponse represents the response from GetGPUModelConfig. +type DedicatedInferenceGPUModelConfigResponse struct { + GPUModelConfigs []*DedicatedInferenceGPUModelConfig `json:"gpu_model_configs"` +} + +// DedicatedInferenceGPUModelConfig represents a GPU model configuration. +type DedicatedInferenceGPUModelConfig struct { + GPUSlugs []string `json:"gpu_slugs"` + ModelSlug string `json:"model_slug"` + ModelName string `json:"model_name"` + IsModelGated bool `json:"is_model_gated"` +} + +// -- Root types for JSON deserialization -- + +type dedicatedInferenceRoot struct { + DedicatedInference *DedicatedInference `json:"dedicated_inference"` + Token *DedicatedInferenceToken `json:"token,omitempty"` +} + +type dedicatedInferencesRoot struct { + DedicatedInferences []DedicatedInferenceListItem `json:"dedicated_inferences"` + Links *Links `json:"links"` + Meta *Meta `json:"meta"` +} + +type dedicatedInferenceAcceleratorsRoot struct { + Accelerators []DedicatedInferenceAcceleratorInfo `json:"accelerators"` + Links *Links `json:"links"` + Meta *Meta `json:"meta"` +} + +type dedicatedInferenceTokenRoot struct { + Token *DedicatedInferenceToken `json:"token"` +} + +type dedicatedInferenceTokensRoot struct { + Tokens []DedicatedInferenceToken `json:"tokens"` + Links *Links `json:"links"` + Meta *Meta `json:"meta"` +} + +// -- Service methods -- + +// Create a new Dedicated Inference with the given configuration. 
+func (s *DedicatedInferenceServiceOp) Create(ctx context.Context, createRequest *DedicatedInferenceCreateRequest) (*DedicatedInference, *DedicatedInferenceToken, *Response, error) { + req, err := s.client.NewRequest(ctx, http.MethodPost, dedicatedInferenceBasePath, createRequest) + if err != nil { + return nil, nil, nil, err + } + + root := new(dedicatedInferenceRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, nil, resp, err + } + + return root.DedicatedInference, root.Token, resp, nil +} + +// Get an existing Dedicated Inference by its UUID. +func (s *DedicatedInferenceServiceOp) Get(ctx context.Context, id string) (*DedicatedInference, *Response, error) { + path := fmt.Sprintf("%s/%s", dedicatedInferenceBasePath, id) + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(dedicatedInferenceRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.DedicatedInference, resp, nil +} + +// Delete an existing Dedicated Inference by its UUID. +func (s *DedicatedInferenceServiceOp) Delete(ctx context.Context, id string) (*Response, error) { + path := fmt.Sprintf("%s/%s", dedicatedInferenceBasePath, id) + + req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return nil, err + } + + return s.client.Do(ctx, req, nil) +} + +// Update an existing Dedicated Inference. 
+func (s *DedicatedInferenceServiceOp) Update(ctx context.Context, id string, updateRequest *DedicatedInferenceUpdateRequest) (*DedicatedInference, *Response, error) { + path := fmt.Sprintf("%s/%s", dedicatedInferenceBasePath, id) + + req, err := s.client.NewRequest(ctx, http.MethodPatch, path, updateRequest) + if err != nil { + return nil, nil, err + } + + root := new(dedicatedInferenceRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.DedicatedInference, resp, nil +} + +// List all Dedicated Inferences. +func (s *DedicatedInferenceServiceOp) List(ctx context.Context, opt *DedicatedInferenceListOptions) ([]DedicatedInferenceListItem, *Response, error) { + path, err := addOptions(dedicatedInferenceBasePath, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(dedicatedInferencesRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + if m := root.Meta; m != nil { + resp.Meta = m + } + + return root.DedicatedInferences, resp, nil +} + +// ListAccelerators lists accelerators for a Dedicated Inference. 
+func (s *DedicatedInferenceServiceOp) ListAccelerators(ctx context.Context, diID string, opt *DedicatedInferenceListAcceleratorsOptions) ([]DedicatedInferenceAcceleratorInfo, *Response, error) { + basePath := fmt.Sprintf("%s/%s/accelerators", dedicatedInferenceBasePath, diID) + path, err := addOptions(basePath, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(dedicatedInferenceAcceleratorsRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + if m := root.Meta; m != nil { + resp.Meta = m + } + + return root.Accelerators, resp, nil +} + +// CreateToken creates a new auth token for a Dedicated Inference. +func (s *DedicatedInferenceServiceOp) CreateToken(ctx context.Context, diID string, createRequest *DedicatedInferenceTokenCreateRequest) (*DedicatedInferenceToken, *Response, error) { + path := fmt.Sprintf("%s/%s/tokens", dedicatedInferenceBasePath, diID) + + req, err := s.client.NewRequest(ctx, http.MethodPost, path, createRequest) + if err != nil { + return nil, nil, err + } + + root := new(dedicatedInferenceTokenRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Token, resp, nil +} + +// ListTokens lists all auth tokens for a Dedicated Inference. 
+func (s *DedicatedInferenceServiceOp) ListTokens(ctx context.Context, diID string, opt *ListOptions) ([]DedicatedInferenceToken, *Response, error) { + basePath := fmt.Sprintf("%s/%s/tokens", dedicatedInferenceBasePath, diID) + path, err := addOptions(basePath, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(dedicatedInferenceTokensRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + if m := root.Meta; m != nil { + resp.Meta = m + } + + return root.Tokens, resp, nil +} + +// RevokeToken revokes (deletes) an auth token for a Dedicated Inference. +func (s *DedicatedInferenceServiceOp) RevokeToken(ctx context.Context, diID string, tokenID string) (*Response, error) { + path := fmt.Sprintf("%s/%s/tokens/%s", dedicatedInferenceBasePath, diID, tokenID) + + req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return nil, err + } + + return s.client.Do(ctx, req, nil) +} + +// GetSizes returns available Dedicated Inference sizes and pricing. +func (s *DedicatedInferenceServiceOp) GetSizes(ctx context.Context) (*DedicatedInferenceSizesResponse, *Response, error) { + path := fmt.Sprintf("%s/sizes", dedicatedInferenceBasePath) + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(DedicatedInferenceSizesResponse) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root, resp, nil +} + +// GetGPUModelConfig returns supported GPU model configurations. 
+func (s *DedicatedInferenceServiceOp) GetGPUModelConfig(ctx context.Context) (*DedicatedInferenceGPUModelConfigResponse, *Response, error) { + path := fmt.Sprintf("%s/gpu-model-config", dedicatedInferenceBasePath) + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(DedicatedInferenceGPUModelConfigResponse) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root, resp, nil +} diff --git a/vendor/github.com/digitalocean/godo/godo.go b/vendor/github.com/digitalocean/godo/godo.go index c3fbd9d313a..e318451daaa 100644 --- a/vendor/github.com/digitalocean/godo/godo.go +++ b/vendor/github.com/digitalocean/godo/godo.go @@ -21,7 +21,7 @@ import ( ) const ( - libraryVersion = "1.175.0" + libraryVersion = "1.178.0" defaultBaseURL = "https://api.digitalocean.com/" userAgent = "godo/" + libraryVersion mediaType = "application/json" @@ -100,6 +100,7 @@ type Client struct { VPCs VPCsService PartnerAttachment PartnerAttachmentService GradientAI GradientAIService + DedicatedInference DedicatedInferenceService BYOIPPrefixes BYOIPPrefixesService // Optional function called after every successful request made to the DO APIs onRequestCompleted RequestCompletionCallback @@ -331,6 +332,7 @@ func NewClient(httpClient *http.Client) *Client { c.VPCs = &VPCsServiceOp{client: c} c.PartnerAttachment = &PartnerAttachmentServiceOp{client: c} c.GradientAI = &GradientAIServiceOp{client: c} + c.DedicatedInference = &DedicatedInferenceServiceOp{client: c} c.headers = make(map[string]string) diff --git a/vendor/github.com/digitalocean/godo/kubernetes.go b/vendor/github.com/digitalocean/godo/kubernetes.go index f68291d5838..2b6c5f3c6f6 100644 --- a/vendor/github.com/digitalocean/godo/kubernetes.go +++ b/vendor/github.com/digitalocean/godo/kubernetes.go @@ -91,6 +91,7 @@ type KubernetesClusterCreateRequest struct { AmdGpuDeviceMetricsExporterPlugin 
*KubernetesAmdGpuDeviceMetricsExporterPlugin `json:"amd_gpu_device_metrics_exporter_plugin,omitempty"` NvidiaGpuDevicePlugin *KubernetesNvidiaGpuDevicePlugin `json:"nvidia_gpu_device_plugin,omitempty"` RdmaSharedDevicePlugin *KubernetesRdmaSharedDevicePlugin `json:"rdma_shared_dev_plugin,omitempty"` + SSO *KubernetesClusterSSO `json:"sso,omitempty"` } // KubernetesClusterUpdateRequest represents a request to update a Kubernetes cluster. @@ -107,6 +108,7 @@ type KubernetesClusterUpdateRequest struct { AmdGpuDeviceMetricsExporterPlugin *KubernetesAmdGpuDeviceMetricsExporterPlugin `json:"amd_gpu_device_metrics_exporter_plugin,omitempty"` NvidiaGpuDevicePlugin *KubernetesNvidiaGpuDevicePlugin `json:"nvidia_gpu_device_plugin,omitempty"` RdmaSharedDevicePlugin *KubernetesRdmaSharedDevicePlugin `json:"rdma_shared_dev_plugin,omitempty"` + SSO *KubernetesClusterSSO `json:"sso,omitempty"` // Convert cluster to run highly available control plane HA *bool `json:"ha,omitempty"` @@ -244,6 +246,7 @@ type KubernetesCluster struct { AmdGpuDeviceMetricsExporterPlugin *KubernetesAmdGpuDeviceMetricsExporterPlugin `json:"amd_gpu_device_metrics_exporter_plugin,omitempty"` NvidiaGpuDevicePlugin *KubernetesNvidiaGpuDevicePlugin `json:"nvidia_gpu_device_plugin,omitempty"` RdmaSharedDevicePlugin *KubernetesRdmaSharedDevicePlugin `json:"rdma_shared_dev_plugin,omitempty"` + SSO *KubernetesClusterSSO `json:"sso,omitempty"` Status *KubernetesClusterStatus `json:"status,omitempty"` CreatedAt time.Time `json:"created_at,omitempty"` @@ -321,6 +324,14 @@ type KubernetesRdmaSharedDevicePlugin struct { Enabled *bool `json:"enabled"` } +// KubernetesClusterSSO configures Single Sign-On (SSO) for a Kubernetes cluster. +// Identity Provider (IDP) settings for SSO are set up on the team level, +// whereas on a per-cluster level, users can enable or require SSO for the cluster. 
+type KubernetesClusterSSO struct { + Enabled *bool `json:"enabled,omitempty"` + Required *bool `json:"required,omitempty"` +} + // KubernetesMaintenancePolicyDay represents the possible days of a maintenance // window type KubernetesMaintenancePolicyDay int diff --git a/vendor/github.com/digitalocean/godo/monitoring.go b/vendor/github.com/digitalocean/godo/monitoring.go index 00feb256599..e4784fecf9c 100644 --- a/vendor/github.com/digitalocean/godo/monitoring.go +++ b/vendor/github.com/digitalocean/godo/monitoring.go @@ -14,6 +14,7 @@ const ( alertPolicyBasePath = monitoringBasePath + "/alerts" dropletMetricsBasePath = monitoringBasePath + "/metrics/droplet" loadBalancerMetricsBasePath = monitoringBasePath + "/metrics/load_balancer" + dbaasMysqlMetricsBasePath = monitoringBasePath + "/metrics/database/mysql" DropletCPUUtilizationPercent = "v1/insights/droplet/cpu" DropletMemoryUtilizationPercent = "v1/insights/droplet/memory_utilization_percent" @@ -96,6 +97,21 @@ type MonitoringService interface { GetLoadBalancerDropletsConnections(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) GetLoadBalancerDropletsHealthChecks(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) GetLoadBalancerDropletsDowntime(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) + + // DBaaS MySQL metrics (host-level: db_id only) + GetDbaasMysqlCpuUsage(ctx context.Context, args *DbaasMysqlCpuUsageRequest) (*MetricsResponse, *Response, error) + GetDbaasMysqlLoad(ctx context.Context, args *DbaasMysqlLoadRequest) (*MetricsResponse, *Response, error) + GetDbaasMysqlMemoryUsage(ctx context.Context, args *DbaasMysqlMemoryUsageRequest) (*MetricsResponse, *Response, error) + GetDbaasMysqlDiskUsage(ctx context.Context, args *DbaasMysqlDiskUsageRequest) (*MetricsResponse, *Response, error) + // DBaaS MySQL metrics (service-level: db_id + service) + GetDbaasMysqlThreadsConnected(ctx 
context.Context, args *DbaasMysqlServiceMetricsRequest) (*MetricsResponse, *Response, error) + GetDbaasMysqlThreadsCreatedRate(ctx context.Context, args *DbaasMysqlServiceMetricsRequest) (*MetricsResponse, *Response, error) + GetDbaasMysqlThreadsActive(ctx context.Context, args *DbaasMysqlServiceMetricsRequest) (*MetricsResponse, *Response, error) + GetDbaasMysqlIndexVsSequentialReads(ctx context.Context, args *DbaasMysqlServiceMetricsRequest) (*MetricsResponse, *Response, error) + GetDbaasMysqlOpRates(ctx context.Context, args *DbaasMysqlOpRatesRequest) (*MetricsResponse, *Response, error) + // DBaaS MySQL metrics (schema-level: db_id + service + schema) + GetDbaasMysqlSchemaThroughput(ctx context.Context, args *DbaasMysqlSchemaThroughputRequest) (*MetricsResponse, *Response, error) + GetDbaasMysqlSchemaLatency(ctx context.Context, args *DbaasMysqlSchemaLatencyRequest) (*MetricsResponse, *Response, error) } // MonitoringServiceOp handles communication with monitoring related methods of the @@ -199,6 +215,73 @@ type LoadBalancerMetricsRequest struct { End time.Time } +// DbaasMysqlMetricsRequest holds the information needed to retrieve DBaaS MySQL host-level metrics (db_id only). +type DbaasMysqlMetricsRequest struct { + DBID string + Start time.Time + End time.Time +} + +// DbaasMysqlCpuUsageRequest holds the information needed to retrieve MySQL cluster CPU usage (percent). Aggregate: avg, max, min. +type DbaasMysqlCpuUsageRequest struct { + DbaasMysqlMetricsRequest + Aggregate string // avg, max, min +} + +// DbaasMysqlLoadRequest holds the information needed to retrieve MySQL cluster load average. Metric: load1, load5, load15. Aggregate: avg, max. +type DbaasMysqlLoadRequest struct { + DbaasMysqlMetricsRequest + Metric string // load1, load5, load15 + Aggregate string // avg, max +} + +// DbaasMysqlMemoryUsageRequest holds the information needed to retrieve MySQL cluster memory usage (percent). Aggregate: avg, max, min. 
+type DbaasMysqlMemoryUsageRequest struct { + DbaasMysqlMetricsRequest + Aggregate string // avg, max, min +} + +// DbaasMysqlDiskUsageRequest holds the information needed to retrieve MySQL cluster disk usage (percent). Aggregate: avg, max, min. +type DbaasMysqlDiskUsageRequest struct { + DbaasMysqlMetricsRequest + Aggregate string // avg, max, min +} + +// DbaasMysqlServiceMetricsRequest holds the information needed to retrieve DBaaS MySQL service-level metrics (db_id + service). +type DbaasMysqlServiceMetricsRequest struct { + DBID string + Service string + Start time.Time + End time.Time +} + +// DbaasMysqlOpRatesRequest holds the information needed to retrieve MySQL service operations rate. Metric: select, insert, update, delete. +type DbaasMysqlOpRatesRequest struct { + DbaasMysqlServiceMetricsRequest + Metric string // select, insert, update, delete +} + +// DbaasMysqlSchemaMetricsRequest holds the information needed to retrieve DBaaS MySQL schema-level metrics (db_id + service + schema). +type DbaasMysqlSchemaMetricsRequest struct { + DBID string + Service string + Schema string + Start time.Time + End time.Time +} + +// DbaasMysqlSchemaThroughputRequest holds the information needed to retrieve MySQL schema table I/O throughput (rows/s). Metric: insert, fetch, update, delete. +type DbaasMysqlSchemaThroughputRequest struct { + DbaasMysqlSchemaMetricsRequest + Metric string // insert, fetch, update, delete +} + +// DbaasMysqlSchemaLatencyRequest holds the information needed to retrieve MySQL schema table I/O latency (seconds). Metric: insert, fetch, update, delete. +type DbaasMysqlSchemaLatencyRequest struct { + DbaasMysqlSchemaMetricsRequest + Metric string // insert, fetch, update, delete +} + // MetricsResponse holds a Metrics query response. 
type MetricsResponse struct { Status string `json:"status"` @@ -562,3 +645,147 @@ func (s *MonitoringServiceOp) getLoadBalancerMetrics(ctx context.Context, path s return root, resp, err } + +// getDbaasMysqlMetrics performs a GET request for a DBaaS MySQL metric path with the given query params. +func (s *MonitoringServiceOp) getDbaasMysqlMetrics(ctx context.Context, path string, params map[string]string) (*MetricsResponse, *Response, error) { + fullPath := dbaasMysqlMetricsBasePath + path + req, err := s.client.NewRequest(ctx, http.MethodGet, fullPath, nil) + if err != nil { + return nil, nil, err + } + q := req.URL.Query() + for k, v := range params { + q.Add(k, v) + } + req.URL.RawQuery = q.Encode() + root := new(MetricsResponse) + resp, err := s.client.Do(ctx, req, root) + return root, resp, err +} + +// GetDbaasMysqlCpuUsage retrieves CPU usage (percent) for a MySQL cluster. Aggregate: avg, max, min. +func (s *MonitoringServiceOp) GetDbaasMysqlCpuUsage(ctx context.Context, args *DbaasMysqlCpuUsageRequest) (*MetricsResponse, *Response, error) { + params := map[string]string{ + "db_id": args.DBID, + "start": fmt.Sprintf("%d", args.Start.Unix()), + "end": fmt.Sprintf("%d", args.End.Unix()), + "aggregate": args.Aggregate, + } + return s.getDbaasMysqlMetrics(ctx, "/cpu_usage", params) +} + +// GetDbaasMysqlLoad retrieves load average for a MySQL cluster. Metric: load1, load5, load15. Aggregate: avg, max. +func (s *MonitoringServiceOp) GetDbaasMysqlLoad(ctx context.Context, args *DbaasMysqlLoadRequest) (*MetricsResponse, *Response, error) { + params := map[string]string{ + "db_id": args.DBID, + "start": fmt.Sprintf("%d", args.Start.Unix()), + "end": fmt.Sprintf("%d", args.End.Unix()), + "metric": args.Metric, + "aggregate": args.Aggregate, + } + return s.getDbaasMysqlMetrics(ctx, "/load", params) +} + +// GetDbaasMysqlMemoryUsage retrieves memory usage (percent) for a MySQL cluster. Aggregate: avg, max, min. 
+func (s *MonitoringServiceOp) GetDbaasMysqlMemoryUsage(ctx context.Context, args *DbaasMysqlMemoryUsageRequest) (*MetricsResponse, *Response, error) { + params := map[string]string{ + "db_id": args.DBID, + "start": fmt.Sprintf("%d", args.Start.Unix()), + "end": fmt.Sprintf("%d", args.End.Unix()), + "aggregate": args.Aggregate, + } + return s.getDbaasMysqlMetrics(ctx, "/memory_usage", params) +} + +// GetDbaasMysqlDiskUsage retrieves disk usage (percent) for a MySQL cluster. Aggregate: avg, max, min. +func (s *MonitoringServiceOp) GetDbaasMysqlDiskUsage(ctx context.Context, args *DbaasMysqlDiskUsageRequest) (*MetricsResponse, *Response, error) { + params := map[string]string{ + "db_id": args.DBID, + "start": fmt.Sprintf("%d", args.Start.Unix()), + "end": fmt.Sprintf("%d", args.End.Unix()), + "aggregate": args.Aggregate, + } + return s.getDbaasMysqlMetrics(ctx, "/disk_usage", params) +} + +// GetDbaasMysqlThreadsConnected retrieves current threads connected for a MySQL service. +func (s *MonitoringServiceOp) GetDbaasMysqlThreadsConnected(ctx context.Context, args *DbaasMysqlServiceMetricsRequest) (*MetricsResponse, *Response, error) { + params := map[string]string{ + "db_id": args.DBID, + "service": args.Service, + "start": fmt.Sprintf("%d", args.Start.Unix()), + "end": fmt.Sprintf("%d", args.End.Unix()), + } + return s.getDbaasMysqlMetrics(ctx, "/threads_connected", params) +} + +// GetDbaasMysqlThreadsCreatedRate retrieves threads created rate (per second) for a MySQL service. 
+func (s *MonitoringServiceOp) GetDbaasMysqlThreadsCreatedRate(ctx context.Context, args *DbaasMysqlServiceMetricsRequest) (*MetricsResponse, *Response, error) { + params := map[string]string{ + "db_id": args.DBID, + "service": args.Service, + "start": fmt.Sprintf("%d", args.Start.Unix()), + "end": fmt.Sprintf("%d", args.End.Unix()), + } + return s.getDbaasMysqlMetrics(ctx, "/threads_created_rate", params) +} + +// GetDbaasMysqlThreadsActive retrieves active (running) threads for a MySQL service. +func (s *MonitoringServiceOp) GetDbaasMysqlThreadsActive(ctx context.Context, args *DbaasMysqlServiceMetricsRequest) (*MetricsResponse, *Response, error) { + params := map[string]string{ + "db_id": args.DBID, + "service": args.Service, + "start": fmt.Sprintf("%d", args.Start.Unix()), + "end": fmt.Sprintf("%d", args.End.Unix()), + } + return s.getDbaasMysqlMetrics(ctx, "/threads_active", params) +} + +// GetDbaasMysqlIndexVsSequentialReads retrieves index vs sequential reads ratio (percent) for a MySQL service. +func (s *MonitoringServiceOp) GetDbaasMysqlIndexVsSequentialReads(ctx context.Context, args *DbaasMysqlServiceMetricsRequest) (*MetricsResponse, *Response, error) { + params := map[string]string{ + "db_id": args.DBID, + "service": args.Service, + "start": fmt.Sprintf("%d", args.Start.Unix()), + "end": fmt.Sprintf("%d", args.End.Unix()), + } + return s.getDbaasMysqlMetrics(ctx, "/index_vs_sequential_reads", params) +} + +// GetDbaasMysqlOpRates retrieves operations rate (select, insert, update, delete per second) for a MySQL service. 
+func (s *MonitoringServiceOp) GetDbaasMysqlOpRates(ctx context.Context, args *DbaasMysqlOpRatesRequest) (*MetricsResponse, *Response, error) { + params := map[string]string{ + "db_id": args.DBID, + "service": args.Service, + "metric": args.Metric, + "start": fmt.Sprintf("%d", args.Start.Unix()), + "end": fmt.Sprintf("%d", args.End.Unix()), + } + return s.getDbaasMysqlMetrics(ctx, "/op_rates", params) +} + +// GetDbaasMysqlSchemaThroughput retrieves table I/O throughput (rows/s) for a MySQL schema. Metric: insert, fetch, update, delete. +func (s *MonitoringServiceOp) GetDbaasMysqlSchemaThroughput(ctx context.Context, args *DbaasMysqlSchemaThroughputRequest) (*MetricsResponse, *Response, error) { + params := map[string]string{ + "db_id": args.DBID, + "service": args.Service, + "schema": args.Schema, + "metric": args.Metric, + "start": fmt.Sprintf("%d", args.Start.Unix()), + "end": fmt.Sprintf("%d", args.End.Unix()), + } + return s.getDbaasMysqlMetrics(ctx, "/schema_throughput", params) +} + +// GetDbaasMysqlSchemaLatency retrieves table I/O latency (seconds) for a MySQL schema. Metric: insert, fetch, update, delete. +func (s *MonitoringServiceOp) GetDbaasMysqlSchemaLatency(ctx context.Context, args *DbaasMysqlSchemaLatencyRequest) (*MetricsResponse, *Response, error) { + params := map[string]string{ + "db_id": args.DBID, + "service": args.Service, + "schema": args.Schema, + "metric": args.Metric, + "start": fmt.Sprintf("%d", args.Start.Unix()), + "end": fmt.Sprintf("%d", args.End.Unix()), + } + return s.getDbaasMysqlMetrics(ctx, "/schema_latency", params) +} diff --git a/vendor/github.com/edsrzf/mmap-go/mmap_unix.go b/vendor/github.com/edsrzf/mmap-go/mmap_unix.go index 25b13e51fdf..0f956faff92 100644 --- a/vendor/github.com/edsrzf/mmap-go/mmap_unix.go +++ b/vendor/github.com/edsrzf/mmap-go/mmap_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd linux openbsd solaris netbsd +// +build darwin dragonfly freebsd linux openbsd solaris netbsd aix package mmap diff --git a/vendor/github.com/go-openapi/analysis/.cliff.toml b/vendor/github.com/go-openapi/analysis/.cliff.toml deleted file mode 100644 index 702629f5dc3..00000000000 --- a/vendor/github.com/go-openapi/analysis/.cliff.toml +++ /dev/null @@ -1,181 +0,0 @@ -# git-cliff ~ configuration file -# https://git-cliff.org/docs/configuration - -[changelog] -header = """ -""" - -footer = """ - ------ - -**[{{ remote.github.repo }}]({{ self::remote_url() }}) license terms** - -[![License][license-badge]][license-url] - -[license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg -[license-url]: {{ self::remote_url() }}/?tab=Apache-2.0-1-ov-file#readme - -{%- macro remote_url() -%} - https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }} -{%- endmacro -%} -""" - -body = """ -{%- if version %} -## [{{ version | trim_start_matches(pat="v") }}]({{ self::remote_url() }}/tree/{{ version }}) - {{ timestamp | date(format="%Y-%m-%d") }} -{%- else %} -## [unreleased] -{%- endif %} -{%- if message %} - {%- raw %}\n{% endraw %} -{{ message }} - {%- raw %}\n{% endraw %} -{%- endif %} -{%- if version %} - {%- if previous.version %} - -**Full Changelog**: <{{ self::remote_url() }}/compare/{{ previous.version }}...{{ version }}> - {%- endif %} -{%- else %} - {%- raw %}\n{% endraw %} -{%- endif %} - -{%- if statistics %}{% if statistics.commit_count %} - {%- raw %}\n{% endraw %} -{{ statistics.commit_count }} commits in this release. 
- {%- raw %}\n{% endraw %} -{%- endif %}{% endif %} ------ - -{%- for group, commits in commits | group_by(attribute="group") %} - {%- raw %}\n{% endraw %} -### {{ group | upper_first }} - {%- raw %}\n{% endraw %} - {%- for commit in commits %} - {%- if commit.remote.pr_title %} - {%- set commit_message = commit.remote.pr_title %} - {%- else %} - {%- set commit_message = commit.message %} - {%- endif %} -* {{ commit_message | split(pat="\n") | first | trim }} - {%- if commit.remote.username %} -{%- raw %} {% endraw %}by [@{{ commit.remote.username }}](https://github.com/{{ commit.remote.username }}) - {%- endif %} - {%- if commit.remote.pr_number %} -{%- raw %} {% endraw %}in [#{{ commit.remote.pr_number }}]({{ self::remote_url() }}/pull/{{ commit.remote.pr_number }}) - {%- endif %} -{%- raw %} {% endraw %}[...]({{ self::remote_url() }}/commit/{{ commit.id }}) - {%- endfor %} -{%- endfor %} - -{%- if github %} -{%- raw %}\n{% endraw -%} - {%- set all_contributors = github.contributors | length %} - {%- if github.contributors | filter(attribute="username", value="dependabot[bot]") | length < all_contributors %} ------ - -### People who contributed to this release - {% endif %} - {%- for contributor in github.contributors | filter(attribute="username") | sort(attribute="username") %} - {%- if contributor.username != "dependabot[bot]" and contributor.username != "github-actions[bot]" %} -* [@{{ contributor.username }}](https://github.com/{{ contributor.username }}) - {%- endif %} - {%- endfor %} - - {% if github.contributors | filter(attribute="is_first_time", value=true) | length != 0 %} ------ - {%- raw %}\n{% endraw %} - -### New Contributors - {%- endif %} - - {%- for contributor in github.contributors | filter(attribute="is_first_time", value=true) %} - {%- if contributor.username != "dependabot[bot]" and contributor.username != "github-actions[bot]" %} -* @{{ contributor.username }} made their first contribution - {%- if contributor.pr_number %} - in [#{{ 
contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \ - {%- endif %} - {%- endif %} - {%- endfor %} -{%- endif %} - -{%- raw %}\n{% endraw %} - -{%- macro remote_url() -%} - https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }} -{%- endmacro -%} -""" -# Remove leading and trailing whitespaces from the changelog's body. -trim = true -# Render body even when there are no releases to process. -render_always = true -# An array of regex based postprocessors to modify the changelog. -postprocessors = [ - # Replace the placeholder with a URL. - #{ pattern = '', replace = "https://github.com/orhun/git-cliff" }, -] -# output file path -# output = "test.md" - -[git] -# Parse commits according to the conventional commits specification. -# See https://www.conventionalcommits.org -conventional_commits = false -# Exclude commits that do not match the conventional commits specification. -filter_unconventional = false -# Require all commits to be conventional. -# Takes precedence over filter_unconventional. -require_conventional = false -# Split commits on newlines, treating each line as an individual commit. -split_commits = false -# An array of regex based parsers to modify commit messages prior to further processing. -commit_preprocessors = [ - # Replace issue numbers with link templates to be updated in `changelog.postprocessors`. - #{ pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](/issues/${2}))"}, - # Check spelling of the commit message using https://github.com/crate-ci/typos. - # If the spelling is incorrect, it will be fixed automatically. - #{ pattern = '.*', replace_command = 'typos --write-changes -' } -] -# Prevent commits that are breaking from being excluded by commit parsers. -protect_breaking_commits = false -# An array of regex based parsers for extracting data from the commit message. -# Assigns commits to groups. 
-# Optionally sets the commit's scope and can decide to exclude commits from further processing. -commit_parsers = [ - { message = "^[Cc]hore\\([Rr]elease\\): prepare for", skip = true }, - { message = "(^[Mm]erge)|([Mm]erge conflict)", skip = true }, - { field = "author.name", pattern = "dependabot*", group = "Updates" }, - { message = "([Ss]ecurity)|([Vv]uln)", group = "Security" }, - { body = "(.*[Ss]ecurity)|([Vv]uln)", group = "Security" }, - { message = "([Cc]hore\\(lint\\))|(style)|(lint)|(codeql)|(golangci)", group = "Code quality" }, - { message = "(^[Dd]oc)|((?i)readme)|(badge)|(typo)|(documentation)", group = "Documentation" }, - { message = "(^[Ff]eat)|(^[Ee]nhancement)", group = "Implemented enhancements" }, - { message = "(^ci)|(\\(ci\\))|(fixup\\s+ci)|(fix\\s+ci)|(license)|(example)", group = "Miscellaneous tasks" }, - { message = "^test", group = "Testing" }, - { message = "(^fix)|(panic)", group = "Fixed bugs" }, - { message = "(^refact)|(rework)", group = "Refactor" }, - { message = "(^[Pp]erf)|(performance)", group = "Performance" }, - { message = "(^[Cc]hore)", group = "Miscellaneous tasks" }, - { message = "^[Rr]evert", group = "Reverted changes" }, - { message = "(upgrade.*?go)|(go\\s+version)", group = "Updates" }, - { message = ".*", group = "Other" }, -] -# Exclude commits that are not matched by any commit parser. -filter_commits = false -# An array of link parsers for extracting external references, and turning them into URLs, using regex. -link_parsers = [] -# Include only the tags that belong to the current branch. -use_branch_tags = false -# Order releases topologically instead of chronologically. -topo_order = false -# Order releases topologically instead of chronologically. -topo_order_commits = true -# Order of commits in each group/release within the changelog. 
-# Allowed values: newest, oldest -sort_commits = "newest" -# Process submodules commits -recurse_submodules = false - -#[remote.github] -#owner = "go-openapi" diff --git a/vendor/github.com/go-openapi/analysis/.gitignore b/vendor/github.com/go-openapi/analysis/.gitignore index 87c3bd3e66e..885dc27ab0b 100644 --- a/vendor/github.com/go-openapi/analysis/.gitignore +++ b/vendor/github.com/go-openapi/analysis/.gitignore @@ -1,5 +1,6 @@ -secrets.yml -coverage.out -coverage.txt +*.out *.cov .idea +.env +.mcp.json +.claude/ diff --git a/vendor/github.com/go-openapi/analysis/.golangci.yml b/vendor/github.com/go-openapi/analysis/.golangci.yml index 05808d52a68..02edc1b9fa9 100644 --- a/vendor/github.com/go-openapi/analysis/.golangci.yml +++ b/vendor/github.com/go-openapi/analysis/.golangci.yml @@ -5,6 +5,7 @@ linters: - depguard - funlen - godox + - gomoddirectives - exhaustruct - nlreturn - nonamedreturns @@ -12,7 +13,7 @@ linters: - paralleltest - recvcheck - testpackage - - thelper # investigate how to parameterize / fix. Temporarily disabled. + - thelper - tparallel - varnamelen - whitespace diff --git a/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md index 9322b065e37..bac878f216a 100644 --- a/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md +++ b/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md @@ -23,7 +23,9 @@ include: Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or + advances + * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic @@ -55,7 +57,7 @@ further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. 
All +reported by contacting the project team at . All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. @@ -68,7 +70,7 @@ members of the project's leadership. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] +available at [][version] [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/analysis/CONTRIBUTORS.md b/vendor/github.com/go-openapi/analysis/CONTRIBUTORS.md index 03d3defe3be..cf8fcaa7dc1 100644 --- a/vendor/github.com/go-openapi/analysis/CONTRIBUTORS.md +++ b/vendor/github.com/go-openapi/analysis/CONTRIBUTORS.md @@ -4,23 +4,24 @@ | Total Contributors | Total Contributions | | --- | --- | -| 14 | 192 | +| 15 | 202 | | Username | All Time Contribution Count | All Commits | | --- | --- | --- | -| @fredbi | 90 | https://github.com/go-openapi/analysis/commits?author=fredbi | -| @casualjim | 70 | https://github.com/go-openapi/analysis/commits?author=casualjim | -| @keramix | 9 | https://github.com/go-openapi/analysis/commits?author=keramix | -| @youyuanwu | 8 | https://github.com/go-openapi/analysis/commits?author=youyuanwu | -| @msample | 3 | https://github.com/go-openapi/analysis/commits?author=msample | -| @kul-amr | 3 | https://github.com/go-openapi/analysis/commits?author=kul-amr | -| @mbohlool | 2 | https://github.com/go-openapi/analysis/commits?author=mbohlool | -| @danielfbm | 1 | https://github.com/go-openapi/analysis/commits?author=danielfbm | -| @gregmarr | 1 | https://github.com/go-openapi/analysis/commits?author=gregmarr | -| @guillemj | 1 | https://github.com/go-openapi/analysis/commits?author=guillemj | -| @knweiss | 1 | 
https://github.com/go-openapi/analysis/commits?author=knweiss | -| @tklauser | 1 | https://github.com/go-openapi/analysis/commits?author=tklauser | -| @cuishuang | 1 | https://github.com/go-openapi/analysis/commits?author=cuishuang | -| @ujjwalsh | 1 | https://github.com/go-openapi/analysis/commits?author=ujjwalsh | +| @fredbi | 99 | | +| @casualjim | 70 | | +| @keramix | 9 | | +| @youyuanwu | 8 | | +| @msample | 3 | | +| @kul-amr | 3 | | +| @mbohlool | 2 | | +| @Copilot | 1 | | +| @danielfbm | 1 | | +| @gregmarr | 1 | | +| @guillemj | 1 | | +| @knweiss | 1 | | +| @tklauser | 1 | | +| @cuishuang | 1 | | +| @ujjwalsh | 1 | | _this file was generated by the [Contributors GitHub Action](https://github.com/github/contributors)_ diff --git a/vendor/github.com/go-openapi/analysis/README.md b/vendor/github.com/go-openapi/analysis/README.md index 69e2f4b1df4..96821d3e437 100644 --- a/vendor/github.com/go-openapi/analysis/README.md +++ b/vendor/github.com/go-openapi/analysis/README.md @@ -8,12 +8,21 @@ [![Release][release-badge]][release-url] [![Go Report Card][gocard-badge]][gocard-url] [![CodeFactor Grade][codefactor-badge]][codefactor-url] [![License][license-badge]][license-url] -[![GoDoc][godoc-badge]][godoc-url] [![Slack Channel][slack-logo]![slack-badge]][slack-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge] +[![GoDoc][godoc-badge]][godoc-url] [![Discord Channel][discord-badge]][discord-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge] --- A foundational library to analyze an OAI specification document for easier reasoning about the content. 
+## Announcements + +* **2025-12-19** : new community chat on discord + * a new discord community channel is available to be notified of changes and support users + * our venerable Slack channel remains open, and will be eventually discontinued on **2026-03-31** + +You may join the discord community by clicking the invite link on the discord badge (also above). [![Discord Channel][discord-badge]][discord-url] + +Or join our Slack channel: [![Slack Channel][slack-logo]![slack-badge]][slack-url] ## Status @@ -25,7 +34,7 @@ API is stable. go get github.com/go-openapi/analysis ``` -## What's inside? +## What's inside * An analyzer providing methods to walk the functional content of a specification * A spec flattener producing a self-contained document bundle, while preserving `$ref`s @@ -35,6 +44,7 @@ go get github.com/go-openapi/analysis ## FAQ * Does this library support OpenAPI 3? + > No. > This package currently only supports OpenAPI 2.0 (aka Swagger 2.0). > There is no plan to make it evolve toward supporting OpenAPI 3.x. @@ -45,7 +55,9 @@ go get github.com/go-openapi/analysis See ## Licensing @@ -53,12 +65,14 @@ See This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE). 
## Other documentation @@ -89,21 +103,20 @@ Maintainers can cut a new release by either: [release-badge]: https://badge.fury.io/gh/go-openapi%2Fanalysis.svg [release-url]: https://badge.fury.io/gh/go-openapi%2Fanalysis -[gomod-badge]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Fanalysis.svg -[gomod-url]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Fanalysis [gocard-badge]: https://goreportcard.com/badge/github.com/go-openapi/analysis [gocard-url]: https://goreportcard.com/report/github.com/go-openapi/analysis [codefactor-badge]: https://img.shields.io/codefactor/grade/github/go-openapi/analysis [codefactor-url]: https://www.codefactor.io/repository/github/go-openapi/analysis -[doc-badge]: https://img.shields.io/badge/doc-site-blue?link=https%3A%2F%2Fgoswagger.io%2Fgo-openapi%2F -[doc-url]: https://goswagger.io/go-openapi [godoc-badge]: https://pkg.go.dev/badge/github.com/go-openapi/analysis [godoc-url]: http://pkg.go.dev/github.com/go-openapi/analysis [slack-logo]: https://a.slack-edge.com/e6a93c1/img/icons/favicon-32.png [slack-badge]: https://img.shields.io/badge/slack-blue?link=https%3A%2F%2Fgoswagger.slack.com%2Farchives%2FC04R30YM [slack-url]: https://goswagger.slack.com/archives/C04R30YMU +[discord-badge]: https://img.shields.io/discord/1446918742398341256?logo=discord&label=discord&color=blue +[discord-url]: https://discord.gg/twZ9BwT3 + [license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg [license-url]: https://github.com/go-openapi/analysis/?tab=Apache-2.0-1-ov-file#readme diff --git a/vendor/github.com/go-openapi/analysis/SECURITY.md b/vendor/github.com/go-openapi/analysis/SECURITY.md index f60adcd9e24..6ceb159ca22 100644 --- a/vendor/github.com/go-openapi/analysis/SECURITY.md +++ b/vendor/github.com/go-openapi/analysis/SECURITY.md @@ -6,14 +6,32 @@ This policy outlines the commitment and practices of the go-openapi maintainers | Version | Supported | | ------- | ------------------ | -| 0.24.x | :white_check_mark: | +| 
0.x | :white_check_mark: | + +## Vulnerability checks in place + +This repository uses automated vulnerability scans, at every merged commit and at least once a week. + +We use: + +* [`GitHub CodeQL`][codeql-url] +* [`trivy`][trivy-url] +* [`govulncheck`][govulncheck-url] + +Reports are centralized in github security reports and visible only to the maintainers. ## Reporting a vulnerability If you become aware of a security vulnerability that affects the current repository, -please report it privately to the maintainers. +**please report it privately to the maintainers** +rather than opening a publicly visible GitHub issue. + +Please follow the instructions provided by github to [Privately report a security vulnerability][github-guidance-url]. -Please follow the instructions provided by github to -[Privately report a security vulnerability](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability). +> [!NOTE] +> On Github, navigate to the project's "Security" tab then click on "Report a vulnerability". -TL;DR: on Github, navigate to the project's "Security" tab then click on "Report a vulnerability". 
+[codeql-url]: https://github.com/github/codeql +[trivy-url]: https://trivy.dev/docs/latest/getting-started +[govulncheck-url]: https://go.dev/blog/govulncheck +[github-guidance-url]: https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability diff --git a/vendor/github.com/go-openapi/analysis/analyzer.go b/vendor/github.com/go-openapi/analysis/analyzer.go index af50a6fdfdb..1c91b8c5507 100644 --- a/vendor/github.com/go-openapi/analysis/analyzer.go +++ b/vendor/github.com/go-openapi/analysis/analyzer.go @@ -306,7 +306,7 @@ func fieldNameFromParam(param *spec.Parameter) string { // whenever an error is encountered while resolving references // on parameters. // -// This function takes as input the spec.Parameter which triggered the +// This function takes as input the [spec.Parameter] which triggered the // error and the error itself. // // If the callback function returns false, the calling function should bail. @@ -329,7 +329,7 @@ func (s *Spec) ParametersFor(operationID string) []spec.Parameter { // Does not assume parameters properly resolve references or that // such references actually resolve to a parameter object. // -// Upon error, invoke a ErrorOnParamFunc callback with the erroneous +// Upon error, invoke a [ErrorOnParamFunc] callback with the erroneous // parameters. If the callback is set to nil, panics upon errors. 
func (s *Spec) SafeParametersFor(operationID string, callmeOnError ErrorOnParamFunc) []spec.Parameter { gatherParams := func(pi *spec.PathItem, op *spec.Operation) []spec.Parameter { @@ -337,7 +337,7 @@ func (s *Spec) SafeParametersFor(operationID string, callmeOnError ErrorOnParamF s.paramsAsMap(pi.Parameters, bag, callmeOnError) s.paramsAsMap(op.Parameters, bag, callmeOnError) - var res []spec.Parameter + res := make([]spec.Parameter, 0, len(bag)) for _, v := range bag { res = append(res, v) } @@ -388,7 +388,7 @@ func (s *Spec) ParamsFor(method, path string) map[string]spec.Parameter { // Does not assume parameters properly resolve references or that // such references actually resolve to a parameter object. // -// Upon error, invoke a ErrorOnParamFunc callback with the erroneous +// Upon error, invoke a [ErrorOnParamFunc] callback with the erroneous // parameters. If the callback is set to nil, panics upon errors. func (s *Spec) SafeParamsFor(method, path string, callmeOnError ErrorOnParamFunc) map[string]spec.Parameter { res := make(map[string]spec.Parameter) @@ -516,7 +516,7 @@ func (s *Spec) AllDefinitions() (result []SchemaRef) { return } -// AllDefinitionReferences returns json refs for all the discovered schemas. +// AllDefinitionReferences returns JSON references for all the discovered schemas. func (s *Spec) AllDefinitionReferences() (result []string) { for _, v := range s.references.schemas { result = append(result, v.String()) @@ -525,7 +525,7 @@ func (s *Spec) AllDefinitionReferences() (result []string) { return } -// AllParameterReferences returns json refs for all the discovered parameters. +// AllParameterReferences returns JSON references for all the discovered parameters. 
func (s *Spec) AllParameterReferences() (result []string) { for _, v := range s.references.parameters { result = append(result, v.String()) @@ -534,7 +534,7 @@ func (s *Spec) AllParameterReferences() (result []string) { return } -// AllResponseReferences returns json refs for all the discovered responses. +// AllResponseReferences returns JSON references for all the discovered responses. func (s *Spec) AllResponseReferences() (result []string) { for _, v := range s.references.responses { result = append(result, v.String()) diff --git a/vendor/github.com/go-openapi/analysis/doc.go b/vendor/github.com/go-openapi/analysis/doc.go index 9d41371a9f0..9c4b165c6f5 100644 --- a/vendor/github.com/go-openapi/analysis/doc.go +++ b/vendor/github.com/go-openapi/analysis/doc.go @@ -1,32 +1,31 @@ // SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers // SPDX-License-Identifier: Apache-2.0 -/* -Package analysis provides methods to work with a Swagger specification document from -package go-openapi/spec. - -## Analyzing a specification - -An analysed specification object (type Spec) provides methods to work with swagger definition. - -## Flattening or expanding a specification - -Flattening a specification bundles all remote $ref in the main spec document. -Depending on flattening options, additional preprocessing may take place: - - full flattening: replacing all inline complex constructs by a named entry in #/definitions - - expand: replace all $ref's in the document by their expanded content - -## Merging several specifications - -Mixin several specifications merges all Swagger constructs, and warns about found conflicts. - -## Fixing a specification - -Unmarshalling a specification with golang json unmarshalling may lead to -some unwanted result on present but empty fields. - -## Analyzing a Swagger schema - -Swagger schemas are analyzed to determine their complexity and qualify their content. 
-*/ +// Package analysis provides methods to work with a Swagger specification document from +// package go-openapi/spec. +// +// # Analyzing a specification +// +// An analysed specification object (type Spec) provides methods to work with swagger definition. +// +// # Flattening or expanding a specification +// +// Flattening a specification bundles all remote $ref in the main spec document. +// Depending on flattening options, additional preprocessing may take place: +// +// - full flattening: replacing all inline complex constructs by a named entry in #/definitions +// - expand: replace all $ref's in the document by their expanded content +// +// # Merging several specifications +// +// [Mixin] several specifications merges all Swagger constructs, and warns about found conflicts. +// +// # Fixing a specification +// +// Unmarshalling a specification with golang [json] unmarshalling may lead to +// some unwanted result on present but empty fields. +// +// # Analyzing a Swagger schema +// +// Swagger schemas are analyzed to determine their complexity and qualify their content. package analysis diff --git a/vendor/github.com/go-openapi/analysis/flatten.go b/vendor/github.com/go-openapi/analysis/flatten.go index e225b71a2b1..d7ee0064b6f 100644 --- a/vendor/github.com/go-openapi/analysis/flatten.go +++ b/vendor/github.com/go-openapi/analysis/flatten.go @@ -52,13 +52,15 @@ func newContext() *context { // There is a minimal and a full flattening mode. // // Minimally flattening a spec means: +// // - Expanding parameters, responses, path items, parameter items and header items (references to schemas are left // unscathed) -// - Importing external (http, file) references so they become internal to the document +// - Importing external ([http], file) references so they become internal to the document // - Moving every JSON pointer to a $ref to a named definition (i.e. 
the reworked spec does not contain pointers // like "$ref": "#/definitions/myObject/allOfs/1") // // A minimally flattened spec thus guarantees the following properties: +// // - all $refs point to a local definition (i.e. '#/definitions/...') // - definitions are unique // @@ -70,6 +72,7 @@ func newContext() *context { // Minimal flattening is necessary and sufficient for codegen rendering using go-swagger. // // Fully flattening a spec means: +// // - Moving every complex inline schema to be a definition with an auto-generated name in a depth-first fashion. // // By complex, we mean every JSON object with some properties. @@ -80,6 +83,7 @@ func newContext() *context { // have been created. // // Available flattening options: +// // - Minimal: stops flattening after minimal $ref processing, leaving schema constructs untouched // - Expand: expand all $ref's in the document (inoperant if Minimal set to true) // - Verbose: croaks about name conflicts detected @@ -87,8 +91,9 @@ func newContext() *context { // // NOTE: expansion removes all $ref save circular $ref, which remain in place // -// TODO: additional options -// - ProgagateNameExtensions: ensure that created entries properly follow naming rules when their parent have set a +// Desirable future additions: additional options. +// +// - PropagateNameExtensions: ensure that created entries properly follow naming rules when their parent have set a // x-go-name extension // - LiftAllOfs: // - limit the flattening of allOf members when simple objects @@ -491,14 +496,25 @@ func stripPointersAndOAIGen(opts *FlattenOpts) error { // pointer and name resolution again. func stripOAIGen(opts *FlattenOpts) (bool, error) { debugLog("stripOAIGen") + // Ensure the spec analysis is fresh, as previous steps (namePointers, etc.) might have modified refs. 
+ opts.Spec.reload() + replacedWithComplex := false // figure out referers of OAIGen definitions (doing it before the ref start mutating) - for _, r := range opts.flattenContext.newRefs { + // Sort keys to ensure deterministic processing order + sortedKeys := make([]string, 0, len(opts.flattenContext.newRefs)) + for k := range opts.flattenContext.newRefs { + sortedKeys = append(sortedKeys, k) + } + sort.Strings(sortedKeys) + + for _, k := range sortedKeys { + r := opts.flattenContext.newRefs[k] updateRefParents(opts.Spec.references.allRefs, r) } - for k := range opts.flattenContext.newRefs { + for _, k := range sortedKeys { r := opts.flattenContext.newRefs[k] debugLog("newRefs[%s]: isOAIGen: %t, resolved: %t, name: %s, path:%s, #parents: %d, parents: %v, ref: %s", k, r.isOAIGen, r.resolved, r.newName, r.path, len(r.parents), r.parents, r.schema.Ref.String()) @@ -580,6 +596,19 @@ func stripOAIGenForRef(opts *FlattenOpts, k string, r *newRef) (bool, error) { replacedWithComplex = true } } + + // update parents of the target ref (pr[0]) if it is also a newRef (OAIGen) + // This ensures that if the target is later deleted/merged, it knows about these new referers. 
+ for _, nr := range opts.flattenContext.newRefs { + if nr.path == pr[0] && nr.isOAIGen && !nr.resolved { + for _, p := range pr[1:] { + if !slices.Contains(nr.parents, p) { + nr.parents = append(nr.parents, p) + } + } + break + } + } } // remove OAIGen definition @@ -587,7 +616,15 @@ func stripOAIGenForRef(opts *FlattenOpts, k string, r *newRef) (bool, error) { delete(opts.Swagger().Definitions, path.Base(r.path)) // propagate changes in ref index for keys which have this one as a parent - for kk, value := range opts.flattenContext.newRefs { + // Sort keys to ensure deterministic update order + propagateKeys := make([]string, 0, len(opts.flattenContext.newRefs)) + for k := range opts.flattenContext.newRefs { + propagateKeys = append(propagateKeys, k) + } + sort.Strings(propagateKeys) + + for _, kk := range propagateKeys { + value := opts.flattenContext.newRefs[kk] if kk == k || !value.isOAIGen || value.resolved { continue } diff --git a/vendor/github.com/go-openapi/analysis/flatten_options.go b/vendor/github.com/go-openapi/analysis/flatten_options.go index a9e54a85ac6..23a57ea1aca 100644 --- a/vendor/github.com/go-openapi/analysis/flatten_options.go +++ b/vendor/github.com/go-openapi/analysis/flatten_options.go @@ -35,7 +35,7 @@ type FlattenOpts struct { _ struct{} // require keys } -// ExpandOpts creates a spec.ExpandOptions to configure expanding a specification document. +// ExpandOpts creates a spec.[spec.ExpandOptions] to configure expanding a specification document. func (f *FlattenOpts) ExpandOpts(skipSchemas bool) *spec.ExpandOptions { return &spec.ExpandOptions{ RelativeBase: f.BasePath, diff --git a/vendor/github.com/go-openapi/analysis/go.work b/vendor/github.com/go-openapi/analysis/go.work new file mode 100644 index 00000000000..1794cfc9790 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/go.work @@ -0,0 +1,6 @@ +go 1.24.0 + +use ( + . 
+ ./internal/testintegration +) diff --git a/vendor/github.com/go-openapi/analysis/go.work.sum b/vendor/github.com/go-openapi/analysis/go.work.sum new file mode 100644 index 00000000000..b767fb61716 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/go.work.sum @@ -0,0 +1,29 @@ +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30 h1:BHT1/DKsYDGkUgQ2jmMaozVcdk+sVfz0+1ZJq4zkWgw= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= +github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI= +go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss= +go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= +golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= +golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= +golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= +golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c= +golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod 
h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= +golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= +golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg= +golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc= +golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg= diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go b/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go index c04b6b4e74d..afeef20ea67 100644 --- a/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go +++ b/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go @@ -17,8 +17,9 @@ import ( // NOTE: does not support JSONschema ID for $ref (we assume we are working with swagger specs here). // // NOTE(windows): -// * refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec) -// * "/ in paths may appear as escape sequences. +// +// - refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec) +// - "/ in paths may appear as escape sequences. 
func RebaseRef(baseRef string, ref string) string { baseRef, _ = url.PathUnescape(baseRef) ref, _ = url.PathUnescape(ref) @@ -69,8 +70,9 @@ func RebaseRef(baseRef string, ref string) string { // Path renders absolute path on remote file refs // // NOTE(windows): -// * refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec) -// * "/ in paths may appear as escape sequences. +// +// - refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec) +// - "/ in paths may appear as escape sequences. func Path(ref spec.Ref, basePath string) string { uri, _ := url.PathUnescape(ref.String()) if ref.HasFragmentOnly || filepath.IsAbs(uri) { diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go b/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go index e8365f3333d..b4c0fdd44a1 100644 --- a/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go +++ b/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go @@ -328,7 +328,7 @@ func UpdateRefWithSchema(sp *spec.Swagger, key string, sch *spec.Schema) error { return nil } -// DeepestRefResult holds the results from DeepestRef analysis. +// DeepestRefResult holds the results from [DeepestRef] analysis. type DeepestRefResult struct { Ref spec.Ref Schema *spec.Schema @@ -336,12 +336,13 @@ type DeepestRefResult struct { } // DeepestRef finds the first definition ref, from a cascade of nested refs which are not definitions. +// // - if no definition is found, returns the deepest ref. // - pointers to external files are expanded // // NOTE: all external $ref's are assumed to be already expanded at this stage. 
// -//nolint:gocognit,cyclop,gocyclo // this is the most complex method in this package and we'll have to break it down some day +//nolint:gocognit,gocyclo,cyclop // definitely needs a refactoring, in a follow-up PR func DeepestRef(sp *spec.Swagger, opts *spec.ExpandOptions, ref spec.Ref) (*DeepestRefResult, error) { if !ref.HasFragmentOnly { // we found an external $ref, which is odd at this stage: diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go index 7ab8e4ea573..363bb19efaf 100644 --- a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go +++ b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go @@ -56,7 +56,7 @@ func (k Keys) Less(i, j int) bool { return k[i].Segments > k[j].Segments || (k[i].Segments == k[j].Segments && k[i].Key < k[j].Key) } -// KeyParts construct a SplitKey with all its /-separated segments decomposed. It is sortable. +// KeyParts construct a [SplitKey] with all its /-separated segments decomposed. It is sortable. func KeyParts(key string) SplitKey { var res []string for part := range strings.SplitSeq(key[1:], "/") { diff --git a/vendor/github.com/go-openapi/analysis/mixin.go b/vendor/github.com/go-openapi/analysis/mixin.go index cc5c392334b..a7a9306cb37 100644 --- a/vendor/github.com/go-openapi/analysis/mixin.go +++ b/vendor/github.com/go-openapi/analysis/mixin.go @@ -18,12 +18,13 @@ import ( // needed. // // The following parts of primary are subject to merge, filling empty details +// // - Info // - BasePath // - Host // - ExternalDocs // -// Consider calling FixEmptyResponseDescriptions() on the modified primary +// Consider calling [FixEmptyResponseDescriptions]() on the modified primary // if you read them from storage and they are valid to start with. // // Entries in "paths", "definitions", "parameters" and "responses" are @@ -39,7 +40,7 @@ import ( // etc). 
Ensure they are canonical if your downstream tools do // key normalization of any form. // -// Merging schemes (http, https), and consumers/producers do not account for +// Merging schemes ([http], https), and consumers/producers do not account for // collisions. func Mixin(primary *spec.Swagger, mixins ...*spec.Swagger) []string { skipped := make([]string, 0, len(mixins)) diff --git a/vendor/github.com/go-openapi/errors/.cliff.toml b/vendor/github.com/go-openapi/errors/.cliff.toml deleted file mode 100644 index 702629f5dc3..00000000000 --- a/vendor/github.com/go-openapi/errors/.cliff.toml +++ /dev/null @@ -1,181 +0,0 @@ -# git-cliff ~ configuration file -# https://git-cliff.org/docs/configuration - -[changelog] -header = """ -""" - -footer = """ - ------ - -**[{{ remote.github.repo }}]({{ self::remote_url() }}) license terms** - -[![License][license-badge]][license-url] - -[license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg -[license-url]: {{ self::remote_url() }}/?tab=Apache-2.0-1-ov-file#readme - -{%- macro remote_url() -%} - https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }} -{%- endmacro -%} -""" - -body = """ -{%- if version %} -## [{{ version | trim_start_matches(pat="v") }}]({{ self::remote_url() }}/tree/{{ version }}) - {{ timestamp | date(format="%Y-%m-%d") }} -{%- else %} -## [unreleased] -{%- endif %} -{%- if message %} - {%- raw %}\n{% endraw %} -{{ message }} - {%- raw %}\n{% endraw %} -{%- endif %} -{%- if version %} - {%- if previous.version %} - -**Full Changelog**: <{{ self::remote_url() }}/compare/{{ previous.version }}...{{ version }}> - {%- endif %} -{%- else %} - {%- raw %}\n{% endraw %} -{%- endif %} - -{%- if statistics %}{% if statistics.commit_count %} - {%- raw %}\n{% endraw %} -{{ statistics.commit_count }} commits in this release. 
- {%- raw %}\n{% endraw %} -{%- endif %}{% endif %} ------ - -{%- for group, commits in commits | group_by(attribute="group") %} - {%- raw %}\n{% endraw %} -### {{ group | upper_first }} - {%- raw %}\n{% endraw %} - {%- for commit in commits %} - {%- if commit.remote.pr_title %} - {%- set commit_message = commit.remote.pr_title %} - {%- else %} - {%- set commit_message = commit.message %} - {%- endif %} -* {{ commit_message | split(pat="\n") | first | trim }} - {%- if commit.remote.username %} -{%- raw %} {% endraw %}by [@{{ commit.remote.username }}](https://github.com/{{ commit.remote.username }}) - {%- endif %} - {%- if commit.remote.pr_number %} -{%- raw %} {% endraw %}in [#{{ commit.remote.pr_number }}]({{ self::remote_url() }}/pull/{{ commit.remote.pr_number }}) - {%- endif %} -{%- raw %} {% endraw %}[...]({{ self::remote_url() }}/commit/{{ commit.id }}) - {%- endfor %} -{%- endfor %} - -{%- if github %} -{%- raw %}\n{% endraw -%} - {%- set all_contributors = github.contributors | length %} - {%- if github.contributors | filter(attribute="username", value="dependabot[bot]") | length < all_contributors %} ------ - -### People who contributed to this release - {% endif %} - {%- for contributor in github.contributors | filter(attribute="username") | sort(attribute="username") %} - {%- if contributor.username != "dependabot[bot]" and contributor.username != "github-actions[bot]" %} -* [@{{ contributor.username }}](https://github.com/{{ contributor.username }}) - {%- endif %} - {%- endfor %} - - {% if github.contributors | filter(attribute="is_first_time", value=true) | length != 0 %} ------ - {%- raw %}\n{% endraw %} - -### New Contributors - {%- endif %} - - {%- for contributor in github.contributors | filter(attribute="is_first_time", value=true) %} - {%- if contributor.username != "dependabot[bot]" and contributor.username != "github-actions[bot]" %} -* @{{ contributor.username }} made their first contribution - {%- if contributor.pr_number %} - in [#{{ 
contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \ - {%- endif %} - {%- endif %} - {%- endfor %} -{%- endif %} - -{%- raw %}\n{% endraw %} - -{%- macro remote_url() -%} - https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }} -{%- endmacro -%} -""" -# Remove leading and trailing whitespaces from the changelog's body. -trim = true -# Render body even when there are no releases to process. -render_always = true -# An array of regex based postprocessors to modify the changelog. -postprocessors = [ - # Replace the placeholder with a URL. - #{ pattern = '', replace = "https://github.com/orhun/git-cliff" }, -] -# output file path -# output = "test.md" - -[git] -# Parse commits according to the conventional commits specification. -# See https://www.conventionalcommits.org -conventional_commits = false -# Exclude commits that do not match the conventional commits specification. -filter_unconventional = false -# Require all commits to be conventional. -# Takes precedence over filter_unconventional. -require_conventional = false -# Split commits on newlines, treating each line as an individual commit. -split_commits = false -# An array of regex based parsers to modify commit messages prior to further processing. -commit_preprocessors = [ - # Replace issue numbers with link templates to be updated in `changelog.postprocessors`. - #{ pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](/issues/${2}))"}, - # Check spelling of the commit message using https://github.com/crate-ci/typos. - # If the spelling is incorrect, it will be fixed automatically. - #{ pattern = '.*', replace_command = 'typos --write-changes -' } -] -# Prevent commits that are breaking from being excluded by commit parsers. -protect_breaking_commits = false -# An array of regex based parsers for extracting data from the commit message. -# Assigns commits to groups. 
-# Optionally sets the commit's scope and can decide to exclude commits from further processing. -commit_parsers = [ - { message = "^[Cc]hore\\([Rr]elease\\): prepare for", skip = true }, - { message = "(^[Mm]erge)|([Mm]erge conflict)", skip = true }, - { field = "author.name", pattern = "dependabot*", group = "Updates" }, - { message = "([Ss]ecurity)|([Vv]uln)", group = "Security" }, - { body = "(.*[Ss]ecurity)|([Vv]uln)", group = "Security" }, - { message = "([Cc]hore\\(lint\\))|(style)|(lint)|(codeql)|(golangci)", group = "Code quality" }, - { message = "(^[Dd]oc)|((?i)readme)|(badge)|(typo)|(documentation)", group = "Documentation" }, - { message = "(^[Ff]eat)|(^[Ee]nhancement)", group = "Implemented enhancements" }, - { message = "(^ci)|(\\(ci\\))|(fixup\\s+ci)|(fix\\s+ci)|(license)|(example)", group = "Miscellaneous tasks" }, - { message = "^test", group = "Testing" }, - { message = "(^fix)|(panic)", group = "Fixed bugs" }, - { message = "(^refact)|(rework)", group = "Refactor" }, - { message = "(^[Pp]erf)|(performance)", group = "Performance" }, - { message = "(^[Cc]hore)", group = "Miscellaneous tasks" }, - { message = "^[Rr]evert", group = "Reverted changes" }, - { message = "(upgrade.*?go)|(go\\s+version)", group = "Updates" }, - { message = ".*", group = "Other" }, -] -# Exclude commits that are not matched by any commit parser. -filter_commits = false -# An array of link parsers for extracting external references, and turning them into URLs, using regex. -link_parsers = [] -# Include only the tags that belong to the current branch. -use_branch_tags = false -# Order releases topologically instead of chronologically. -topo_order = false -# Order releases topologically instead of chronologically. -topo_order_commits = true -# Order of commits in each group/release within the changelog. 
-# Allowed values: newest, oldest -sort_commits = "newest" -# Process submodules commits -recurse_submodules = false - -#[remote.github] -#owner = "go-openapi" diff --git a/vendor/github.com/go-openapi/errors/.gitignore b/vendor/github.com/go-openapi/errors/.gitignore index 9a8da7e5064..9364443a6f0 100644 --- a/vendor/github.com/go-openapi/errors/.gitignore +++ b/vendor/github.com/go-openapi/errors/.gitignore @@ -1,3 +1,7 @@ -secrets.yml *.out +*.cov +.idea +.env +.mcp.json +.claude/ settings.local.json diff --git a/vendor/github.com/go-openapi/errors/.golangci.yml b/vendor/github.com/go-openapi/errors/.golangci.yml index fdae591bce7..e2c14be86d6 100644 --- a/vendor/github.com/go-openapi/errors/.golangci.yml +++ b/vendor/github.com/go-openapi/errors/.golangci.yml @@ -12,6 +12,7 @@ linters: - paralleltest - recvcheck - testpackage + - thelper - tparallel - varnamelen - whitespace @@ -40,6 +41,10 @@ linters: - common-false-positives - legacy - std-error-handling + rules: + - linters: + - revive + text: "avoid package names that conflict with Go standard library package names" paths: - third_party$ - builtin$ diff --git a/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md index 9322b065e37..bac878f216a 100644 --- a/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md +++ b/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md @@ -23,7 +23,9 @@ include: Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or + advances + * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic @@ -55,7 +57,7 @@ further defined and clarified by project maintainers. 
## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All +reported by contacting the project team at . All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. @@ -68,7 +70,7 @@ members of the project's leadership. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] +available at [][version] [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/errors/CONTRIBUTORS.md b/vendor/github.com/go-openapi/errors/CONTRIBUTORS.md index eb018f8aaff..d49e377a135 100644 --- a/vendor/github.com/go-openapi/errors/CONTRIBUTORS.md +++ b/vendor/github.com/go-openapi/errors/CONTRIBUTORS.md @@ -4,21 +4,22 @@ | Total Contributors | Total Contributions | | --- | --- | -| 12 | 105 | +| 13 | 110 | | Username | All Time Contribution Count | All Commits | | --- | --- | --- | -| @casualjim | 58 | https://github.com/go-openapi/errors/commits?author=casualjim | -| @fredbi | 32 | https://github.com/go-openapi/errors/commits?author=fredbi | -| @youyuanwu | 5 | https://github.com/go-openapi/errors/commits?author=youyuanwu | -| @alexandear | 2 | https://github.com/go-openapi/errors/commits?author=alexandear | -| @fiorix | 1 | https://github.com/go-openapi/errors/commits?author=fiorix | -| @ligustah | 1 | https://github.com/go-openapi/errors/commits?author=ligustah | -| @artemseleznev | 1 | https://github.com/go-openapi/errors/commits?author=artemseleznev | -| @gautierdelorme | 1 | https://github.com/go-openapi/errors/commits?author=gautierdelorme | -| @guillemj | 1 | 
https://github.com/go-openapi/errors/commits?author=guillemj | -| @maxatome | 1 | https://github.com/go-openapi/errors/commits?author=maxatome | -| @Simon-Li | 1 | https://github.com/go-openapi/errors/commits?author=Simon-Li | -| @ujjwalsh | 1 | https://github.com/go-openapi/errors/commits?author=ujjwalsh | +| @casualjim | 58 | | +| @fredbi | 36 | | +| @youyuanwu | 5 | | +| @alexandear | 2 | | +| @fiorix | 1 | | +| @ligustah | 1 | | +| @artemseleznev | 1 | | +| @gautierdelorme | 1 | | +| @guillemj | 1 | | +| @maxatome | 1 | | +| @Simon-Li | 1 | | +| @aokumasan | 1 | | +| @ujjwalsh | 1 | | _this file was generated by the [Contributors GitHub Action](https://github.com/github/contributors)_ diff --git a/vendor/github.com/go-openapi/errors/README.md b/vendor/github.com/go-openapi/errors/README.md index 6102c6b5277..d9f4a3f1514 100644 --- a/vendor/github.com/go-openapi/errors/README.md +++ b/vendor/github.com/go-openapi/errors/README.md @@ -51,7 +51,9 @@ errNotImplemented := NotImplemented("method: %s", url) See ## Licensing @@ -59,12 +61,9 @@ See This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE). 
- ## Other documentation @@ -95,23 +94,19 @@ Maintainers can cut a new release by either: [release-badge]: https://badge.fury.io/gh/go-openapi%2Ferrors.svg [release-url]: https://badge.fury.io/gh/go-openapi%2Ferrors -[gomod-badge]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Ferrors.svg -[gomod-url]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Ferrors [gocard-badge]: https://goreportcard.com/badge/github.com/go-openapi/errors [gocard-url]: https://goreportcard.com/report/github.com/go-openapi/errors [codefactor-badge]: https://img.shields.io/codefactor/grade/github/go-openapi/errors [codefactor-url]: https://www.codefactor.io/repository/github/go-openapi/errors -[doc-badge]: https://img.shields.io/badge/doc-site-blue?link=https%3A%2F%2Fgoswagger.io%2Fgo-openapi%2F -[doc-url]: https://goswagger.io/go-openapi [godoc-badge]: https://pkg.go.dev/badge/github.com/go-openapi/errors [godoc-url]: http://pkg.go.dev/github.com/go-openapi/errors [slack-logo]: https://a.slack-edge.com/e6a93c1/img/icons/favicon-32.png [slack-badge]: https://img.shields.io/badge/slack-blue?link=https%3A%2F%2Fgoswagger.slack.com%2Farchives%2FC04R30YM [slack-url]: https://goswagger.slack.com/archives/C04R30YMU [discord-badge]: https://img.shields.io/discord/1446918742398341256?logo=discord&label=discord&color=blue -[discord-url]: https://discord.gg/DrafRmZx +[discord-url]: https://discord.gg/twZ9BwT3 [license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg diff --git a/vendor/github.com/go-openapi/errors/SECURITY.md b/vendor/github.com/go-openapi/errors/SECURITY.md index 2a7b6f0910d..6ceb159ca22 100644 --- a/vendor/github.com/go-openapi/errors/SECURITY.md +++ b/vendor/github.com/go-openapi/errors/SECURITY.md @@ -6,14 +6,32 @@ This policy outlines the commitment and practices of the go-openapi maintainers | Version | Supported | | ------- | ------------------ | -| 0.22.x | :white_check_mark: | +| 0.x | :white_check_mark: | + +## Vulnerability checks in place + +This 
repository uses automated vulnerability scans, at every merged commit and at least once a week. + +We use: + +* [`GitHub CodeQL`][codeql-url] +* [`trivy`][trivy-url] +* [`govulncheck`][govulncheck-url] + +Reports are centralized in github security reports and visible only to the maintainers. ## Reporting a vulnerability If you become aware of a security vulnerability that affects the current repository, -please report it privately to the maintainers. +**please report it privately to the maintainers** +rather than opening a publicly visible GitHub issue. + +Please follow the instructions provided by github to [Privately report a security vulnerability][github-guidance-url]. -Please follow the instructions provided by github to -[Privately report a security vulnerability](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability). +> [!NOTE] +> On Github, navigate to the project's "Security" tab then click on "Report a vulnerability". -TL;DR: on Github, navigate to the project's "Security" tab then click on "Report a vulnerability". +[codeql-url]: https://github.com/github/codeql +[trivy-url]: https://trivy.dev/docs/latest/getting-started +[govulncheck-url]: https://go.dev/blog/govulncheck +[github-guidance-url]: https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability diff --git a/vendor/github.com/go-openapi/errors/api.go b/vendor/github.com/go-openapi/errors/api.go index cb139416af8..d2b4427aac5 100644 --- a/vendor/github.com/go-openapi/errors/api.go +++ b/vendor/github.com/go-openapi/errors/api.go @@ -146,7 +146,7 @@ func MethodNotAllowed(requested string, allow []string) Error { } } -// ServeError implements the http error handler interface. 
+// ServeError implements the [http] error handler interface. func ServeError(rw http.ResponseWriter, r *http.Request, err error) { rw.Header().Set("Content-Type", "application/json") diff --git a/vendor/github.com/go-openapi/errors/doc.go b/vendor/github.com/go-openapi/errors/doc.go index b4627f30f4c..208c740590e 100644 --- a/vendor/github.com/go-openapi/errors/doc.go +++ b/vendor/github.com/go-openapi/errors/doc.go @@ -1,15 +1,13 @@ // SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers // SPDX-License-Identifier: Apache-2.0 -/* -Package errors provides an Error interface and several concrete types -implementing this interface to manage API errors and JSON-schema validation -errors. - -A middleware handler ServeError() is provided to serve the errors types -it defines. - -It is used throughout the various go-openapi toolkit libraries -(https://github.com/go-openapi). -*/ +// Package errors provides an Error interface and several concrete types +// implementing this interface to manage API errors and JSON-schema validation +// errors. +// +// A middleware handler [ServeError]() is provided to serve the errors types +// it defines. +// +// It is used throughout the various go-openapi toolkit libraries. +// (https://github.com/go-openapi). 
package errors diff --git a/vendor/github.com/go-openapi/jsonpointer/.gitignore b/vendor/github.com/go-openapi/jsonpointer/.gitignore index 59cd2948915..885dc27ab0b 100644 --- a/vendor/github.com/go-openapi/jsonpointer/.gitignore +++ b/vendor/github.com/go-openapi/jsonpointer/.gitignore @@ -2,3 +2,5 @@ *.cov .idea .env +.mcp.json +.claude/ diff --git a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml index fdae591bce7..dc7c96053de 100644 --- a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml +++ b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml @@ -12,6 +12,7 @@ linters: - paralleltest - recvcheck - testpackage + - thelper - tparallel - varnamelen - whitespace diff --git a/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md index 9322b065e37..bac878f216a 100644 --- a/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md +++ b/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md @@ -23,7 +23,9 @@ include: Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or + advances + * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic @@ -55,7 +57,7 @@ further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All +reported by contacting the project team at . All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. @@ -68,7 +70,7 @@ members of the project's leadership. 
## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] +available at [][version] [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/jsonpointer/CONTRIBUTORS.md b/vendor/github.com/go-openapi/jsonpointer/CONTRIBUTORS.md index 03c098316d4..2ebebedc150 100644 --- a/vendor/github.com/go-openapi/jsonpointer/CONTRIBUTORS.md +++ b/vendor/github.com/go-openapi/jsonpointer/CONTRIBUTORS.md @@ -4,21 +4,21 @@ | Total Contributors | Total Contributions | | --- | --- | -| 12 | 95 | +| 12 | 101 | | Username | All Time Contribution Count | All Commits | | --- | --- | --- | -| @fredbi | 48 | https://github.com/go-openapi/jsonpointer/commits?author=fredbi | -| @casualjim | 33 | https://github.com/go-openapi/jsonpointer/commits?author=casualjim | -| @magodo | 3 | https://github.com/go-openapi/jsonpointer/commits?author=magodo | -| @youyuanwu | 3 | https://github.com/go-openapi/jsonpointer/commits?author=youyuanwu | -| @gaiaz-iusipov | 1 | https://github.com/go-openapi/jsonpointer/commits?author=gaiaz-iusipov | -| @gbjk | 1 | https://github.com/go-openapi/jsonpointer/commits?author=gbjk | -| @gordallott | 1 | https://github.com/go-openapi/jsonpointer/commits?author=gordallott | -| @ianlancetaylor | 1 | https://github.com/go-openapi/jsonpointer/commits?author=ianlancetaylor | -| @mfleader | 1 | https://github.com/go-openapi/jsonpointer/commits?author=mfleader | -| @Neo2308 | 1 | https://github.com/go-openapi/jsonpointer/commits?author=Neo2308 | -| @olivierlemasle | 1 | https://github.com/go-openapi/jsonpointer/commits?author=olivierlemasle | -| @testwill | 1 | https://github.com/go-openapi/jsonpointer/commits?author=testwill | +| @fredbi | 54 | | +| @casualjim | 33 | | +| @magodo | 3 | | +| @youyuanwu | 3 | | +| @gaiaz-iusipov | 1 | | +| @gbjk | 1 | | +| @gordallott | 1 | | +| 
@ianlancetaylor | 1 | | +| @mfleader | 1 | | +| @Neo2308 | 1 | | +| @olivierlemasle | 1 | | +| @testwill | 1 | | _this file was generated by the [Contributors GitHub Action](https://github.com/github/contributors)_ diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md index b61b63fd9af..c52803e2e8f 100644 --- a/vendor/github.com/go-openapi/jsonpointer/README.md +++ b/vendor/github.com/go-openapi/jsonpointer/README.md @@ -8,12 +8,22 @@ [![Release][release-badge]][release-url] [![Go Report Card][gocard-badge]][gocard-url] [![CodeFactor Grade][codefactor-badge]][codefactor-url] [![License][license-badge]][license-url] -[![GoDoc][godoc-badge]][godoc-url] [![Slack Channel][slack-logo]![slack-badge]][slack-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge] +[![GoDoc][godoc-badge]][godoc-url] [![Discord Channel][discord-badge]][discord-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge] --- An implementation of JSON Pointer for golang, which supports go `struct`. +## Announcements + +* **2025-12-19** : new community chat on discord + * a new discord community channel is available to be notified of changes and support users + * our venerable Slack channel remains open, and will be eventually discontinued on **2026-03-31** + +You may join the discord community by clicking the invite link on the discord badge (also above). [![Discord Channel][discord-badge]][discord-url] + +Or join our Slack channel: [![Slack Channel][slack-logo]![slack-badge]][slack-url] + ## Status API is stable. 
@@ -124,21 +134,20 @@ Maintainers can cut a new release by either: [release-badge]: https://badge.fury.io/gh/go-openapi%2Fjsonpointer.svg [release-url]: https://badge.fury.io/gh/go-openapi%2Fjsonpointer -[gomod-badge]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Fjsonpointer.svg -[gomod-url]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Fjsonpointer [gocard-badge]: https://goreportcard.com/badge/github.com/go-openapi/jsonpointer [gocard-url]: https://goreportcard.com/report/github.com/go-openapi/jsonpointer [codefactor-badge]: https://img.shields.io/codefactor/grade/github/go-openapi/jsonpointer [codefactor-url]: https://www.codefactor.io/repository/github/go-openapi/jsonpointer -[doc-badge]: https://img.shields.io/badge/doc-site-blue?link=https%3A%2F%2Fgoswagger.io%2Fgo-openapi%2F -[doc-url]: https://goswagger.io/go-openapi [godoc-badge]: https://pkg.go.dev/badge/github.com/go-openapi/jsonpointer [godoc-url]: http://pkg.go.dev/github.com/go-openapi/jsonpointer [slack-logo]: https://a.slack-edge.com/e6a93c1/img/icons/favicon-32.png [slack-badge]: https://img.shields.io/badge/slack-blue?link=https%3A%2F%2Fgoswagger.slack.com%2Farchives%2FC04R30YM [slack-url]: https://goswagger.slack.com/archives/C04R30YMU +[discord-badge]: https://img.shields.io/discord/1446918742398341256?logo=discord&label=discord&color=blue +[discord-url]: https://discord.gg/twZ9BwT3 + [license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg [license-url]: https://github.com/go-openapi/jsonpointer/?tab=Apache-2.0-1-ov-file#readme diff --git a/vendor/github.com/go-openapi/jsonpointer/SECURITY.md b/vendor/github.com/go-openapi/jsonpointer/SECURITY.md index 2a7b6f0910d..1fea2c5736a 100644 --- a/vendor/github.com/go-openapi/jsonpointer/SECURITY.md +++ b/vendor/github.com/go-openapi/jsonpointer/SECURITY.md @@ -6,14 +6,32 @@ This policy outlines the commitment and practices of the go-openapi maintainers | Version | Supported | | ------- | ------------------ | -| 0.22.x | 
:white_check_mark: |
+| 0.x | :white_check_mark: |
+
+## Vulnerability checks in place
+
+This repository uses automated vulnerability scans, at every merged commit and at least once a week.
+
+We use:
+
+* [`GitHub CodeQL`][codeql-url]
+* [`trivy`][trivy-url]
+* [`govulncheck`][govulncheck-url]
+
+Reports are centralized in github security reports and visible only to the maintainers.
 
 ## Reporting a vulnerability
 
 If you become aware of a security vulnerability that affects the current repository,
-please report it privately to the maintainers.
+**please report it privately to the maintainers**
+rather than opening a publicly visible GitHub issue.
+
+Please follow the instructions provided by github to [Privately report a security vulnerability][github-guidance-url].
 
-Please follow the instructions provided by github to
-[Privately report a security vulnerability](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability).
+> [!NOTE]
+> On Github, navigate to the project's "Security" tab then click on "Report a vulnerability".
-TL;DR: on Github, navigate to the project's "Security" tab then click on "Report a vulnerability".
+[codeql-url]: https://github.com/github/codeql +[trivy-url]: https://trivy.dev/docs/latest/getting-started +[govulncheck-url]: https://go.dev/blog/govulncheck +[github-guidance-url]: https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability diff --git a/vendor/github.com/go-openapi/jsonreference/.cliff.toml b/vendor/github.com/go-openapi/jsonreference/.cliff.toml deleted file mode 100644 index 702629f5dc3..00000000000 --- a/vendor/github.com/go-openapi/jsonreference/.cliff.toml +++ /dev/null @@ -1,181 +0,0 @@ -# git-cliff ~ configuration file -# https://git-cliff.org/docs/configuration - -[changelog] -header = """ -""" - -footer = """ - ------ - -**[{{ remote.github.repo }}]({{ self::remote_url() }}) license terms** - -[![License][license-badge]][license-url] - -[license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg -[license-url]: {{ self::remote_url() }}/?tab=Apache-2.0-1-ov-file#readme - -{%- macro remote_url() -%} - https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }} -{%- endmacro -%} -""" - -body = """ -{%- if version %} -## [{{ version | trim_start_matches(pat="v") }}]({{ self::remote_url() }}/tree/{{ version }}) - {{ timestamp | date(format="%Y-%m-%d") }} -{%- else %} -## [unreleased] -{%- endif %} -{%- if message %} - {%- raw %}\n{% endraw %} -{{ message }} - {%- raw %}\n{% endraw %} -{%- endif %} -{%- if version %} - {%- if previous.version %} - -**Full Changelog**: <{{ self::remote_url() }}/compare/{{ previous.version }}...{{ version }}> - {%- endif %} -{%- else %} - {%- raw %}\n{% endraw %} -{%- endif %} - -{%- if statistics %}{% if statistics.commit_count %} - {%- raw %}\n{% endraw %} -{{ statistics.commit_count }} commits in this release. 
- {%- raw %}\n{% endraw %} -{%- endif %}{% endif %} ------ - -{%- for group, commits in commits | group_by(attribute="group") %} - {%- raw %}\n{% endraw %} -### {{ group | upper_first }} - {%- raw %}\n{% endraw %} - {%- for commit in commits %} - {%- if commit.remote.pr_title %} - {%- set commit_message = commit.remote.pr_title %} - {%- else %} - {%- set commit_message = commit.message %} - {%- endif %} -* {{ commit_message | split(pat="\n") | first | trim }} - {%- if commit.remote.username %} -{%- raw %} {% endraw %}by [@{{ commit.remote.username }}](https://github.com/{{ commit.remote.username }}) - {%- endif %} - {%- if commit.remote.pr_number %} -{%- raw %} {% endraw %}in [#{{ commit.remote.pr_number }}]({{ self::remote_url() }}/pull/{{ commit.remote.pr_number }}) - {%- endif %} -{%- raw %} {% endraw %}[...]({{ self::remote_url() }}/commit/{{ commit.id }}) - {%- endfor %} -{%- endfor %} - -{%- if github %} -{%- raw %}\n{% endraw -%} - {%- set all_contributors = github.contributors | length %} - {%- if github.contributors | filter(attribute="username", value="dependabot[bot]") | length < all_contributors %} ------ - -### People who contributed to this release - {% endif %} - {%- for contributor in github.contributors | filter(attribute="username") | sort(attribute="username") %} - {%- if contributor.username != "dependabot[bot]" and contributor.username != "github-actions[bot]" %} -* [@{{ contributor.username }}](https://github.com/{{ contributor.username }}) - {%- endif %} - {%- endfor %} - - {% if github.contributors | filter(attribute="is_first_time", value=true) | length != 0 %} ------ - {%- raw %}\n{% endraw %} - -### New Contributors - {%- endif %} - - {%- for contributor in github.contributors | filter(attribute="is_first_time", value=true) %} - {%- if contributor.username != "dependabot[bot]" and contributor.username != "github-actions[bot]" %} -* @{{ contributor.username }} made their first contribution - {%- if contributor.pr_number %} - in [#{{ 
contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \ - {%- endif %} - {%- endif %} - {%- endfor %} -{%- endif %} - -{%- raw %}\n{% endraw %} - -{%- macro remote_url() -%} - https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }} -{%- endmacro -%} -""" -# Remove leading and trailing whitespaces from the changelog's body. -trim = true -# Render body even when there are no releases to process. -render_always = true -# An array of regex based postprocessors to modify the changelog. -postprocessors = [ - # Replace the placeholder with a URL. - #{ pattern = '', replace = "https://github.com/orhun/git-cliff" }, -] -# output file path -# output = "test.md" - -[git] -# Parse commits according to the conventional commits specification. -# See https://www.conventionalcommits.org -conventional_commits = false -# Exclude commits that do not match the conventional commits specification. -filter_unconventional = false -# Require all commits to be conventional. -# Takes precedence over filter_unconventional. -require_conventional = false -# Split commits on newlines, treating each line as an individual commit. -split_commits = false -# An array of regex based parsers to modify commit messages prior to further processing. -commit_preprocessors = [ - # Replace issue numbers with link templates to be updated in `changelog.postprocessors`. - #{ pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](/issues/${2}))"}, - # Check spelling of the commit message using https://github.com/crate-ci/typos. - # If the spelling is incorrect, it will be fixed automatically. - #{ pattern = '.*', replace_command = 'typos --write-changes -' } -] -# Prevent commits that are breaking from being excluded by commit parsers. -protect_breaking_commits = false -# An array of regex based parsers for extracting data from the commit message. -# Assigns commits to groups. 
-# Optionally sets the commit's scope and can decide to exclude commits from further processing. -commit_parsers = [ - { message = "^[Cc]hore\\([Rr]elease\\): prepare for", skip = true }, - { message = "(^[Mm]erge)|([Mm]erge conflict)", skip = true }, - { field = "author.name", pattern = "dependabot*", group = "Updates" }, - { message = "([Ss]ecurity)|([Vv]uln)", group = "Security" }, - { body = "(.*[Ss]ecurity)|([Vv]uln)", group = "Security" }, - { message = "([Cc]hore\\(lint\\))|(style)|(lint)|(codeql)|(golangci)", group = "Code quality" }, - { message = "(^[Dd]oc)|((?i)readme)|(badge)|(typo)|(documentation)", group = "Documentation" }, - { message = "(^[Ff]eat)|(^[Ee]nhancement)", group = "Implemented enhancements" }, - { message = "(^ci)|(\\(ci\\))|(fixup\\s+ci)|(fix\\s+ci)|(license)|(example)", group = "Miscellaneous tasks" }, - { message = "^test", group = "Testing" }, - { message = "(^fix)|(panic)", group = "Fixed bugs" }, - { message = "(^refact)|(rework)", group = "Refactor" }, - { message = "(^[Pp]erf)|(performance)", group = "Performance" }, - { message = "(^[Cc]hore)", group = "Miscellaneous tasks" }, - { message = "^[Rr]evert", group = "Reverted changes" }, - { message = "(upgrade.*?go)|(go\\s+version)", group = "Updates" }, - { message = ".*", group = "Other" }, -] -# Exclude commits that are not matched by any commit parser. -filter_commits = false -# An array of link parsers for extracting external references, and turning them into URLs, using regex. -link_parsers = [] -# Include only the tags that belong to the current branch. -use_branch_tags = false -# Order releases topologically instead of chronologically. -topo_order = false -# Order releases topologically instead of chronologically. -topo_order_commits = true -# Order of commits in each group/release within the changelog. 
-# Allowed values: newest, oldest -sort_commits = "newest" -# Process submodules commits -recurse_submodules = false - -#[remote.github] -#owner = "go-openapi" diff --git a/vendor/github.com/go-openapi/jsonreference/.gitignore b/vendor/github.com/go-openapi/jsonreference/.gitignore index 769c244007b..885dc27ab0b 100644 --- a/vendor/github.com/go-openapi/jsonreference/.gitignore +++ b/vendor/github.com/go-openapi/jsonreference/.gitignore @@ -1 +1,6 @@ -secrets.yml +*.out +*.cov +.idea +.env +.mcp.json +.claude/ diff --git a/vendor/github.com/go-openapi/jsonreference/.golangci.yml b/vendor/github.com/go-openapi/jsonreference/.golangci.yml index fdae591bce7..dc7c96053de 100644 --- a/vendor/github.com/go-openapi/jsonreference/.golangci.yml +++ b/vendor/github.com/go-openapi/jsonreference/.golangci.yml @@ -12,6 +12,7 @@ linters: - paralleltest - recvcheck - testpackage + - thelper - tparallel - varnamelen - whitespace diff --git a/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md index 9322b065e37..bac878f216a 100644 --- a/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md +++ b/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md @@ -23,7 +23,9 @@ include: Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or + advances + * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic @@ -55,7 +57,7 @@ further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All +reported by contacting the project team at . 
All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. @@ -68,7 +70,7 @@ members of the project's leadership. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] +available at [][version] [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/jsonreference/CONTRIBUTORS.md b/vendor/github.com/go-openapi/jsonreference/CONTRIBUTORS.md index 9907d5d2124..7faeb83a77d 100644 --- a/vendor/github.com/go-openapi/jsonreference/CONTRIBUTORS.md +++ b/vendor/github.com/go-openapi/jsonreference/CONTRIBUTORS.md @@ -4,11 +4,11 @@ | Total Contributors | Total Contributions | | --- | --- | -| 9 | 68 | +| 9 | 73 | | Username | All Time Contribution Count | All Commits | | --- | --- | --- | -| @fredbi | 31 | https://github.com/go-openapi/jsonreference/commits?author=fredbi | +| @fredbi | 36 | https://github.com/go-openapi/jsonreference/commits?author=fredbi | | @casualjim | 25 | https://github.com/go-openapi/jsonreference/commits?author=casualjim | | @youyuanwu | 5 | https://github.com/go-openapi/jsonreference/commits?author=youyuanwu | | @olivierlemasle | 2 | https://github.com/go-openapi/jsonreference/commits?author=olivierlemasle | diff --git a/vendor/github.com/go-openapi/jsonreference/NOTICE b/vendor/github.com/go-openapi/jsonreference/NOTICE index f3b51939a95..814e87ef8c4 100644 --- a/vendor/github.com/go-openapi/jsonreference/NOTICE +++ b/vendor/github.com/go-openapi/jsonreference/NOTICE @@ -3,7 +3,7 @@ Copyright 2015-2025 go-swagger maintainers // SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers // SPDX-License-Identifier: Apache-2.0 -This software library, 
github.com/go-openapi/jsonpointer, includes software developed +This software library, github.com/go-openapi/jsonreference, includes software developed by the go-swagger and go-openapi maintainers ("go-swagger maintainers"). Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,7 +18,7 @@ It ships with copies of other software which license terms are recalled below. The original software was authored on 25-02-2013 by sigu-399 (https://github.com/sigu-399, sigu.399@gmail.com). -github.com/sigh-399/jsonpointer +github.com/sigh-399/jsonreference =========================== // SPDX-FileCopyrightText: Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) diff --git a/vendor/github.com/go-openapi/jsonreference/README.md b/vendor/github.com/go-openapi/jsonreference/README.md index d479dbdc731..adea1606197 100644 --- a/vendor/github.com/go-openapi/jsonreference/README.md +++ b/vendor/github.com/go-openapi/jsonreference/README.md @@ -8,12 +8,22 @@ [![Release][release-badge]][release-url] [![Go Report Card][gocard-badge]][gocard-url] [![CodeFactor Grade][codefactor-badge]][codefactor-url] [![License][license-badge]][license-url] -[![GoDoc][godoc-badge]][godoc-url] [![Slack Channel][slack-logo]![slack-badge]][slack-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge] +[![GoDoc][godoc-badge]][godoc-url] [![Discord Channel][discord-badge]][discord-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge] --- An implementation of JSON Reference for golang. +## Announcements + +* **2025-12-19** : new community chat on discord + * a new discord community channel is available to be notified of changes and support users + * our venerable Slack channel remains open, and will be eventually discontinued on **2026-03-31** + +You may join the discord community by clicking the invite link on the discord badge (also above). 
[![Discord Channel][discord-badge]][discord-url] + +Or join our Slack channel: [![Slack Channel][slack-logo]![slack-badge]][slack-url] + ## Status API is stable. @@ -26,18 +36,33 @@ go get github.com/go-openapi/jsonreference ## Dependencies -* https://github.com/go-openapi/jsonpointer +* ## Basic usage +```go +// Creating a new reference +ref, err := jsonreference.New("http://example.com/doc.json#/definitions/Pet") + +// Fragment-only reference +fragRef := jsonreference.MustCreateRef("#/definitions/Pet") + +// Resolving references +parent, _ := jsonreference.New("http://example.com/base.json") +child, _ := jsonreference.New("#/definitions/Pet") +resolved, _ := parent.Inherits(child) +// Result: "http://example.com/base.json#/definitions/Pet" +``` + + ## Change log See ## References -* http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 -* http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 +* +* ## Licensing @@ -89,6 +114,9 @@ Maintainers can cut a new release by either: [slack-logo]: https://a.slack-edge.com/e6a93c1/img/icons/favicon-32.png [slack-badge]: https://img.shields.io/badge/slack-blue?link=https%3A%2F%2Fgoswagger.slack.com%2Farchives%2FC04R30YM [slack-url]: https://goswagger.slack.com/archives/C04R30YMU +[discord-badge]: https://img.shields.io/discord/1446918742398341256?logo=discord&label=discord&color=blue +[discord-url]: https://discord.gg/twZ9BwT3 + [license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg [license-url]: https://github.com/go-openapi/jsonreference/?tab=Apache-2.0-1-ov-file#readme diff --git a/vendor/github.com/go-openapi/jsonreference/SECURITY.md b/vendor/github.com/go-openapi/jsonreference/SECURITY.md index 2a7b6f0910d..1fea2c5736a 100644 --- a/vendor/github.com/go-openapi/jsonreference/SECURITY.md +++ b/vendor/github.com/go-openapi/jsonreference/SECURITY.md @@ -6,14 +6,32 @@ This policy outlines the commitment and practices of the go-openapi maintainers | Version | Supported | | ------- | 
------------------ |
-| 0.22.x | :white_check_mark: |
+| 0.x | :white_check_mark: |
+
+## Vulnerability checks in place
+
+This repository uses automated vulnerability scans, at every merged commit and at least once a week.
+
+We use:
+
+* [`GitHub CodeQL`][codeql-url]
+* [`trivy`][trivy-url]
+* [`govulncheck`][govulncheck-url]
+
+Reports are centralized in github security reports and visible only to the maintainers.
 
 ## Reporting a vulnerability
 
 If you become aware of a security vulnerability that affects the current repository,
-please report it privately to the maintainers.
+**please report it privately to the maintainers**
+rather than opening a publicly visible GitHub issue.
+
+Please follow the instructions provided by github to [Privately report a security vulnerability][github-guidance-url].
 
-Please follow the instructions provided by github to
-[Privately report a security vulnerability](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability).
+> [!NOTE]
+> On Github, navigate to the project's "Security" tab then click on "Report a vulnerability".
-TL;DR: on Github, navigate to the project's "Security" tab then click on "Report a vulnerability".
+[codeql-url]: https://github.com/github/codeql +[trivy-url]: https://trivy.dev/docs/latest/getting-started +[govulncheck-url]: https://go.dev/blog/govulncheck +[github-guidance-url]: https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability diff --git a/vendor/github.com/go-openapi/jsonreference/reference.go b/vendor/github.com/go-openapi/jsonreference/reference.go index 6e3ae499515..003ba7a838c 100644 --- a/vendor/github.com/go-openapi/jsonreference/reference.go +++ b/vendor/github.com/go-openapi/jsonreference/reference.go @@ -16,6 +16,7 @@ const ( fragmentRune = `#` ) +// ErrChildURL is raised when there is no child. var ErrChildURL = errors.New("child url is nil") // Ref represents a json reference object. diff --git a/vendor/github.com/go-openapi/loads/.gitignore b/vendor/github.com/go-openapi/loads/.gitignore index e4f15f17bfc..d8f4186fe59 100644 --- a/vendor/github.com/go-openapi/loads/.gitignore +++ b/vendor/github.com/go-openapi/loads/.gitignore @@ -1,4 +1,5 @@ -secrets.yml -coverage.out -profile.cov -profile.out +*.out +*.cov +.idea +.env +.mcp.json diff --git a/vendor/github.com/go-openapi/loads/.golangci.yml b/vendor/github.com/go-openapi/loads/.golangci.yml index 1ad5adf47e6..83968f3faeb 100644 --- a/vendor/github.com/go-openapi/loads/.golangci.yml +++ b/vendor/github.com/go-openapi/loads/.golangci.yml @@ -2,25 +2,12 @@ version: "2" linters: default: all disable: - - cyclop - depguard - - errchkjson - - errorlint - - exhaustruct - - forcetypeassert - funlen - - gochecknoglobals + - gochecknoglobals # on this repo, it is hard to refactor without globals/inits and no breaking change - gochecknoinits - - gocognit - - godot - godox - - gosmopolitan - - inamedparam - - intrange - - ireturn - - lll - - musttag - - nestif + - exhaustruct - nlreturn - nonamedreturns - noinlineerr @@ -29,7 +16,6 @@ 
linters: - testpackage - thelper - tparallel - - unparam - varnamelen - whitespace - wrapcheck @@ -41,8 +27,15 @@ linters: goconst: min-len: 2 min-occurrences: 3 + cyclop: + max-complexity: 20 gocyclo: - min-complexity: 45 + min-complexity: 20 + exhaustive: + default-signifies-exhaustive: true + default-case-required: true + lll: + line-length: 180 exclusions: generated: lax presets: @@ -58,6 +51,7 @@ formatters: enable: - gofmt - goimports + - gofumpt exclusions: generated: lax paths: diff --git a/vendor/github.com/go-openapi/loads/.travis.yml b/vendor/github.com/go-openapi/loads/.travis.yml deleted file mode 100644 index cd4a7c331bc..00000000000 --- a/vendor/github.com/go-openapi/loads/.travis.yml +++ /dev/null @@ -1,25 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.16.x -- 1.x -install: -- go get gotest.tools/gotestsum -language: go -arch: -- amd64 -- ppc64le -jobs: - include: - # include linting job, but only for latest go version and amd64 arch - - go: 1.x - arch: amd64 - install: - go get github.com/golangci/golangci-lint/cmd/golangci-lint - script: - - golangci-lint run --new-from-rev master -notifications: - slack: - secure: OxkPwVp35qBTUilgWC8xykSj+sGMcj0h8IIOKD+Rflx2schZVlFfdYdyVBM+s9OqeOfvtuvnR9v1Ye2rPKAvcjWdC4LpRGUsgmItZaI6Um8Aj6+K9udCw5qrtZVfOVmRu8LieH//XznWWKdOultUuniW0MLqw5+II87Gd00RWbCGi0hk0PykHe7uK+PDA2BEbqyZ2WKKYCvfB3j+0nrFOHScXqnh0V05l2E83J4+Sgy1fsPy+1WdX58ZlNBG333ibaC1FS79XvKSmTgKRkx3+YBo97u6ZtUmJa5WZjf2OdLG3KIckGWAv6R5xgxeU31N0Ng8L332w/Edpp2O/M2bZwdnKJ8hJQikXIAQbICbr+lTDzsoNzMdEIYcHpJ5hjPbiUl3Bmd+Jnsjf5McgAZDiWIfpCKZ29tPCEkVwRsOCqkyPRMNMzHHmoja495P5jR+ODS7+J8RFg5xgcnOgpP9D4Wlhztlf5WyZMpkLxTUD+bZq2SRf50HfHFXTkfq22zPl3d1eq0yrLwh/Z/fWKkfb6SyysROL8y6s8u3dpFX1YHSg0BR6i913h4aoZw9B2BG27cafLLTwKYsp2dFo1PWl4O6u9giFJIeqwloZHLKKrwh0cBFhB7RH0I58asxkZpCH6uWjJierahmHe7iS+E6i+9oCHkOZ59hmCYNimIs3hM= -script: -- gotestsum -f short-verbose -- -race -timeout=20m -coverprofile=coverage.txt -covermode=atomic ./... 
diff --git a/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md index 9322b065e37..bac878f216a 100644 --- a/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md +++ b/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md @@ -23,7 +23,9 @@ include: Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or + advances + * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic @@ -55,7 +57,7 @@ further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All +reported by contacting the project team at . All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. @@ -68,7 +70,7 @@ members of the project's leadership. 
## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] +available at [][version] [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/loads/CONTRIBUTORS.md b/vendor/github.com/go-openapi/loads/CONTRIBUTORS.md new file mode 100644 index 00000000000..36b836a3d5a --- /dev/null +++ b/vendor/github.com/go-openapi/loads/CONTRIBUTORS.md @@ -0,0 +1,26 @@ +# Contributors + +- Repository: ['go-openapi/loads'] + +| Total Contributors | Total Contributions | +| --- | --- | +| 14 | 123 | + +| Username | All Time Contribution Count | All Commits | +| --- | --- | --- | +| @casualjim | 48 | | +| @fredbi | 45 | | +| @youyuanwu | 6 | | +| @vburenin | 4 | | +| @keramix | 4 | | +| @orisano | 3 | | +| @GlenDC | 3 | | +| @pengsrc | 2 | | +| @a2800276 | 2 | | +| @tklauser | 2 | | +| @hypnoglow | 1 | | +| @koron | 1 | | +| @kreativka | 1 | | +| @petrkotas | 1 | | + + _this file was generated by the [Contributors GitHub Action](https://github.com/github/contributors)_ diff --git a/vendor/github.com/go-openapi/loads/README.md b/vendor/github.com/go-openapi/loads/README.md index 1f0174f2d91..d92e62a040d 100644 --- a/vendor/github.com/go-openapi/loads/README.md +++ b/vendor/github.com/go-openapi/loads/README.md @@ -1,11 +1,42 @@ -# Loads OAI specs [![Build Status](https://github.com/go-openapi/loads/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/loads/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/loads/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/loads) +# Loads OAI specs -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) 
[![GoDoc](https://godoc.org/github.com/go-openapi/loads?status.svg)](http://godoc.org/github.com/go-openapi/loads) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/loads)](https://goreportcard.com/report/github.com/go-openapi/loads) + +[![Tests][test-badge]][test-url] [![Coverage][cov-badge]][cov-url] [![CI vuln scan][vuln-scan-badge]][vuln-scan-url] [![CodeQL][codeql-badge]][codeql-url] + + + +[![Release][release-badge]][release-url] [![Go Report Card][gocard-badge]][gocard-url] [![CodeFactor Grade][codefactor-badge]][codefactor-url] [![License][license-badge]][license-url] + + +[![GoDoc][godoc-badge]][godoc-url] [![Discord Channel][discord-badge]][discord-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge] -Loading of OAI v2 API specification documents from local or remote locations. Supports JSON and YAML documents. +--- -Primary usage: +Loads OAI v2 API specification documents from local or remote locations. + +Supports JSON and YAML documents. + +## Announcements + +* **2025-12-19** : new community chat on discord + * a new discord community channel is available to be notified of changes and support users + * our venerable Slack channel remains open, and will be eventually discontinued on **2026-03-31** + +You may join the discord community by clicking the invite link on the discord badge (also above). [![Discord Channel][discord-badge]][discord-url] + +Or join our Slack channel: [![Slack Channel][slack-logo]![slack-badge]][slack-url] + +## Status + +API is stable. + +## Import this library in your project + +```cmd +go get github.com/go-openapi/loads +``` + +## Basic usage ```go import ( @@ -27,6 +58,61 @@ Primary usage: See also the provided [examples](https://pkg.go.dev/github.com/go-openapi/loads#pkg-examples). +## Change log + +See + ## Licensing This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE). 
+ +## Other documentation + +* [All-time contributors](./CONTRIBUTORS.md) +* [Contributing guidelines](.github/CONTRIBUTING.md) +* [Maintainers documentation](docs/MAINTAINERS.md) +* [Code style](docs/STYLE.md) + +## Cutting a new release + +Maintainers can cut a new release by either: + +* running [this workflow](https://github.com/go-openapi/loads/actions/workflows/bump-release.yml) +* or pushing a semver tag + * signed tags are preferred + * The tag message is prepended to release notes + + +[test-badge]: https://github.com/go-openapi/loads/actions/workflows/go-test.yml/badge.svg +[test-url]: https://github.com/go-openapi/loads/actions/workflows/go-test.yml +[cov-badge]: https://codecov.io/gh/go-openapi/loads/branch/master/graph/badge.svg +[cov-url]: https://codecov.io/gh/go-openapi/loads +[vuln-scan-badge]: https://github.com/go-openapi/loads/actions/workflows/scanner.yml/badge.svg +[vuln-scan-url]: https://github.com/go-openapi/loads/actions/workflows/scanner.yml +[codeql-badge]: https://github.com/go-openapi/loads/actions/workflows/codeql.yml/badge.svg +[codeql-url]: https://github.com/go-openapi/loads/actions/workflows/codeql.yml + +[release-badge]: https://badge.fury.io/gh/go-openapi%2Floads.svg +[release-url]: https://badge.fury.io/gh/go-openapi%2Floads + +[gocard-badge]: https://goreportcard.com/badge/github.com/go-openapi/loads +[gocard-url]: https://goreportcard.com/report/github.com/go-openapi/loads +[codefactor-badge]: https://img.shields.io/codefactor/grade/github/go-openapi/loads +[codefactor-url]: https://www.codefactor.io/repository/github/go-openapi/loads + +[godoc-badge]: https://pkg.go.dev/badge/github.com/go-openapi/loads +[godoc-url]: http://pkg.go.dev/github.com/go-openapi/loads +[slack-logo]: https://a.slack-edge.com/e6a93c1/img/icons/favicon-32.png +[slack-badge]: https://img.shields.io/badge/slack-blue?link=https%3A%2F%2Fgoswagger.slack.com%2Farchives%2FC04R30YM +[slack-url]: https://goswagger.slack.com/archives/C04R30YMU 
+[discord-badge]: https://img.shields.io/discord/1446918742398341256?logo=discord&label=discord&color=blue +[discord-url]: https://discord.gg/twZ9BwT3 + + +[license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg +[license-url]: https://github.com/go-openapi/loads/?tab=Apache-2.0-1-ov-file#readme + +[goversion-badge]: https://img.shields.io/github/go-mod/go-version/go-openapi/loads +[goversion-url]: https://github.com/go-openapi/loads/blob/master/go.mod +[top-badge]: https://img.shields.io/github/languages/top/go-openapi/loads +[commits-badge]: https://img.shields.io/github/commits-since/go-openapi/loads/latest diff --git a/vendor/github.com/go-openapi/loads/SECURITY.md b/vendor/github.com/go-openapi/loads/SECURITY.md new file mode 100644 index 00000000000..6ceb159ca22 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/SECURITY.md @@ -0,0 +1,37 @@ +# Security Policy + +This policy outlines the commitment and practices of the go-openapi maintainers regarding security. + +## Supported Versions + +| Version | Supported | +| ------- | ------------------ | +| 0.x | :white_check_mark: | + +## Vulnerability checks in place + +This repository uses automated vulnerability scans, at every merged commit and at least once a week. + +We use: + +* [`GitHub CodeQL`][codeql-url] +* [`trivy`][trivy-url] +* [`govulncheck`][govulncheck-url] + +Reports are centralized in github security reports and visible only to the maintainers. + +## Reporting a vulnerability + +If you become aware of a security vulnerability that affects the current repository, +**please report it privately to the maintainers** +rather than opening a publicly visible GitHub issue. + +Please follow the instructions provided by github to [Privately report a security vulnerability][github-guidance-url]. + +> [!NOTE] +> On Github, navigate to the project's "Security" tab then click on "Report a vulnerability". 
+ +[codeql-url]: https://github.com/github/codeql +[trivy-url]: https://trivy.dev/docs/latest/getting-started +[govulncheck-url]: https://go.dev/blog/govulncheck +[github-guidance-url]: https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability diff --git a/vendor/github.com/go-openapi/loads/doc.go b/vendor/github.com/go-openapi/loads/doc.go index 7981e70e9f1..67a5e2f8d95 100644 --- a/vendor/github.com/go-openapi/loads/doc.go +++ b/vendor/github.com/go-openapi/loads/doc.go @@ -4,4 +4,6 @@ // Package loads provides document loading methods for swagger (OAI v2) API specifications. // // It is used by other go-openapi packages to load and run analysis on local or remote spec documents. +// +// Loaders support JSON and YAML documents. package loads diff --git a/vendor/github.com/go-openapi/loads/errors.go b/vendor/github.com/go-openapi/loads/errors.go index 8f2d602f5cc..14a8186b6ca 100644 --- a/vendor/github.com/go-openapi/loads/errors.go +++ b/vendor/github.com/go-openapi/loads/errors.go @@ -10,9 +10,9 @@ func (e loaderError) Error() string { } const ( - // ErrLoads is an error returned by the loads package + // ErrLoads is an error returned by the loads package. ErrLoads loaderError = "loaderrs error" - // ErrNoLoader indicates that no configured loader matched the input + // ErrNoLoader indicates that no configured loader matched the input. ErrNoLoader loaderError = "no loader matched" ) diff --git a/vendor/github.com/go-openapi/loads/loaders.go b/vendor/github.com/go-openapi/loads/loaders.go index 25b157302e4..ac8adfe8b20 100644 --- a/vendor/github.com/go-openapi/loads/loaders.go +++ b/vendor/github.com/go-openapi/loads/loaders.go @@ -13,14 +13,12 @@ import ( "github.com/go-openapi/swag/loading" ) -var ( - // Default chain of loaders, defined at the package level. 
- // - // By default this matches json and yaml documents. - // - // May be altered with AddLoader(). - loaders *loader -) +// Default chain of loaders, defined at the package level. +// +// By default this matches json and yaml documents. +// +// May be altered with AddLoader(). +var loaders *loader func init() { jsonLoader := &loader{ @@ -43,10 +41,10 @@ func init() { spec.PathLoader = loaders.Load } -// DocLoader represents a doc loader type +// DocLoader represents a doc loader type. type DocLoader func(string, ...loading.Option) (json.RawMessage, error) -// DocMatcher represents a predicate to check if a loader matches +// DocMatcher represents a predicate to check if a loader matches. type DocMatcher func(string) bool // DocLoaderWithMatch describes a loading function for a given extension match. @@ -55,7 +53,7 @@ type DocLoaderWithMatch struct { Match DocMatcher } -// NewDocLoaderWithMatch builds a DocLoaderWithMatch to be used in load options +// NewDocLoaderWithMatch builds a [DocLoaderWithMatch] to be used in load options. func NewDocLoaderWithMatch(fn DocLoader, matcher DocMatcher) DocLoaderWithMatch { return DocLoaderWithMatch{ Fn: fn, @@ -71,7 +69,7 @@ type loader struct { Next *loader } -// WithHead adds a loader at the head of the current stack +// WithHead adds a loader at the head of the current stack. func (l *loader) WithHead(head *loader) *loader { if head == nil { return l @@ -80,13 +78,13 @@ func (l *loader) WithHead(head *loader) *loader { return head } -// WithNext adds a loader at the trail of the current stack +// WithNext adds a loader at the trail of the current stack. func (l *loader) WithNext(next *loader) *loader { l.Next = next return next } -// Load the raw document from path +// Load the raw document from path. 
func (l *loader) Load(path string) (json.RawMessage, error) { _, erp := url.Parse(path) if erp != nil { @@ -123,9 +121,9 @@ func (l *loader) clone() *loader { } } -// JSONDoc loads a json document from either a file or a remote url. +// JSONDoc loads a json document from either a file or a remote URL. // -// See [loading.Option] for available options (e.g. configuring authentifaction, +// See [loading.Option] for available options (e.g. configuring authentication, // headers or using embedded file system resources). func JSONDoc(path string, opts ...loading.Option) (json.RawMessage, error) { data, err := loading.LoadFromFileOrHTTP(path, opts...) @@ -139,9 +137,10 @@ func JSONDoc(path string, opts ...loading.Option) (json.RawMessage, error) { // // This sets the configuration at the package level. // -// NOTE: -// - this updates the default loader used by github.com/go-openapi/spec -// - since this sets package level globals, you shouln't call this concurrently +// # Concurrency +// +// This function updates the default loader used by [github.com/go-openapi/spec]. +// Since this sets package level globals, you shouldn't call this concurrently. func AddLoader(predicate DocMatcher, load DocLoader) { loaders = loaders.WithHead(&loader{ DocLoaderWithMatch: DocLoaderWithMatch{ diff --git a/vendor/github.com/go-openapi/loads/options.go b/vendor/github.com/go-openapi/loads/options.go index adb5e6d15b0..045ece5e095 100644 --- a/vendor/github.com/go-openapi/loads/options.go +++ b/vendor/github.com/go-openapi/loads/options.go @@ -28,10 +28,10 @@ func loaderFromOptions(options []LoaderOption) *loader { return l } -// LoaderOption allows to fine-tune the spec loader behavior +// LoaderOption allows to fine-tune the spec loader behavior. type LoaderOption func(*options) -// WithDocLoader sets a custom loader for loading specs +// WithDocLoader sets a custom loader for loading specs. 
func WithDocLoader(l DocLoader) LoaderOption { return func(opt *options) { if l == nil { @@ -48,7 +48,7 @@ func WithDocLoader(l DocLoader) LoaderOption { // WithDocLoaderMatches sets a chain of custom loaders for loading specs // for different extension matches. // -// Loaders are executed in the order of provided DocLoaderWithMatch'es. +// Loaders are executed in the order of provided [DocLoaderWithMatch] 'es. func WithDocLoaderMatches(l ...DocLoaderWithMatch) LoaderOption { return func(opt *options) { var final, prev *loader diff --git a/vendor/github.com/go-openapi/loads/spec.go b/vendor/github.com/go-openapi/loads/spec.go index 213c40c657a..606a01d8e9f 100644 --- a/vendor/github.com/go-openapi/loads/spec.go +++ b/vendor/github.com/go-openapi/loads/spec.go @@ -21,7 +21,7 @@ func init() { gob.Register([]any{}) } -// Document represents a swagger spec document +// Document represents a swagger spec document. type Document struct { // specAnalyzer Analyzer *analysis.Spec @@ -33,7 +33,7 @@ type Document struct { raw json.RawMessage } -// JSONSpec loads a spec from a json document, using the [JSONDoc] loader. +// JSONSpec loads a spec from a JSON document, using the [JSONDoc] loader. // // A set of [loading.Option] may be passed to this loader using [WithLoadingOptions]. func JSONSpec(path string, opts ...LoaderOption) (*Document, error) { @@ -57,7 +57,7 @@ func JSONSpec(path string, opts ...LoaderOption) (*Document, error) { return doc, nil } -// Embedded returns a Document based on embedded specs (i.e. as a raw [json.RawMessage]). No analysis is required +// Embedded returns a Document based on embedded specs (i.e. as a [json.RawMessage]). No analysis is required. 
func Embedded(orig, flat json.RawMessage, opts ...LoaderOption) (*Document, error) { var origSpec, flatSpec spec.Swagger if err := json.Unmarshal(orig, &origSpec); err != nil { @@ -96,7 +96,7 @@ func Spec(path string, opts ...LoaderOption) (*Document, error) { return document, nil } -// Analyzed creates a new analyzed spec document for a root json.RawMessage. +// Analyzed creates a new analyzed spec document for a root [json.RawMessage]. func Analyzed(data json.RawMessage, version string, options ...LoaderOption) (*Document, error) { if version == "" { version = "2.0" @@ -145,18 +145,18 @@ func trimData(in json.RawMessage) (json.RawMessage, error) { // assume yaml doc: convert it to json yml, err := yamlutils.BytesToYAMLDoc(trimmed) if err != nil { - return nil, fmt.Errorf("analyzed: %v: %w", err, ErrLoads) + return nil, fmt.Errorf("analyzed: %w: %w", err, ErrLoads) } d, err := yamlutils.YAMLToJSON(yml) if err != nil { - return nil, fmt.Errorf("analyzed: %v: %w", err, ErrLoads) + return nil, fmt.Errorf("analyzed: %w: %w", err, ErrLoads) } return d, nil } -// Expanded expands the $ref fields in the spec [Document] and returns a new expanded [Document] +// Expanded expands the $ref fields in the spec [Document] and returns a new expanded [Document]. func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) { swspec := new(spec.Swagger) if err := json.Unmarshal(d.raw, swspec); err != nil { @@ -200,7 +200,7 @@ func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) { return dd, nil } -// BasePath the base path for the API specified by this spec +// BasePath the base path for the API specified by this spec. func (d *Document) BasePath() string { if d.spec == nil { return "" @@ -208,37 +208,37 @@ func (d *Document) BasePath() string { return d.spec.BasePath } -// Version returns the OpenAPI version of this spec (e.g. 2.0) +// Version returns the OpenAPI version of this spec (e.g. 2.0). 
func (d *Document) Version() string { return d.spec.Swagger } -// Schema returns the swagger 2.0 meta-schema +// Schema returns the swagger 2.0 meta-schema. func (d *Document) Schema() *spec.Schema { return d.schema } -// Spec returns the swagger object model for this API specification +// Spec returns the swagger object model for this API specification. func (d *Document) Spec() *spec.Swagger { return d.spec } -// Host returns the host for the API +// Host returns the host for the API. func (d *Document) Host() string { return d.spec.Host } -// Raw returns the raw swagger spec as json bytes +// Raw returns the raw swagger spec as json bytes. func (d *Document) Raw() json.RawMessage { return d.raw } -// OrigSpec yields the original spec +// OrigSpec yields the original spec. func (d *Document) OrigSpec() *spec.Swagger { return d.origSpec } -// ResetDefinitions yields a shallow copy with the models reset to the original spec +// ResetDefinitions yields a shallow copy with the models reset to the original spec. func (d *Document) ResetDefinitions() *Document { d.spec.Definitions = make(map[string]spec.Schema, len(d.origSpec.Definitions)) maps.Copy(d.spec.Definitions, d.origSpec.Definitions) @@ -246,9 +246,9 @@ func (d *Document) ResetDefinitions() *Document { return d } -// Pristine creates a new pristine document instance based on the input data +// Pristine creates a new pristine document instance based on the input data. func (d *Document) Pristine() *Document { - raw, _ := json.Marshal(d.Spec()) + raw, _ := json.Marshal(d.Spec()) //nolint:errchkjson // the spec always marshals to JSON dd, _ := Analyzed(raw, d.Version()) dd.pathLoader = d.pathLoader dd.specFilePath = d.specFilePath @@ -256,7 +256,7 @@ func (d *Document) Pristine() *Document { return dd } -// SpecFilePath returns the file path of the spec if one is defined +// SpecFilePath returns the file path of the spec if one is defined. 
func (d *Document) SpecFilePath() string { return d.specFilePath } diff --git a/vendor/github.com/go-openapi/spec/.cliff.toml b/vendor/github.com/go-openapi/spec/.cliff.toml deleted file mode 100644 index 702629f5dc3..00000000000 --- a/vendor/github.com/go-openapi/spec/.cliff.toml +++ /dev/null @@ -1,181 +0,0 @@ -# git-cliff ~ configuration file -# https://git-cliff.org/docs/configuration - -[changelog] -header = """ -""" - -footer = """ - ------ - -**[{{ remote.github.repo }}]({{ self::remote_url() }}) license terms** - -[![License][license-badge]][license-url] - -[license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg -[license-url]: {{ self::remote_url() }}/?tab=Apache-2.0-1-ov-file#readme - -{%- macro remote_url() -%} - https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }} -{%- endmacro -%} -""" - -body = """ -{%- if version %} -## [{{ version | trim_start_matches(pat="v") }}]({{ self::remote_url() }}/tree/{{ version }}) - {{ timestamp | date(format="%Y-%m-%d") }} -{%- else %} -## [unreleased] -{%- endif %} -{%- if message %} - {%- raw %}\n{% endraw %} -{{ message }} - {%- raw %}\n{% endraw %} -{%- endif %} -{%- if version %} - {%- if previous.version %} - -**Full Changelog**: <{{ self::remote_url() }}/compare/{{ previous.version }}...{{ version }}> - {%- endif %} -{%- else %} - {%- raw %}\n{% endraw %} -{%- endif %} - -{%- if statistics %}{% if statistics.commit_count %} - {%- raw %}\n{% endraw %} -{{ statistics.commit_count }} commits in this release. 
- {%- raw %}\n{% endraw %} -{%- endif %}{% endif %} ------ - -{%- for group, commits in commits | group_by(attribute="group") %} - {%- raw %}\n{% endraw %} -### {{ group | upper_first }} - {%- raw %}\n{% endraw %} - {%- for commit in commits %} - {%- if commit.remote.pr_title %} - {%- set commit_message = commit.remote.pr_title %} - {%- else %} - {%- set commit_message = commit.message %} - {%- endif %} -* {{ commit_message | split(pat="\n") | first | trim }} - {%- if commit.remote.username %} -{%- raw %} {% endraw %}by [@{{ commit.remote.username }}](https://github.com/{{ commit.remote.username }}) - {%- endif %} - {%- if commit.remote.pr_number %} -{%- raw %} {% endraw %}in [#{{ commit.remote.pr_number }}]({{ self::remote_url() }}/pull/{{ commit.remote.pr_number }}) - {%- endif %} -{%- raw %} {% endraw %}[...]({{ self::remote_url() }}/commit/{{ commit.id }}) - {%- endfor %} -{%- endfor %} - -{%- if github %} -{%- raw %}\n{% endraw -%} - {%- set all_contributors = github.contributors | length %} - {%- if github.contributors | filter(attribute="username", value="dependabot[bot]") | length < all_contributors %} ------ - -### People who contributed to this release - {% endif %} - {%- for contributor in github.contributors | filter(attribute="username") | sort(attribute="username") %} - {%- if contributor.username != "dependabot[bot]" and contributor.username != "github-actions[bot]" %} -* [@{{ contributor.username }}](https://github.com/{{ contributor.username }}) - {%- endif %} - {%- endfor %} - - {% if github.contributors | filter(attribute="is_first_time", value=true) | length != 0 %} ------ - {%- raw %}\n{% endraw %} - -### New Contributors - {%- endif %} - - {%- for contributor in github.contributors | filter(attribute="is_first_time", value=true) %} - {%- if contributor.username != "dependabot[bot]" and contributor.username != "github-actions[bot]" %} -* @{{ contributor.username }} made their first contribution - {%- if contributor.pr_number %} - in [#{{ 
contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \ - {%- endif %} - {%- endif %} - {%- endfor %} -{%- endif %} - -{%- raw %}\n{% endraw %} - -{%- macro remote_url() -%} - https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }} -{%- endmacro -%} -""" -# Remove leading and trailing whitespaces from the changelog's body. -trim = true -# Render body even when there are no releases to process. -render_always = true -# An array of regex based postprocessors to modify the changelog. -postprocessors = [ - # Replace the placeholder with a URL. - #{ pattern = '', replace = "https://github.com/orhun/git-cliff" }, -] -# output file path -# output = "test.md" - -[git] -# Parse commits according to the conventional commits specification. -# See https://www.conventionalcommits.org -conventional_commits = false -# Exclude commits that do not match the conventional commits specification. -filter_unconventional = false -# Require all commits to be conventional. -# Takes precedence over filter_unconventional. -require_conventional = false -# Split commits on newlines, treating each line as an individual commit. -split_commits = false -# An array of regex based parsers to modify commit messages prior to further processing. -commit_preprocessors = [ - # Replace issue numbers with link templates to be updated in `changelog.postprocessors`. - #{ pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](/issues/${2}))"}, - # Check spelling of the commit message using https://github.com/crate-ci/typos. - # If the spelling is incorrect, it will be fixed automatically. - #{ pattern = '.*', replace_command = 'typos --write-changes -' } -] -# Prevent commits that are breaking from being excluded by commit parsers. -protect_breaking_commits = false -# An array of regex based parsers for extracting data from the commit message. -# Assigns commits to groups. 
-# Optionally sets the commit's scope and can decide to exclude commits from further processing. -commit_parsers = [ - { message = "^[Cc]hore\\([Rr]elease\\): prepare for", skip = true }, - { message = "(^[Mm]erge)|([Mm]erge conflict)", skip = true }, - { field = "author.name", pattern = "dependabot*", group = "Updates" }, - { message = "([Ss]ecurity)|([Vv]uln)", group = "Security" }, - { body = "(.*[Ss]ecurity)|([Vv]uln)", group = "Security" }, - { message = "([Cc]hore\\(lint\\))|(style)|(lint)|(codeql)|(golangci)", group = "Code quality" }, - { message = "(^[Dd]oc)|((?i)readme)|(badge)|(typo)|(documentation)", group = "Documentation" }, - { message = "(^[Ff]eat)|(^[Ee]nhancement)", group = "Implemented enhancements" }, - { message = "(^ci)|(\\(ci\\))|(fixup\\s+ci)|(fix\\s+ci)|(license)|(example)", group = "Miscellaneous tasks" }, - { message = "^test", group = "Testing" }, - { message = "(^fix)|(panic)", group = "Fixed bugs" }, - { message = "(^refact)|(rework)", group = "Refactor" }, - { message = "(^[Pp]erf)|(performance)", group = "Performance" }, - { message = "(^[Cc]hore)", group = "Miscellaneous tasks" }, - { message = "^[Rr]evert", group = "Reverted changes" }, - { message = "(upgrade.*?go)|(go\\s+version)", group = "Updates" }, - { message = ".*", group = "Other" }, -] -# Exclude commits that are not matched by any commit parser. -filter_commits = false -# An array of link parsers for extracting external references, and turning them into URLs, using regex. -link_parsers = [] -# Include only the tags that belong to the current branch. -use_branch_tags = false -# Order releases topologically instead of chronologically. -topo_order = false -# Order releases topologically instead of chronologically. -topo_order_commits = true -# Order of commits in each group/release within the changelog. 
-# Allowed values: newest, oldest -sort_commits = "newest" -# Process submodules commits -recurse_submodules = false - -#[remote.github] -#owner = "go-openapi" diff --git a/vendor/github.com/go-openapi/spec/.gitignore b/vendor/github.com/go-openapi/spec/.gitignore index f47cb2045f1..885dc27ab0b 100644 --- a/vendor/github.com/go-openapi/spec/.gitignore +++ b/vendor/github.com/go-openapi/spec/.gitignore @@ -1 +1,6 @@ *.out +*.cov +.idea +.env +.mcp.json +.claude/ diff --git a/vendor/github.com/go-openapi/spec/.golangci.yml b/vendor/github.com/go-openapi/spec/.golangci.yml index fdae591bce7..dc7c96053de 100644 --- a/vendor/github.com/go-openapi/spec/.golangci.yml +++ b/vendor/github.com/go-openapi/spec/.golangci.yml @@ -12,6 +12,7 @@ linters: - paralleltest - recvcheck - testpackage + - thelper - tparallel - varnamelen - whitespace diff --git a/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md index 9322b065e37..bac878f216a 100644 --- a/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md +++ b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md @@ -23,7 +23,9 @@ include: Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or + advances + * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic @@ -55,7 +57,7 @@ further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All +reported by contacting the project team at <ivan+abuse@flanders.co.nz>. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. 
The project team is obligated to maintain confidentiality with regard to the reporter of an incident. @@ -68,7 +70,7 @@ members of the project's leadership. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] +available at [][version] [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/spec/CONTRIBUTORS.md b/vendor/github.com/go-openapi/spec/CONTRIBUTORS.md index d97b9d330a1..2967e3cedd9 100644 --- a/vendor/github.com/go-openapi/spec/CONTRIBUTORS.md +++ b/vendor/github.com/go-openapi/spec/CONTRIBUTORS.md @@ -4,47 +4,47 @@ | Total Contributors | Total Contributions | | --- | --- | -| 38 | 391 | +| 38 | 392 | | Username | All Time Contribution Count | All Commits | | --- | --- | --- | -| @casualjim | 191 | https://github.com/go-openapi/spec/commits?author=casualjim | -| @fredbi | 89 | https://github.com/go-openapi/spec/commits?author=fredbi | -| @pytlesk4 | 26 | https://github.com/go-openapi/spec/commits?author=pytlesk4 | -| @kul-amr | 10 | https://github.com/go-openapi/spec/commits?author=kul-amr | -| @keramix | 10 | https://github.com/go-openapi/spec/commits?author=keramix | -| @youyuanwu | 8 | https://github.com/go-openapi/spec/commits?author=youyuanwu | -| @pengsrc | 7 | https://github.com/go-openapi/spec/commits?author=pengsrc | -| @alphacentory | 5 | https://github.com/go-openapi/spec/commits?author=alphacentory | -| @mtfelian | 4 | https://github.com/go-openapi/spec/commits?author=mtfelian | -| @Capstan | 4 | https://github.com/go-openapi/spec/commits?author=Capstan | -| @sdghchj | 4 | https://github.com/go-openapi/spec/commits?author=sdghchj | -| @databus23 | 2 | https://github.com/go-openapi/spec/commits?author=databus23 | -| @vburenin | 2 | https://github.com/go-openapi/spec/commits?author=vburenin | -| @petrkotas | 2 | 
https://github.com/go-openapi/spec/commits?author=petrkotas | -| @nikhita | 2 | https://github.com/go-openapi/spec/commits?author=nikhita | -| @hypnoglow | 2 | https://github.com/go-openapi/spec/commits?author=hypnoglow | -| @carvind | 2 | https://github.com/go-openapi/spec/commits?author=carvind | -| @ujjwalsh | 1 | https://github.com/go-openapi/spec/commits?author=ujjwalsh | -| @mbohlool | 1 | https://github.com/go-openapi/spec/commits?author=mbohlool | -| @j2gg0s | 1 | https://github.com/go-openapi/spec/commits?author=j2gg0s | -| @ishveda | 1 | https://github.com/go-openapi/spec/commits?author=ishveda | -| @micln | 1 | https://github.com/go-openapi/spec/commits?author=micln | -| @GlenDC | 1 | https://github.com/go-openapi/spec/commits?author=GlenDC | -| @agmikhailov | 1 | https://github.com/go-openapi/spec/commits?author=agmikhailov | -| @tgraf | 1 | https://github.com/go-openapi/spec/commits?author=tgraf | -| @zhsj | 1 | https://github.com/go-openapi/spec/commits?author=zhsj | -| @sebastien-rosset | 1 | https://github.com/go-openapi/spec/commits?author=sebastien-rosset | -| @alexandear | 1 | https://github.com/go-openapi/spec/commits?author=alexandear | -| @morlay | 1 | https://github.com/go-openapi/spec/commits?author=morlay | -| @mikedanese | 1 | https://github.com/go-openapi/spec/commits?author=mikedanese | -| @koron | 1 | https://github.com/go-openapi/spec/commits?author=koron | -| @honza | 1 | https://github.com/go-openapi/spec/commits?author=honza | -| @gbjk | 1 | https://github.com/go-openapi/spec/commits?author=gbjk | -| @faguirre1 | 1 | https://github.com/go-openapi/spec/commits?author=faguirre1 | -| @ethantkoenig | 1 | https://github.com/go-openapi/spec/commits?author=ethantkoenig | -| @sttts | 1 | https://github.com/go-openapi/spec/commits?author=sttts | -| @ChandanChainani | 1 | https://github.com/go-openapi/spec/commits?author=ChandanChainani | -| @bvwells | 1 | https://github.com/go-openapi/spec/commits?author=bvwells | +| @casualjim | 191 | | +| 
@fredbi | 90 | | +| @pytlesk4 | 26 | | +| @kul-amr | 10 | | +| @keramix | 10 | | +| @youyuanwu | 8 | | +| @pengsrc | 7 | | +| @alphacentory | 5 | | +| @mtfelian | 4 | | +| @Capstan | 4 | | +| @sdghchj | 4 | | +| @databus23 | 2 | | +| @vburenin | 2 | | +| @petrkotas | 2 | | +| @nikhita | 2 | | +| @hypnoglow | 2 | | +| @carvind | 2 | | +| @ujjwalsh | 1 | | +| @mbohlool | 1 | | +| @j2gg0s | 1 | | +| @ishveda | 1 | | +| @micln | 1 | | +| @GlenDC | 1 | | +| @agmikhailov | 1 | | +| @tgraf | 1 | | +| @zhsj | 1 | | +| @sebastien-rosset | 1 | | +| @alexandear | 1 | | +| @morlay | 1 | | +| @mikedanese | 1 | | +| @koron | 1 | | +| @honza | 1 | | +| @gbjk | 1 | | +| @faguirre1 | 1 | | +| @ethantkoenig | 1 | | +| @sttts | 1 | | +| @ChandanChainani | 1 | | +| @bvwells | 1 | | _this file was generated by the [Contributors GitHub Action](https://github.com/github/contributors)_ diff --git a/vendor/github.com/go-openapi/spec/README.md b/vendor/github.com/go-openapi/spec/README.md index 13a2a17eae2..134809fd77a 100644 --- a/vendor/github.com/go-openapi/spec/README.md +++ b/vendor/github.com/go-openapi/spec/README.md @@ -55,7 +55,7 @@ go get github.com/go-openapi/spec > There is no plan to make it evolve toward supporting OpenAPI 3.x. > This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story. > -> An early attempt to support Swagger 3 may be found at: https://github.com/go-openapi/spec3 +> An early attempt to support Swagger 3 may be found at: * Does the unmarshaling support YAML? @@ -64,13 +64,13 @@ go get github.com/go-openapi/spec > In order to load a YAML document as a Swagger spec, you need to use the loaders provided by > github.com/go-openapi/loads > -> Take a look at the example there: https://pkg.go.dev/github.com/go-openapi/loads#example-Spec +> Take a look at the example there: > -> See also https://github.com/go-openapi/spec/issues/164 +> See also * How can I validate a spec? 
-> Validation is provided by [the validate package](http://github.com/go-openapi/validate) +> Validation is provided by [the validate package](http://github.com/go-openapi/validate) * Why do we have an `ID` field for `Schema` which is not part of the swagger spec? @@ -78,7 +78,7 @@ go get github.com/go-openapi/spec > how `$ref` are resolved. > This `id` does not conflict with any property named `id`. > -> See also https://github.com/go-openapi/spec/issues/23 +> See also <https://github.com/go-openapi/spec/issues/23> ## Change log @@ -136,7 +136,7 @@ Maintainers can cut a new release by either: [slack-badge]: https://img.shields.io/badge/slack-blue?link=https%3A%2F%2Fgoswagger.slack.com%2Farchives%2FC04R30YM [slack-url]: https://goswagger.slack.com/archives/C04R30YMU [discord-badge]: https://img.shields.io/discord/1446918742398341256?logo=discord&label=discord&color=blue -[discord-url]: https://discord.gg/DrafRmZx +[discord-url]: https://discord.gg/twZ9BwT3 [license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg diff --git a/vendor/github.com/go-openapi/spec/SECURITY.md b/vendor/github.com/go-openapi/spec/SECURITY.md index 2a7b6f0910d..1fea2c5736a 100644 --- a/vendor/github.com/go-openapi/spec/SECURITY.md +++ b/vendor/github.com/go-openapi/spec/SECURITY.md @@ -6,14 +6,32 @@ This policy outlines the commitment and practices of the go-openapi maintainers | Version | Supported | | ------- | ------------------ | -| 0.22.x | :white_check_mark: | +| 0.x | :white_check_mark: | + +## Vulnerability checks in place + +This repository uses automated vulnerability scans, at every merged commit and at least once a week. + +We use: + +* [`GitHub CodeQL`][codeql-url] +* [`trivy`][trivy-url] +* [`govulncheck`][govulncheck-url] + +Reports are centralized in github security reports and visible only to the maintainers. ## Reporting a vulnerability If you become aware of a security vulnerability that affects the current repository, -please report it privately to the maintainers. 
+**please report it privately to the maintainers** +rather than opening a publicly visible GitHub issue. + +Please follow the instructions provided by github to [Privately report a security vulnerability][github-guidance-url]. -Please follow the instructions provided by github to -[Privately report a security vulnerability](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability). +> [!NOTE] +> On Github, navigate to the project's "Security" tab then click on "Report a vulnerability". -TL;DR: on Github, navigate to the project's "Security" tab then click on "Report a vulnerability". +[codeql-url]: https://github.com/github/codeql +[trivy-url]: https://trivy.dev/docs/latest/getting-started +[govulncheck-url]: https://go.dev/blog/govulncheck +[github-guidance-url]: https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability diff --git a/vendor/github.com/go-openapi/spec/cache.go b/vendor/github.com/go-openapi/spec/cache.go index 10fba77a839..06495d2c3d3 100644 --- a/vendor/github.com/go-openapi/spec/cache.go +++ b/vendor/github.com/go-openapi/spec/cache.go @@ -8,10 +8,10 @@ import ( "sync" ) -// ResolutionCache a cache for resolving urls +// ResolutionCache a cache for resolving urls. 
type ResolutionCache interface { - Get(string) (any, bool) - Set(string, any) + Get(uri string) (any, bool) + Set(uri string, data any) } type simpleCache struct { @@ -19,7 +19,7 @@ type simpleCache struct { store map[string]any } -func (s *simpleCache) ShallowClone() ResolutionCache { +func (s *simpleCache) ShallowClone() ResolutionCache { //nolint:ireturn // returns the public interface type by design store := make(map[string]any, len(s.store)) s.lock.RLock() maps.Copy(store, s.store) @@ -30,7 +30,7 @@ func (s *simpleCache) ShallowClone() ResolutionCache { } } -// Get retrieves a cached URI +// Get retrieves a cached URI. func (s *simpleCache) Get(uri string) (any, bool) { s.lock.RLock() v, ok := s.store[uri] @@ -39,7 +39,7 @@ func (s *simpleCache) Get(uri string) (any, bool) { return v, ok } -// Set caches a URI +// Set caches a URI. func (s *simpleCache) Set(uri string, data any) { s.lock.Lock() s.store[uri] = data @@ -56,8 +56,8 @@ var ( // // All subsequent utilizations of this cache are produced from a shallow // clone of this initial version. 
- resCache *simpleCache - onceCache sync.Once + resCache *simpleCache //nolint:gochecknoglobals // package-level lazy cache for $ref resolution + onceCache sync.Once //nolint:gochecknoglobals // guards lazy init of resCache _ ResolutionCache = &simpleCache{} ) @@ -74,7 +74,7 @@ func defaultResolutionCache() *simpleCache { }} } -func cacheOrDefault(cache ResolutionCache) ResolutionCache { +func cacheOrDefault(cache ResolutionCache) ResolutionCache { //nolint:ireturn // returns the public interface type by design onceCache.Do(initResolutionCache) if cache != nil { diff --git a/vendor/github.com/go-openapi/spec/contact_info.go b/vendor/github.com/go-openapi/spec/contact_info.go index fafe639b45d..46fada5dff1 100644 --- a/vendor/github.com/go-openapi/spec/contact_info.go +++ b/vendor/github.com/go-openapi/spec/contact_info.go @@ -17,14 +17,14 @@ type ContactInfo struct { VendorExtensible } -// ContactInfoProps hold the properties of a ContactInfo object +// ContactInfoProps hold the properties of a ContactInfo object. type ContactInfoProps struct { Name string `json:"name,omitempty"` URL string `json:"url,omitempty"` Email string `json:"email,omitempty"` } -// UnmarshalJSON hydrates ContactInfo from json +// UnmarshalJSON hydrates ContactInfo from json. func (c *ContactInfo) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &c.ContactInfoProps); err != nil { return err @@ -32,7 +32,7 @@ func (c *ContactInfo) UnmarshalJSON(data []byte) error { return json.Unmarshal(data, &c.VendorExtensible) } -// MarshalJSON produces ContactInfo as json +// MarshalJSON produces ContactInfo as json. 
func (c ContactInfo) MarshalJSON() ([]byte, error) { b1, err := json.Marshal(c.ContactInfoProps) if err != nil { diff --git a/vendor/github.com/go-openapi/spec/debug.go b/vendor/github.com/go-openapi/spec/debug.go index f4316c26333..fa52b0c7f77 100644 --- a/vendor/github.com/go-openapi/spec/debug.go +++ b/vendor/github.com/go-openapi/spec/debug.go @@ -14,14 +14,12 @@ import ( // Debug is true when the SWAGGER_DEBUG env var is not empty. // // It enables a more verbose logging of this package. -var Debug = os.Getenv("SWAGGER_DEBUG") != "" +var Debug = os.Getenv("SWAGGER_DEBUG") != "" //nolint:gochecknoglobals // public toggle for debug logging -var ( - // specLogger is a debug logger for this package - specLogger *log.Logger -) +// specLogger is a debug logger for this package. +var specLogger *log.Logger //nolint:gochecknoglobals // package-level debug logger -func init() { +func init() { //nolint:gochecknoinits // initializes debug logger at package load debugOptions() } diff --git a/vendor/github.com/go-openapi/spec/errors.go b/vendor/github.com/go-openapi/spec/errors.go index e39ab8bf71e..eaca01cc83c 100644 --- a/vendor/github.com/go-openapi/spec/errors.go +++ b/vendor/github.com/go-openapi/spec/errors.go @@ -5,21 +5,21 @@ package spec import "errors" -// Error codes +// Error codes. var ( - // ErrUnknownTypeForReference indicates that a resolved reference was found in an unsupported container type + // ErrUnknownTypeForReference indicates that a resolved reference was found in an unsupported container type. ErrUnknownTypeForReference = errors.New("unknown type for the resolved reference") - // ErrResolveRefNeedsAPointer indicates that a $ref target must be a valid JSON pointer + // ErrResolveRefNeedsAPointer indicates that a $ref target must be a valid JSON pointer. ErrResolveRefNeedsAPointer = errors.New("resolve ref: target needs to be a pointer") // ErrDerefUnsupportedType indicates that a resolved reference was found in an unsupported container type. 
- // At the moment, $ref are supported only inside: schemas, parameters, responses, path items + // At the moment, $ref are supported only inside: schemas, parameters, responses, path items. ErrDerefUnsupportedType = errors.New("deref: unsupported type") - // ErrExpandUnsupportedType indicates that $ref expansion is attempted on some invalid type + // ErrExpandUnsupportedType indicates that $ref expansion is attempted on some invalid type. ErrExpandUnsupportedType = errors.New("expand: unsupported type. Input should be of type *Parameter or *Response") - // ErrSpec is an error raised by the spec package + // ErrSpec is an error raised by the spec package. ErrSpec = errors.New("spec error") ) diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go index ff45350ab4a..f9c2fa327aa 100644 --- a/vendor/github.com/go-openapi/spec/expander.go +++ b/vendor/github.com/go-openapi/spec/expander.go @@ -38,7 +38,7 @@ func optionsOrDefault(opts *ExpandOptions) *ExpandOptions { return &ExpandOptions{} } -// ExpandSpec expands the references in a swagger spec +// ExpandSpec expands the references in a swagger spec. func ExpandSpec(spec *Swagger, options *ExpandOptions) error { options = optionsOrDefault(options) resolver := defaultSchemaLoader(spec, options, nil, nil) @@ -92,7 +92,7 @@ func ExpandSpec(spec *Swagger, options *ExpandOptions) error { const rootBase = ".root" // baseForRoot loads in the cache the root document and produces a fake ".root" base path entry -// for further $ref resolution +// for further $ref resolution. 
func baseForRoot(root any, cache ResolutionCache) string { // cache the root document to resolve $ref's normalizedBase := normalizeBase(rootBase) @@ -190,6 +190,7 @@ func expandItems(target Schema, parentRefs []string, resolver *schemaLoader, bas return &target, nil } +//nolint:gocognit,gocyclo,cyclop // complex but well-tested $ref expansion logic; refactoring deferred to dedicated PR func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { if target.Ref.String() == "" && target.Ref.IsRoot() { newRef := normalizeRef(&target.Ref, basePath) @@ -464,7 +465,7 @@ func ExpandResponseWithRoot(response *Response, root any, cache ResolutionCache) // ExpandResponse expands a response based on a basepath // -// All refs inside response will be resolved relative to basePath +// All refs inside response will be resolved relative to basePath. func ExpandResponse(response *Response, basePath string) error { opts := optionsOrDefault(&ExpandOptions{ RelativeBase: basePath, @@ -491,7 +492,7 @@ func ExpandParameterWithRoot(parameter *Parameter, root any, cache ResolutionCac // ExpandParameter expands a parameter based on a basepath. // This is the exported version of expandParameter -// all refs inside parameter will be resolved relative to basePath +// all refs inside parameter will be resolved relative to basePath. 
func ExpandParameter(parameter *Parameter, basePath string) error { opts := optionsOrDefault(&ExpandOptions{ RelativeBase: basePath, @@ -565,7 +566,7 @@ func expandParameterOrResponse(input any, resolver *schemaLoader, basePath strin return nil } - if sch.Ref.String() != "" { + if sch.Ref.String() != "" { //nolint:nestif // intertwined ref rebasing and circularity check rebasedRef, ern := NewRef(normalizeURI(sch.Ref.String(), basePath)) if ern != nil { return ern diff --git a/vendor/github.com/go-openapi/spec/header.go b/vendor/github.com/go-openapi/spec/header.go index ab251ef7659..599ba2c5d7e 100644 --- a/vendor/github.com/go-openapi/spec/header.go +++ b/vendor/github.com/go-openapi/spec/header.go @@ -15,7 +15,7 @@ const ( jsonArray = "array" ) -// HeaderProps describes a response header +// HeaderProps describes a response header. type HeaderProps struct { Description string `json:"description,omitempty"` } @@ -30,25 +30,25 @@ type Header struct { HeaderProps } -// ResponseHeader creates a new header instance for use in a response +// ResponseHeader creates a new header instance for use in a response. func ResponseHeader() *Header { return new(Header) } -// WithDescription sets the description on this response, allows for chaining +// WithDescription sets the description on this response, allows for chaining. func (h *Header) WithDescription(description string) *Header { h.Description = description return h } -// Typed a fluent builder method for the type of parameter +// Typed a fluent builder method for the type of parameter. func (h *Header) Typed(tpe, format string) *Header { h.Type = tpe h.Format = format return h } -// CollectionOf a fluent builder method for an array item +// CollectionOf a fluent builder method for an array item. 
func (h *Header) CollectionOf(items *Items, format string) *Header { h.Type = jsonArray h.Items = items @@ -56,87 +56,87 @@ func (h *Header) CollectionOf(items *Items, format string) *Header { return h } -// WithDefault sets the default value on this item +// WithDefault sets the default value on this item. func (h *Header) WithDefault(defaultValue any) *Header { h.Default = defaultValue return h } -// WithMaxLength sets a max length value +// WithMaxLength sets a max length value. func (h *Header) WithMaxLength(maximum int64) *Header { h.MaxLength = &maximum return h } -// WithMinLength sets a min length value +// WithMinLength sets a min length value. func (h *Header) WithMinLength(minimum int64) *Header { h.MinLength = &minimum return h } -// WithPattern sets a pattern value +// WithPattern sets a pattern value. func (h *Header) WithPattern(pattern string) *Header { h.Pattern = pattern return h } -// WithMultipleOf sets a multiple of value +// WithMultipleOf sets a multiple of value. func (h *Header) WithMultipleOf(number float64) *Header { h.MultipleOf = &number return h } -// WithMaximum sets a maximum number value +// WithMaximum sets a maximum number value. func (h *Header) WithMaximum(maximum float64, exclusive bool) *Header { h.Maximum = &maximum h.ExclusiveMaximum = exclusive return h } -// WithMinimum sets a minimum number value +// WithMinimum sets a minimum number value. func (h *Header) WithMinimum(minimum float64, exclusive bool) *Header { h.Minimum = &minimum h.ExclusiveMinimum = exclusive return h } -// WithEnum sets a the enum values (replace) +// WithEnum sets a the enum values (replace). func (h *Header) WithEnum(values ...any) *Header { h.Enum = append([]any{}, values...) return h } -// WithMaxItems sets the max items +// WithMaxItems sets the max items. func (h *Header) WithMaxItems(size int64) *Header { h.MaxItems = &size return h } -// WithMinItems sets the min items +// WithMinItems sets the min items. 
func (h *Header) WithMinItems(size int64) *Header { h.MinItems = &size return h } -// UniqueValues dictates that this array can only have unique items +// UniqueValues dictates that this array can only have unique items. func (h *Header) UniqueValues() *Header { h.UniqueItems = true return h } -// AllowDuplicates this array can have duplicates +// AllowDuplicates this array can have duplicates. func (h *Header) AllowDuplicates() *Header { h.UniqueItems = false return h } -// WithValidations is a fluent method to set header validations +// WithValidations is a fluent method to set header validations. func (h *Header) WithValidations(val CommonValidations) *Header { h.SetValidations(SchemaValidations{CommonValidations: val}) return h } -// MarshalJSON marshal this to JSON +// MarshalJSON marshal this to JSON. func (h Header) MarshalJSON() ([]byte, error) { b1, err := json.Marshal(h.CommonValidations) if err != nil { @@ -153,7 +153,7 @@ func (h Header) MarshalJSON() ([]byte, error) { return jsonutils.ConcatJSON(b1, b2, b3), nil } -// UnmarshalJSON unmarshals this header from JSON +// UnmarshalJSON unmarshals this header from JSON. func (h *Header) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &h.CommonValidations); err != nil { return err @@ -167,7 +167,7 @@ func (h *Header) UnmarshalJSON(data []byte) error { return json.Unmarshal(data, &h.HeaderProps) } -// JSONLookup look up a value by the json property name +// JSONLookup look up a value by the json property name. 
func (h Header) JSONLookup(token string) (any, error) { if ex, ok := h.Extensions[token]; ok { return &ex, nil diff --git a/vendor/github.com/go-openapi/spec/info.go b/vendor/github.com/go-openapi/spec/info.go index 9401065bbde..0ccfdcccd9e 100644 --- a/vendor/github.com/go-openapi/spec/info.go +++ b/vendor/github.com/go-openapi/spec/info.go @@ -12,16 +12,16 @@ import ( "github.com/go-openapi/swag/jsonutils" ) -// Extensions vendor specific extensions +// Extensions vendor specific extensions. type Extensions map[string]any -// Add adds a value to these extensions +// Add adds a value to these extensions. func (e Extensions) Add(key string, value any) { realKey := strings.ToLower(key) e[realKey] = value } -// GetString gets a string value from the extensions +// GetString gets a string value from the extensions. func (e Extensions) GetString(key string) (string, bool) { if v, ok := e[strings.ToLower(key)]; ok { str, ok := v.(string) @@ -30,7 +30,7 @@ func (e Extensions) GetString(key string) (string, bool) { return "", false } -// GetInt gets a int value from the extensions +// GetInt gets a int value from the extensions. func (e Extensions) GetInt(key string) (int, bool) { realKey := strings.ToLower(key) @@ -48,7 +48,7 @@ func (e Extensions) GetInt(key string) (int, bool) { return -1, false } -// GetBool gets a string value from the extensions +// GetBool gets a string value from the extensions. func (e Extensions) GetBool(key string) (bool, bool) { if v, ok := e[strings.ToLower(key)]; ok { str, ok := v.(bool) @@ -57,7 +57,7 @@ func (e Extensions) GetBool(key string) (bool, bool) { return false, false } -// GetStringSlice gets a string value from the extensions +// GetStringSlice gets a string value from the extensions. 
func (e Extensions) GetStringSlice(key string) ([]string, bool) { if v, ok := e[strings.ToLower(key)]; ok { arr, isSlice := v.([]any) @@ -82,7 +82,7 @@ type VendorExtensible struct { Extensions Extensions } -// AddExtension adds an extension to this extensible object +// AddExtension adds an extension to this extensible object. func (v *VendorExtensible) AddExtension(key string, value any) { if value == nil { return @@ -93,7 +93,7 @@ func (v *VendorExtensible) AddExtension(key string, value any) { v.Extensions.Add(key, value) } -// MarshalJSON marshals the extensions to json +// MarshalJSON marshals the extensions to json. func (v VendorExtensible) MarshalJSON() ([]byte, error) { toser := make(map[string]any) for k, v := range v.Extensions { @@ -105,7 +105,7 @@ func (v VendorExtensible) MarshalJSON() ([]byte, error) { return json.Marshal(toser) } -// UnmarshalJSON for this extensible object +// UnmarshalJSON for this extensible object. func (v *VendorExtensible) UnmarshalJSON(data []byte) error { var d map[string]any if err := json.Unmarshal(data, &d); err != nil { @@ -123,7 +123,7 @@ func (v *VendorExtensible) UnmarshalJSON(data []byte) error { return nil } -// InfoProps the properties for an info definition +// InfoProps the properties for an info definition. type InfoProps struct { Description string `json:"description,omitempty"` Title string `json:"title,omitempty"` @@ -142,7 +142,7 @@ type Info struct { InfoProps } -// JSONLookup look up a value by the json property name +// JSONLookup look up a value by the json property name. func (i Info) JSONLookup(token string) (any, error) { if ex, ok := i.Extensions[token]; ok { return &ex, nil @@ -151,7 +151,7 @@ func (i Info) JSONLookup(token string) (any, error) { return r, err } -// MarshalJSON marshal this to JSON +// MarshalJSON marshal this to JSON. 
func (i Info) MarshalJSON() ([]byte, error) { b1, err := json.Marshal(i.InfoProps) if err != nil { @@ -164,7 +164,7 @@ func (i Info) MarshalJSON() ([]byte, error) { return jsonutils.ConcatJSON(b1, b2), nil } -// UnmarshalJSON marshal this from JSON +// UnmarshalJSON marshal this from JSON. func (i *Info) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &i.InfoProps); err != nil { return err diff --git a/vendor/github.com/go-openapi/spec/items.go b/vendor/github.com/go-openapi/spec/items.go index d30ca3569b1..daf5a4fd421 100644 --- a/vendor/github.com/go-openapi/spec/items.go +++ b/vendor/github.com/go-openapi/spec/items.go @@ -15,7 +15,7 @@ const ( jsonRef = "$ref" ) -// SimpleSchema describe swagger simple schemas for parameters and headers +// SimpleSchema describe swagger simple schemas for parameters and headers. type SimpleSchema struct { Type string `json:"type,omitempty"` Nullable bool `json:"nullable,omitempty"` @@ -26,7 +26,7 @@ type SimpleSchema struct { Example any `json:"example,omitempty"` } -// TypeName return the type (or format) of a simple schema +// TypeName return the type (or format) of a simple schema. func (s *SimpleSchema) TypeName() string { if s.Format != "" { return s.Format @@ -34,7 +34,7 @@ func (s *SimpleSchema) TypeName() string { return s.Type } -// ItemsTypeName yields the type of items in a simple schema array +// ItemsTypeName yields the type of items in a simple schema array. func (s *SimpleSchema) ItemsTypeName() string { if s.Items == nil { return "" @@ -53,12 +53,12 @@ type Items struct { VendorExtensible } -// NewItems creates a new instance of items +// NewItems creates a new instance of items. func NewItems() *Items { return &Items{} } -// Typed a fluent builder method for the type of item +// Typed a fluent builder method for the type of item. 
func (i *Items) Typed(tpe, format string) *Items { i.Type = tpe i.Format = format @@ -71,7 +71,7 @@ func (i *Items) AsNullable() *Items { return i } -// CollectionOf a fluent builder method for an array item +// CollectionOf a fluent builder method for an array item. func (i *Items) CollectionOf(items *Items, format string) *Items { i.Type = jsonArray i.Items = items @@ -79,87 +79,87 @@ func (i *Items) CollectionOf(items *Items, format string) *Items { return i } -// WithDefault sets the default value on this item +// WithDefault sets the default value on this item. func (i *Items) WithDefault(defaultValue any) *Items { i.Default = defaultValue return i } -// WithMaxLength sets a max length value +// WithMaxLength sets a max length value. func (i *Items) WithMaxLength(maximum int64) *Items { i.MaxLength = &maximum return i } -// WithMinLength sets a min length value +// WithMinLength sets a min length value. func (i *Items) WithMinLength(minimum int64) *Items { i.MinLength = &minimum return i } -// WithPattern sets a pattern value +// WithPattern sets a pattern value. func (i *Items) WithPattern(pattern string) *Items { i.Pattern = pattern return i } -// WithMultipleOf sets a multiple of value +// WithMultipleOf sets a multiple of value. func (i *Items) WithMultipleOf(number float64) *Items { i.MultipleOf = &number return i } -// WithMaximum sets a maximum number value +// WithMaximum sets a maximum number value. func (i *Items) WithMaximum(maximum float64, exclusive bool) *Items { i.Maximum = &maximum i.ExclusiveMaximum = exclusive return i } -// WithMinimum sets a minimum number value +// WithMinimum sets a minimum number value. func (i *Items) WithMinimum(minimum float64, exclusive bool) *Items { i.Minimum = &minimum i.ExclusiveMinimum = exclusive return i } -// WithEnum sets a the enum values (replace) +// WithEnum sets a the enum values (replace). func (i *Items) WithEnum(values ...any) *Items { i.Enum = append([]any{}, values...) 
return i } -// WithMaxItems sets the max items +// WithMaxItems sets the max items. func (i *Items) WithMaxItems(size int64) *Items { i.MaxItems = &size return i } -// WithMinItems sets the min items +// WithMinItems sets the min items. func (i *Items) WithMinItems(size int64) *Items { i.MinItems = &size return i } -// UniqueValues dictates that this array can only have unique items +// UniqueValues dictates that this array can only have unique items. func (i *Items) UniqueValues() *Items { i.UniqueItems = true return i } -// AllowDuplicates this array can have duplicates +// AllowDuplicates this array can have duplicates. func (i *Items) AllowDuplicates() *Items { i.UniqueItems = false return i } -// WithValidations is a fluent method to set Items validations +// WithValidations is a fluent method to set Items validations. func (i *Items) WithValidations(val CommonValidations) *Items { i.SetValidations(SchemaValidations{CommonValidations: val}) return i } -// UnmarshalJSON hydrates this items instance with the data from JSON +// UnmarshalJSON hydrates this items instance with the data from JSON. func (i *Items) UnmarshalJSON(data []byte) error { var validations CommonValidations if err := json.Unmarshal(data, &validations); err != nil { @@ -184,7 +184,7 @@ func (i *Items) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON converts this items object to JSON +// MarshalJSON converts this items object to JSON. func (i Items) MarshalJSON() ([]byte, error) { b1, err := json.Marshal(i.CommonValidations) if err != nil { @@ -205,7 +205,7 @@ func (i Items) MarshalJSON() ([]byte, error) { return jsonutils.ConcatJSON(b4, b3, b1, b2), nil } -// JSONLookup look up a value by the json property name +// JSONLookup look up a value by the json property name. 
func (i Items) JSONLookup(token string) (any, error) { if token == jsonRef { return &i.Ref, nil diff --git a/vendor/github.com/go-openapi/spec/license.go b/vendor/github.com/go-openapi/spec/license.go index 286b237e2bf..8209f218b56 100644 --- a/vendor/github.com/go-openapi/spec/license.go +++ b/vendor/github.com/go-openapi/spec/license.go @@ -17,13 +17,13 @@ type License struct { VendorExtensible } -// LicenseProps holds the properties of a License object +// LicenseProps holds the properties of a License object. type LicenseProps struct { Name string `json:"name,omitempty"` URL string `json:"url,omitempty"` } -// UnmarshalJSON hydrates License from json +// UnmarshalJSON hydrates License from json. func (l *License) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &l.LicenseProps); err != nil { return err @@ -31,7 +31,7 @@ func (l *License) UnmarshalJSON(data []byte) error { return json.Unmarshal(data, &l.VendorExtensible) } -// MarshalJSON produces License as json +// MarshalJSON produces License as json. func (l License) MarshalJSON() ([]byte, error) { b1, err := json.Marshal(l.LicenseProps) if err != nil { diff --git a/vendor/github.com/go-openapi/spec/normalizer.go b/vendor/github.com/go-openapi/spec/normalizer.go index e1d7c58d72b..68252dc30b8 100644 --- a/vendor/github.com/go-openapi/spec/normalizer.go +++ b/vendor/github.com/go-openapi/spec/normalizer.go @@ -138,7 +138,7 @@ func rebase(ref *Ref, v *url.URL, notEqual bool) (Ref, bool) { return MustCreateRef(newBase.String()), true } -// normalizeRef canonicalize a Ref, using a canonical relativeBase as its absolute anchor +// normalizeRef canonicalize a Ref, using a canonical relativeBase as its absolute anchor. 
func normalizeRef(ref *Ref, relativeBase string) *Ref { r := MustCreateRef(normalizeURI(ref.String(), relativeBase)) return &r diff --git a/vendor/github.com/go-openapi/spec/operation.go b/vendor/github.com/go-openapi/spec/operation.go index 29d9c4f4822..cd70d2547ce 100644 --- a/vendor/github.com/go-openapi/spec/operation.go +++ b/vendor/github.com/go-openapi/spec/operation.go @@ -13,7 +13,7 @@ import ( "github.com/go-openapi/swag/jsonutils" ) -func init() { +func init() { //nolint:gochecknoinits // registers gob types for Operation serialization gob.Register(map[string]any{}) gob.Register([]any{}) } @@ -22,7 +22,7 @@ func init() { // // NOTES: // - schemes, when present must be from [http, https, ws, wss]: see validate -// - Security is handled as a special case: see MarshalJSON function +// - Security is handled as a special case: see MarshalJSON function. type OperationProps struct { Description string `json:"description,omitempty"` Consumes []string `json:"consumes,omitempty"` @@ -82,7 +82,7 @@ func NewOperation(id string) *Operation { return op } -// SuccessResponse gets a success response model +// SuccessResponse gets a success response model. func (o *Operation) SuccessResponse() (*Response, int, bool) { if o.Responses == nil { return nil, 0, false @@ -103,7 +103,7 @@ func (o *Operation) SuccessResponse() (*Response, int, bool) { return o.Responses.Default, 0, false } -// JSONLookup look up a value by the json property name +// JSONLookup look up a value by the json property name. func (o Operation) JSONLookup(token string) (any, error) { if ex, ok := o.Extensions[token]; ok { return &ex, nil @@ -112,7 +112,7 @@ func (o Operation) JSONLookup(token string) (any, error) { return r, err } -// UnmarshalJSON hydrates this items instance with the data from JSON +// UnmarshalJSON hydrates this items instance with the data from JSON. 
func (o *Operation) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &o.OperationProps); err != nil { return err @@ -120,7 +120,7 @@ func (o *Operation) UnmarshalJSON(data []byte) error { return json.Unmarshal(data, &o.VendorExtensible) } -// MarshalJSON converts this items object to JSON +// MarshalJSON converts this items object to JSON. func (o Operation) MarshalJSON() ([]byte, error) { b1, err := json.Marshal(o.OperationProps) if err != nil { @@ -140,13 +140,13 @@ func (o *Operation) WithID(id string) *Operation { return o } -// WithDescription sets the description on this operation, allows for chaining +// WithDescription sets the description on this operation, allows for chaining. func (o *Operation) WithDescription(description string) *Operation { o.Description = description return o } -// WithSummary sets the summary on this operation, allows for chaining +// WithSummary sets the summary on this operation, allows for chaining. func (o *Operation) WithSummary(summary string) *Operation { o.Summary = summary return o @@ -170,38 +170,38 @@ func (o *Operation) WithExternalDocs(description, url string) *Operation { return o } -// Deprecate marks the operation as deprecated +// Deprecate marks the operation as deprecated. func (o *Operation) Deprecate() *Operation { o.Deprecated = true return o } -// Undeprecate marks the operation as not deprecated +// Undeprecate marks the operation as not deprecated. func (o *Operation) Undeprecate() *Operation { o.Deprecated = false return o } -// WithConsumes adds media types for incoming body values +// WithConsumes adds media types for incoming body values. func (o *Operation) WithConsumes(mediaTypes ...string) *Operation { o.Consumes = append(o.Consumes, mediaTypes...) return o } -// WithProduces adds media types for outgoing body values +// WithProduces adds media types for outgoing body values. 
func (o *Operation) WithProduces(mediaTypes ...string) *Operation { o.Produces = append(o.Produces, mediaTypes...) return o } -// WithTags adds tags for this operation +// WithTags adds tags for this operation. func (o *Operation) WithTags(tags ...string) *Operation { o.Tags = append(o.Tags, tags...) return o } // AddParam adds a parameter to this operation, when a parameter for that location -// and with that name already exists it will be replaced +// and with that name already exists it will be replaced. func (o *Operation) AddParam(param *Parameter) *Operation { if param == nil { return o @@ -223,7 +223,7 @@ func (o *Operation) AddParam(param *Parameter) *Operation { return o } -// RemoveParam removes a parameter from the operation +// RemoveParam removes a parameter from the operation. func (o *Operation) RemoveParam(name, in string) *Operation { for i, p := range o.Parameters { if p.Name == name && p.In == in { @@ -241,14 +241,14 @@ func (o *Operation) SecuredWith(name string, scopes ...string) *Operation { } // WithDefaultResponse adds a default response to the operation. -// Passing a nil value will remove the response +// Passing a nil value will remove the response. func (o *Operation) WithDefaultResponse(response *Response) *Operation { return o.RespondsWith(0, response) } // RespondsWith adds a status code response to the operation. // When the code is 0 the value of the response will be used as default response value. -// When the value of the response is nil it will be removed from the operation +// When the value of the response is nil it will be removed from the operation. 
func (o *Operation) RespondsWith(code int, response *Response) *Operation { if o.Responses == nil { o.Responses = new(Responses) @@ -279,7 +279,7 @@ type gobAlias struct { SecurityIsEmpty bool } -// GobEncode provides a safe gob encoder for Operation, including empty security requirements +// GobEncode provides a safe gob encoder for Operation, including empty security requirements. func (o Operation) GobEncode() ([]byte, error) { raw := struct { Ext VendorExtensible @@ -293,7 +293,7 @@ func (o Operation) GobEncode() ([]byte, error) { return b.Bytes(), err } -// GobDecode provides a safe gob decoder for Operation, including empty security requirements +// GobDecode provides a safe gob decoder for Operation, including empty security requirements. func (o *Operation) GobDecode(b []byte) error { var raw struct { Ext VendorExtensible @@ -310,7 +310,7 @@ func (o *Operation) GobDecode(b []byte) error { return nil } -// GobEncode provides a safe gob encoder for Operation, including empty security requirements +// GobEncode provides a safe gob encoder for Operation, including empty security requirements. func (op OperationProps) GobEncode() ([]byte, error) { raw := gobAlias{ Alias: (*opsAlias)(&op), @@ -355,7 +355,7 @@ func (op OperationProps) GobEncode() ([]byte, error) { return b.Bytes(), err } -// GobDecode provides a safe gob decoder for Operation, including empty security requirements +// GobDecode provides a safe gob decoder for Operation, including empty security requirements. func (op *OperationProps) GobDecode(b []byte) error { var raw gobAlias diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go index b94b7682ac8..516f5d95c56 100644 --- a/vendor/github.com/go-openapi/spec/parameter.go +++ b/vendor/github.com/go-openapi/spec/parameter.go @@ -11,45 +11,51 @@ import ( "github.com/go-openapi/swag/jsonutils" ) -// QueryParam creates a query parameter +// QueryParam creates a query parameter. 
func QueryParam(name string) *Parameter { return &Parameter{ParamProps: ParamProps{Name: name, In: "query"}} } -// HeaderParam creates a header parameter, this is always required by default +// HeaderParam creates a header parameter, this is always required by default. func HeaderParam(name string) *Parameter { return &Parameter{ParamProps: ParamProps{Name: name, In: "header", Required: true}} } -// PathParam creates a path parameter, this is always required +// PathParam creates a path parameter, this is always required. func PathParam(name string) *Parameter { return &Parameter{ParamProps: ParamProps{Name: name, In: "path", Required: true}} } -// BodyParam creates a body parameter +// BodyParam creates a body parameter. func BodyParam(name string, schema *Schema) *Parameter { return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema}} } -// FormDataParam creates a body parameter +// FormDataParam creates a body parameter. func FormDataParam(name string) *Parameter { return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}} } -// FileParam creates a body parameter +// FileParam creates a body parameter. func FileParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}, - SimpleSchema: SimpleSchema{Type: "file"}} + return &Parameter{ + ParamProps: ParamProps{Name: name, In: "formData"}, + SimpleSchema: SimpleSchema{Type: "file"}, + } } -// SimpleArrayParam creates a param for a simple array (string, int, date etc) +// SimpleArrayParam creates a param for a simple array (string, int, date etc). 
func SimpleArrayParam(name, tpe, fmt string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name}, - SimpleSchema: SimpleSchema{Type: jsonArray, CollectionFormat: "csv", - Items: &Items{SimpleSchema: SimpleSchema{Type: tpe, Format: fmt}}}} + return &Parameter{ + ParamProps: ParamProps{Name: name}, + SimpleSchema: SimpleSchema{ + Type: jsonArray, CollectionFormat: "csv", + Items: &Items{SimpleSchema: SimpleSchema{Type: tpe, Format: fmt}}, + }, + } } -// ParamRef creates a parameter that's a json reference +// ParamRef creates a parameter that's a json reference. func ParamRef(uri string) *Parameter { p := new(Parameter) p.Ref = MustCreateRef(uri) @@ -60,7 +66,7 @@ func ParamRef(uri string) *Parameter { // // NOTE: // - Schema is defined when "in" == "body": see validate -// - AllowEmptyValue is allowed where "in" == "query" || "formData" +// - AllowEmptyValue is allowed where "in" == "query" || "formData". type ParamProps struct { Description string `json:"description,omitempty"` Name string `json:"name,omitempty"` @@ -104,7 +110,7 @@ type Parameter struct { ParamProps } -// JSONLookup look up a value by the json property name +// JSONLookup look up a value by the json property name. func (p Parameter) JSONLookup(token string) (any, error) { if ex, ok := p.Extensions[token]; ok { return &ex, nil @@ -131,32 +137,32 @@ func (p Parameter) JSONLookup(token string) (any, error) { return r, err } -// WithDescription a fluent builder method for the description of the parameter +// WithDescription a fluent builder method for the description of the parameter. func (p *Parameter) WithDescription(description string) *Parameter { p.Description = description return p } -// Named a fluent builder method to override the name of the parameter +// Named a fluent builder method to override the name of the parameter. 
func (p *Parameter) Named(name string) *Parameter { p.Name = name return p } -// WithLocation a fluent builder method to override the location of the parameter +// WithLocation a fluent builder method to override the location of the parameter. func (p *Parameter) WithLocation(in string) *Parameter { p.In = in return p } -// Typed a fluent builder method for the type of the parameter value +// Typed a fluent builder method for the type of the parameter value. func (p *Parameter) Typed(tpe, format string) *Parameter { p.Type = tpe p.Format = format return p } -// CollectionOf a fluent builder method for an array parameter +// CollectionOf a fluent builder method for an array parameter. func (p *Parameter) CollectionOf(items *Items, format string) *Parameter { p.Type = jsonArray p.Items = items @@ -164,32 +170,32 @@ func (p *Parameter) CollectionOf(items *Items, format string) *Parameter { return p } -// WithDefault sets the default value on this parameter +// WithDefault sets the default value on this parameter. func (p *Parameter) WithDefault(defaultValue any) *Parameter { p.AsOptional() // with default implies optional p.Default = defaultValue return p } -// AllowsEmptyValues flags this parameter as being ok with empty values +// AllowsEmptyValues flags this parameter as being ok with empty values. func (p *Parameter) AllowsEmptyValues() *Parameter { p.AllowEmptyValue = true return p } -// NoEmptyValues flags this parameter as not liking empty values +// NoEmptyValues flags this parameter as not liking empty values. func (p *Parameter) NoEmptyValues() *Parameter { p.AllowEmptyValue = false return p } -// AsOptional flags this parameter as optional +// AsOptional flags this parameter as optional. func (p *Parameter) AsOptional() *Parameter { p.Required = false return p } -// AsRequired flags this parameter as required +// AsRequired flags this parameter as required. 
func (p *Parameter) AsRequired() *Parameter { if p.Default != nil { // with a default required makes no sense return p @@ -198,81 +204,81 @@ func (p *Parameter) AsRequired() *Parameter { return p } -// WithMaxLength sets a max length value +// WithMaxLength sets a max length value. func (p *Parameter) WithMaxLength(maximum int64) *Parameter { p.MaxLength = &maximum return p } -// WithMinLength sets a min length value +// WithMinLength sets a min length value. func (p *Parameter) WithMinLength(minimum int64) *Parameter { p.MinLength = &minimum return p } -// WithPattern sets a pattern value +// WithPattern sets a pattern value. func (p *Parameter) WithPattern(pattern string) *Parameter { p.Pattern = pattern return p } -// WithMultipleOf sets a multiple of value +// WithMultipleOf sets a multiple of value. func (p *Parameter) WithMultipleOf(number float64) *Parameter { p.MultipleOf = &number return p } -// WithMaximum sets a maximum number value +// WithMaximum sets a maximum number value. func (p *Parameter) WithMaximum(maximum float64, exclusive bool) *Parameter { p.Maximum = &maximum p.ExclusiveMaximum = exclusive return p } -// WithMinimum sets a minimum number value +// WithMinimum sets a minimum number value. func (p *Parameter) WithMinimum(minimum float64, exclusive bool) *Parameter { p.Minimum = &minimum p.ExclusiveMinimum = exclusive return p } -// WithEnum sets a the enum values (replace) +// WithEnum sets a the enum values (replace). func (p *Parameter) WithEnum(values ...any) *Parameter { p.Enum = append([]any{}, values...) return p } -// WithMaxItems sets the max items +// WithMaxItems sets the max items. func (p *Parameter) WithMaxItems(size int64) *Parameter { p.MaxItems = &size return p } -// WithMinItems sets the min items +// WithMinItems sets the min items. 
func (p *Parameter) WithMinItems(size int64) *Parameter { p.MinItems = &size return p } -// UniqueValues dictates that this array can only have unique items +// UniqueValues dictates that this array can only have unique items. func (p *Parameter) UniqueValues() *Parameter { p.UniqueItems = true return p } -// AllowDuplicates this array can have duplicates +// AllowDuplicates this array can have duplicates. func (p *Parameter) AllowDuplicates() *Parameter { p.UniqueItems = false return p } -// WithValidations is a fluent method to set parameter validations +// WithValidations is a fluent method to set parameter validations. func (p *Parameter) WithValidations(val CommonValidations) *Parameter { p.SetValidations(SchemaValidations{CommonValidations: val}) return p } -// UnmarshalJSON hydrates this items instance with the data from JSON +// UnmarshalJSON hydrates this items instance with the data from JSON. func (p *Parameter) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &p.CommonValidations); err != nil { return err @@ -289,7 +295,7 @@ func (p *Parameter) UnmarshalJSON(data []byte) error { return json.Unmarshal(data, &p.ParamProps) } -// MarshalJSON converts this items object to JSON +// MarshalJSON converts this items object to JSON. func (p Parameter) MarshalJSON() ([]byte, error) { b1, err := json.Marshal(p.CommonValidations) if err != nil { diff --git a/vendor/github.com/go-openapi/spec/path_item.go b/vendor/github.com/go-openapi/spec/path_item.go index c692b89e46c..4408ece4654 100644 --- a/vendor/github.com/go-openapi/spec/path_item.go +++ b/vendor/github.com/go-openapi/spec/path_item.go @@ -10,7 +10,7 @@ import ( "github.com/go-openapi/swag/jsonutils" ) -// PathItemProps the path item specific properties +// PathItemProps the path item specific properties. 
type PathItemProps struct { Get *Operation `json:"get,omitempty"` Put *Operation `json:"put,omitempty"` @@ -34,7 +34,7 @@ type PathItem struct { PathItemProps } -// JSONLookup look up a value by the json property name +// JSONLookup look up a value by the json property name. func (p PathItem) JSONLookup(token string) (any, error) { if ex, ok := p.Extensions[token]; ok { return &ex, nil @@ -46,7 +46,7 @@ func (p PathItem) JSONLookup(token string) (any, error) { return r, err } -// UnmarshalJSON hydrates this items instance with the data from JSON +// UnmarshalJSON hydrates this items instance with the data from JSON. func (p *PathItem) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &p.Refable); err != nil { return err @@ -57,7 +57,7 @@ func (p *PathItem) UnmarshalJSON(data []byte) error { return json.Unmarshal(data, &p.PathItemProps) } -// MarshalJSON converts this items object to JSON +// MarshalJSON converts this items object to JSON. func (p PathItem) MarshalJSON() ([]byte, error) { b3, err := json.Marshal(p.Refable) if err != nil { diff --git a/vendor/github.com/go-openapi/spec/paths.go b/vendor/github.com/go-openapi/spec/paths.go index b9e42184b19..5daf5a6709f 100644 --- a/vendor/github.com/go-openapi/spec/paths.go +++ b/vendor/github.com/go-openapi/spec/paths.go @@ -23,7 +23,7 @@ type Paths struct { Paths map[string]PathItem `json:"-"` // custom serializer to flatten this, each entry must start with "/" } -// JSONLookup look up a value by the json property name +// JSONLookup look up a value by the json property name. func (p Paths) JSONLookup(token string) (any, error) { if pi, ok := p.Paths[token]; ok { return &pi, nil @@ -34,7 +34,7 @@ func (p Paths) JSONLookup(token string) (any, error) { return nil, fmt.Errorf("object has no field %q: %w", token, ErrSpec) } -// UnmarshalJSON hydrates this items instance with the data from JSON +// UnmarshalJSON hydrates this items instance with the data from JSON. 
func (p *Paths) UnmarshalJSON(data []byte) error { var res map[string]json.RawMessage if err := json.Unmarshal(data, &res); err != nil { @@ -65,7 +65,7 @@ func (p *Paths) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON converts this items object to JSON +// MarshalJSON converts this items object to JSON. func (p Paths) MarshalJSON() ([]byte, error) { b1, err := json.Marshal(p.VendorExtensible) if err != nil { diff --git a/vendor/github.com/go-openapi/spec/properties.go b/vendor/github.com/go-openapi/spec/properties.go index c4988180675..b8e97271e19 100644 --- a/vendor/github.com/go-openapi/spec/properties.go +++ b/vendor/github.com/go-openapi/spec/properties.go @@ -10,7 +10,7 @@ import ( "sort" ) -// OrderSchemaItem holds a named schema (e.g. from a property of an object) +// OrderSchemaItem holds a named schema (e.g. from a property of an object). type OrderSchemaItem struct { Schema @@ -53,7 +53,7 @@ func (items OrderSchemaItems) Swap(i, j int) { items[i], items[j] = items[j], it func (items OrderSchemaItems) Less(i, j int) (ret bool) { ii, oki := items[i].Extensions.GetInt("x-order") ij, okj := items[j].Extensions.GetInt("x-order") - if oki { + if oki { //nolint:nestif // nested recover logic for safe type comparison if okj { defer func() { if err := recover(); err != nil { @@ -94,7 +94,7 @@ func (items OrderSchemaItems) marshalJSONItem(item OrderSchemaItem, output *byte // It knows how to transform its keys into an ordered slice. type SchemaProperties map[string]Schema -// ToOrderedSchemaItems transforms the map of properties into a sortable slice +// ToOrderedSchemaItems transforms the map of properties into a sortable slice. 
func (properties SchemaProperties) ToOrderedSchemaItems() OrderSchemaItems { items := make(OrderSchemaItems, 0, len(properties)) for k, v := range properties { diff --git a/vendor/github.com/go-openapi/spec/ref.go b/vendor/github.com/go-openapi/spec/ref.go index 1d1c759174a..40b7d486c95 100644 --- a/vendor/github.com/go-openapi/spec/ref.go +++ b/vendor/github.com/go-openapi/spec/ref.go @@ -14,28 +14,28 @@ import ( "github.com/go-openapi/jsonreference" ) -// Refable is a struct for things that accept a $ref property +// Refable is a struct for things that accept a $ref property. type Refable struct { Ref Ref } -// MarshalJSON marshals the ref to json +// MarshalJSON marshals the ref to json. func (r Refable) MarshalJSON() ([]byte, error) { return r.Ref.MarshalJSON() } -// UnmarshalJSON unmarshals the ref from json +// UnmarshalJSON unmarshals the ref from json. func (r *Refable) UnmarshalJSON(d []byte) error { return json.Unmarshal(d, &r.Ref) } -// Ref represents a json reference that is potentially resolved +// Ref represents a json reference that is potentially resolved. type Ref struct { jsonreference.Ref } // NewRef creates a new instance of a ref object -// returns an error when the reference uri is an invalid uri +// returns an error when the reference uri is an invalid uri. func NewRef(refURI string) (Ref, error) { ref, err := jsonreference.New(refURI) if err != nil { @@ -51,7 +51,7 @@ func MustCreateRef(refURI string) Ref { return Ref{Ref: jsonreference.MustCreateRef(refURI)} } -// RemoteURI gets the remote uri part of the ref +// RemoteURI gets the remote uri part of the ref. func (r *Ref) RemoteURI() string { if r.String() == "" { return "" @@ -62,7 +62,7 @@ func (r *Ref) RemoteURI() string { return u.String() } -// IsValidURI returns true when the url the ref points to can be found +// IsValidURI returns true when the url the ref points to can be found. 
func (r *Ref) IsValidURI(basepaths ...string) bool { if r.String() == "" { return true @@ -112,7 +112,7 @@ func (r *Ref) IsValidURI(basepaths ...string) bool { } // Inherits creates a new reference from a parent and a child -// If the child cannot inherit from the parent, an error is returned +// If the child cannot inherit from the parent, an error is returned. func (r *Ref) Inherits(child Ref) (*Ref, error) { ref, err := r.Ref.Inherits(child.Ref) if err != nil { @@ -121,7 +121,7 @@ func (r *Ref) Inherits(child Ref) (*Ref, error) { return &Ref{Ref: *ref}, nil } -// MarshalJSON marshals this ref into a JSON object +// MarshalJSON marshals this ref into a JSON object. func (r Ref) MarshalJSON() ([]byte, error) { str := r.String() if str == "" { @@ -134,7 +134,7 @@ func (r Ref) MarshalJSON() ([]byte, error) { return json.Marshal(v) } -// UnmarshalJSON unmarshals this ref from a JSON object +// UnmarshalJSON unmarshals this ref from a JSON object. func (r *Ref) UnmarshalJSON(d []byte) error { var v map[string]any if err := json.Unmarshal(d, &v); err != nil { @@ -143,7 +143,7 @@ func (r *Ref) UnmarshalJSON(d []byte) error { return r.fromMap(v) } -// GobEncode provides a safe gob encoder for Ref +// GobEncode provides a safe gob encoder for Ref. func (r Ref) GobEncode() ([]byte, error) { var b bytes.Buffer raw, err := r.MarshalJSON() @@ -154,7 +154,7 @@ func (r Ref) GobEncode() ([]byte, error) { return b.Bytes(), err } -// GobDecode provides a safe gob decoder for Ref +// GobDecode provides a safe gob decoder for Ref. 
func (r *Ref) GobDecode(b []byte) error { var raw []byte buf := bytes.NewBuffer(b) diff --git a/vendor/github.com/go-openapi/spec/resolver.go b/vendor/github.com/go-openapi/spec/resolver.go index 600574e1189..1bf90c86828 100644 --- a/vendor/github.com/go-openapi/spec/resolver.go +++ b/vendor/github.com/go-openapi/spec/resolver.go @@ -20,7 +20,7 @@ func resolveAnyWithBase(root any, ref *Ref, result any, options *ExpandOptions) return nil } -// ResolveRefWithBase resolves a reference against a context root with preservation of base path +// ResolveRefWithBase resolves a reference against a context root with preservation of base path. func ResolveRefWithBase(root any, ref *Ref, options *ExpandOptions) (*Schema, error) { result := new(Schema) @@ -34,7 +34,7 @@ func ResolveRefWithBase(root any, ref *Ref, options *ExpandOptions) (*Schema, er // ResolveRef resolves a reference for a schema against a context root // ref is guaranteed to be in root (no need to go to external files) // -// ResolveRef is ONLY called from the code generation module +// ResolveRef is ONLY called from the code generation module. func ResolveRef(root any, ref *Ref) (*Schema, error) { res, _, err := ref.GetPointer().Get(root) if err != nil { @@ -57,7 +57,7 @@ func ResolveRef(root any, ref *Ref) (*Schema, error) { } } -// ResolveParameterWithBase resolves a parameter reference against a context root and base path +// ResolveParameterWithBase resolves a parameter reference against a context root and base path. func ResolveParameterWithBase(root any, ref Ref, options *ExpandOptions) (*Parameter, error) { result := new(Parameter) @@ -68,12 +68,12 @@ func ResolveParameterWithBase(root any, ref Ref, options *ExpandOptions) (*Param return result, nil } -// ResolveParameter resolves a parameter reference against a context root +// ResolveParameter resolves a parameter reference against a context root. 
func ResolveParameter(root any, ref Ref) (*Parameter, error) { return ResolveParameterWithBase(root, ref, nil) } -// ResolveResponseWithBase resolves response a reference against a context root and base path +// ResolveResponseWithBase resolves response a reference against a context root and base path. func ResolveResponseWithBase(root any, ref Ref, options *ExpandOptions) (*Response, error) { result := new(Response) @@ -85,12 +85,12 @@ func ResolveResponseWithBase(root any, ref Ref, options *ExpandOptions) (*Respon return result, nil } -// ResolveResponse resolves response a reference against a context root +// ResolveResponse resolves response a reference against a context root. func ResolveResponse(root any, ref Ref) (*Response, error) { return ResolveResponseWithBase(root, ref, nil) } -// ResolvePathItemWithBase resolves response a path item against a context root and base path +// ResolvePathItemWithBase resolves response a path item against a context root and base path. func ResolvePathItemWithBase(root any, ref Ref, options *ExpandOptions) (*PathItem, error) { result := new(PathItem) @@ -103,7 +103,7 @@ func ResolvePathItemWithBase(root any, ref Ref, options *ExpandOptions) (*PathIt // ResolvePathItem resolves response a path item against a context root and base path // -// Deprecated: use ResolvePathItemWithBase instead +// Deprecated: use ResolvePathItemWithBase instead. func ResolvePathItem(root any, ref Ref, options *ExpandOptions) (*PathItem, error) { return ResolvePathItemWithBase(root, ref, options) } @@ -124,7 +124,7 @@ func ResolveItemsWithBase(root any, ref Ref, options *ExpandOptions) (*Items, er // ResolveItems resolves parameter items reference against a context root and base path. // -// Deprecated: use ResolveItemsWithBase instead +// Deprecated: use ResolveItemsWithBase instead. 
func ResolveItems(root any, ref Ref, options *ExpandOptions) (*Items, error) { return ResolveItemsWithBase(root, ref, options) } diff --git a/vendor/github.com/go-openapi/spec/response.go b/vendor/github.com/go-openapi/spec/response.go index e5a7e5c40d4..4bb6a2bcd24 100644 --- a/vendor/github.com/go-openapi/spec/response.go +++ b/vendor/github.com/go-openapi/spec/response.go @@ -10,7 +10,7 @@ import ( "github.com/go-openapi/swag/jsonutils" ) -// ResponseProps properties specific to a response +// ResponseProps properties specific to a response. type ResponseProps struct { Description string `json:"description"` Schema *Schema `json:"schema,omitempty"` @@ -27,19 +27,19 @@ type Response struct { VendorExtensible } -// NewResponse creates a new response instance +// NewResponse creates a new response instance. func NewResponse() *Response { return new(Response) } -// ResponseRef creates a response as a json reference +// ResponseRef creates a response as a json reference. func ResponseRef(url string) *Response { resp := NewResponse() resp.Ref = MustCreateRef(url) return resp } -// JSONLookup look up a value by the json property name +// JSONLookup look up a value by the json property name. func (r Response) JSONLookup(token string) (any, error) { if ex, ok := r.Extensions[token]; ok { return &ex, nil @@ -51,7 +51,7 @@ func (r Response) JSONLookup(token string) (any, error) { return ptr, err } -// UnmarshalJSON hydrates this items instance with the data from JSON +// UnmarshalJSON hydrates this items instance with the data from JSON. func (r *Response) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &r.ResponseProps); err != nil { return err @@ -62,7 +62,7 @@ func (r *Response) UnmarshalJSON(data []byte) error { return json.Unmarshal(data, &r.VendorExtensible) } -// MarshalJSON converts this items object to JSON +// MarshalJSON converts this items object to JSON. 
func (r Response) MarshalJSON() ([]byte, error) { var ( b1 []byte @@ -100,20 +100,20 @@ func (r Response) MarshalJSON() ([]byte, error) { return jsonutils.ConcatJSON(b1, b2, b3), nil } -// WithDescription sets the description on this response, allows for chaining +// WithDescription sets the description on this response, allows for chaining. func (r *Response) WithDescription(description string) *Response { r.Description = description return r } // WithSchema sets the schema on this response, allows for chaining. -// Passing a nil argument removes the schema from this response +// Passing a nil argument removes the schema from this response. func (r *Response) WithSchema(schema *Schema) *Response { r.Schema = schema return r } -// AddHeader adds a header to this response +// AddHeader adds a header to this response. func (r *Response) AddHeader(name string, header *Header) *Response { if header == nil { return r.RemoveHeader(name) @@ -125,13 +125,13 @@ func (r *Response) AddHeader(name string, header *Header) *Response { return r } -// RemoveHeader removes a header from this response +// RemoveHeader removes a header from this response. func (r *Response) RemoveHeader(name string) *Response { delete(r.Headers, name) return r } -// AddExample adds an example to this response +// AddExample adds an example to this response. func (r *Response) AddExample(mediaType string, example any) *Response { if r.Examples == nil { r.Examples = make(map[string]any) diff --git a/vendor/github.com/go-openapi/spec/responses.go b/vendor/github.com/go-openapi/spec/responses.go index 733a1315d02..fb369e4a6b0 100644 --- a/vendor/github.com/go-openapi/spec/responses.go +++ b/vendor/github.com/go-openapi/spec/responses.go @@ -31,7 +31,7 @@ type Responses struct { ResponsesProps } -// JSONLookup implements an interface to customize json pointer lookup +// JSONLookup implements an interface to customize json pointer lookup. 
func (r Responses) JSONLookup(token string) (any, error) { if token == "default" { return r.Default, nil @@ -47,7 +47,7 @@ func (r Responses) JSONLookup(token string) (any, error) { return nil, fmt.Errorf("object has no field %q: %w", token, ErrSpec) } -// UnmarshalJSON hydrates this items instance with the data from JSON +// UnmarshalJSON hydrates this items instance with the data from JSON. func (r *Responses) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &r.ResponsesProps); err != nil { return err @@ -62,7 +62,7 @@ func (r *Responses) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON converts this items object to JSON +// MarshalJSON converts this items object to JSON. func (r Responses) MarshalJSON() ([]byte, error) { b1, err := json.Marshal(r.ResponsesProps) if err != nil { @@ -84,7 +84,7 @@ type ResponsesProps struct { StatusCodeResponses map[int]Response } -// MarshalJSON marshals responses as JSON +// MarshalJSON marshals responses as JSON. func (r ResponsesProps) MarshalJSON() ([]byte, error) { toser := map[string]Response{} if r.Default != nil { @@ -96,7 +96,7 @@ func (r ResponsesProps) MarshalJSON() ([]byte, error) { return json.Marshal(toser) } -// UnmarshalJSON unmarshals responses from JSON +// UnmarshalJSON unmarshals responses from JSON. func (r *ResponsesProps) UnmarshalJSON(data []byte) error { var res map[string]json.RawMessage if err := json.Unmarshal(data, &res); err != nil { diff --git a/vendor/github.com/go-openapi/spec/schema.go b/vendor/github.com/go-openapi/spec/schema.go index 6623728a41a..d7a481bf1ac 100644 --- a/vendor/github.com/go-openapi/spec/schema.go +++ b/vendor/github.com/go-openapi/spec/schema.go @@ -13,86 +13,88 @@ import ( "github.com/go-openapi/swag/jsonutils" ) -// BooleanProperty creates a boolean property +// BooleanProperty creates a boolean property. 
func BooleanProperty() *Schema { return &Schema{SchemaProps: SchemaProps{Type: []string{"boolean"}}} } -// BoolProperty creates a boolean property +// BoolProperty creates a boolean property. func BoolProperty() *Schema { return BooleanProperty() } -// StringProperty creates a string property +// StringProperty creates a string property. func StringProperty() *Schema { return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} } -// CharProperty creates a string property +// CharProperty creates a string property. func CharProperty() *Schema { return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} } -// Float64Property creates a float64/double property +// Float64Property creates a float64/double property. func Float64Property() *Schema { return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "double"}} } -// Float32Property creates a float32/float property +// Float32Property creates a float32/float property. func Float32Property() *Schema { return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "float"}} } -// Int8Property creates an int8 property +// Int8Property creates an int8 property. func Int8Property() *Schema { return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int8"}} } -// Int16Property creates an int16 property +// Int16Property creates an int16 property. func Int16Property() *Schema { return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int16"}} } -// Int32Property creates an int32 property +// Int32Property creates an int32 property. func Int32Property() *Schema { return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int32"}} } -// Int64Property creates an int64 property +// Int64Property creates an int64 property. 
func Int64Property() *Schema { return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int64"}} } -// StrFmtProperty creates a property for the named string format +// StrFmtProperty creates a property for the named string format. func StrFmtProperty(format string) *Schema { return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: format}} } -// DateProperty creates a date property +// DateProperty creates a date property. func DateProperty() *Schema { return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date"}} } -// DateTimeProperty creates a date time property +// DateTimeProperty creates a date time property. func DateTimeProperty() *Schema { return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date-time"}} } -// MapProperty creates a map property +// MapProperty creates a map property. func MapProperty(property *Schema) *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"object"}, - AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}} + return &Schema{SchemaProps: SchemaProps{ + Type: []string{"object"}, + AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}, + }} } -// RefProperty creates a ref property +// RefProperty creates a ref property. func RefProperty(name string) *Schema { return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} } -// RefSchema creates a ref property +// RefSchema creates a ref property. func RefSchema(name string) *Schema { return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} } -// ArrayProperty creates an array property +// ArrayProperty creates an array property. 
func ArrayProperty(items *Schema) *Schema { if items == nil { return &Schema{SchemaProps: SchemaProps{Type: []string{"array"}}} @@ -100,17 +102,17 @@ func ArrayProperty(items *Schema) *Schema { return &Schema{SchemaProps: SchemaProps{Items: &SchemaOrArray{Schema: items}, Type: []string{"array"}}} } -// ComposedSchema creates a schema with allOf +// ComposedSchema creates a schema with allOf. func ComposedSchema(schemas ...Schema) *Schema { s := new(Schema) s.AllOf = schemas return s } -// SchemaURL represents a schema url +// SchemaURL represents a schema url. type SchemaURL string -// MarshalJSON marshal this to JSON +// MarshalJSON marshal this to JSON. func (r SchemaURL) MarshalJSON() ([]byte, error) { if r == "" { return []byte("{}"), nil @@ -119,7 +121,7 @@ func (r SchemaURL) MarshalJSON() ([]byte, error) { return json.Marshal(v) } -// UnmarshalJSON unmarshal this from JSON +// UnmarshalJSON unmarshal this from JSON. func (r *SchemaURL) UnmarshalJSON(data []byte) error { var v map[string]any if err := json.Unmarshal(data, &v); err != nil { @@ -145,7 +147,7 @@ func (r *SchemaURL) fromMap(v map[string]any) error { return nil } -// SchemaProps describes a JSON schema (draft 4) +// SchemaProps describes a JSON schema (draft 4). type SchemaProps struct { ID string `json:"id,omitempty"` Ref Ref `json:"-"` @@ -184,7 +186,7 @@ type SchemaProps struct { Definitions Definitions `json:"definitions,omitempty"` } -// SwaggerSchemaProps are additional properties supported by swagger schemas, but not JSON-schema (draft 4) +// SwaggerSchemaProps are additional properties supported by swagger schemas, but not JSON-schema (draft 4). 
type SwaggerSchemaProps struct { Discriminator string `json:"discriminator,omitempty"` ReadOnly bool `json:"readOnly,omitempty"` @@ -208,7 +210,7 @@ type Schema struct { ExtraProps map[string]any `json:"-"` } -// JSONLookup implements an interface to customize json pointer lookup +// JSONLookup implements an interface to customize json pointer lookup. func (s Schema) JSONLookup(token string) (any, error) { if ex, ok := s.Extensions[token]; ok { return &ex, nil @@ -226,31 +228,31 @@ func (s Schema) JSONLookup(token string) (any, error) { return r, err } -// WithID sets the id for this schema, allows for chaining +// WithID sets the id for this schema, allows for chaining. func (s *Schema) WithID(id string) *Schema { s.ID = id return s } -// WithTitle sets the title for this schema, allows for chaining +// WithTitle sets the title for this schema, allows for chaining. func (s *Schema) WithTitle(title string) *Schema { s.Title = title return s } -// WithDescription sets the description for this schema, allows for chaining +// WithDescription sets the description for this schema, allows for chaining. func (s *Schema) WithDescription(description string) *Schema { s.Description = description return s } -// WithProperties sets the properties for this schema +// WithProperties sets the properties for this schema. func (s *Schema) WithProperties(schemas map[string]Schema) *Schema { s.Properties = schemas return s } -// SetProperty sets a property on this schema +// SetProperty sets a property on this schema. func (s *Schema) SetProperty(name string, schema Schema) *Schema { if s.Properties == nil { s.Properties = make(map[string]Schema) @@ -259,32 +261,32 @@ func (s *Schema) SetProperty(name string, schema Schema) *Schema { return s } -// WithAllOf sets the all of property +// WithAllOf sets the all of property. 
func (s *Schema) WithAllOf(schemas ...Schema) *Schema { s.AllOf = schemas return s } -// WithMaxProperties sets the max number of properties an object can have +// WithMaxProperties sets the max number of properties an object can have. func (s *Schema) WithMaxProperties(maximum int64) *Schema { s.MaxProperties = &maximum return s } -// WithMinProperties sets the min number of properties an object must have +// WithMinProperties sets the min number of properties an object must have. func (s *Schema) WithMinProperties(minimum int64) *Schema { s.MinProperties = &minimum return s } -// Typed sets the type of this schema for a single value item +// Typed sets the type of this schema for a single value item. func (s *Schema) Typed(tpe, format string) *Schema { s.Type = []string{tpe} s.Format = format return s } -// AddType adds a type with potential format to the types for this schema +// AddType adds a type with potential format to the types for this schema. func (s *Schema) AddType(tpe, format string) *Schema { s.Type = append(s.Type, tpe) if format != "" { @@ -299,124 +301,124 @@ func (s *Schema) AsNullable() *Schema { return s } -// CollectionOf a fluent builder method for an array parameter +// CollectionOf a fluent builder method for an array parameter. func (s *Schema) CollectionOf(items Schema) *Schema { s.Type = []string{jsonArray} s.Items = &SchemaOrArray{Schema: &items} return s } -// WithDefault sets the default value on this parameter +// WithDefault sets the default value on this parameter. func (s *Schema) WithDefault(defaultValue any) *Schema { s.Default = defaultValue return s } -// WithRequired flags this parameter as required +// WithRequired flags this parameter as required. func (s *Schema) WithRequired(items ...string) *Schema { s.Required = items return s } -// AddRequired adds field names to the required properties array +// AddRequired adds field names to the required properties array. 
func (s *Schema) AddRequired(items ...string) *Schema { s.Required = append(s.Required, items...) return s } -// WithMaxLength sets a max length value +// WithMaxLength sets a max length value. func (s *Schema) WithMaxLength(maximum int64) *Schema { s.MaxLength = &maximum return s } -// WithMinLength sets a min length value +// WithMinLength sets a min length value. func (s *Schema) WithMinLength(minimum int64) *Schema { s.MinLength = &minimum return s } -// WithPattern sets a pattern value +// WithPattern sets a pattern value. func (s *Schema) WithPattern(pattern string) *Schema { s.Pattern = pattern return s } -// WithMultipleOf sets a multiple of value +// WithMultipleOf sets a multiple of value. func (s *Schema) WithMultipleOf(number float64) *Schema { s.MultipleOf = &number return s } -// WithMaximum sets a maximum number value +// WithMaximum sets a maximum number value. func (s *Schema) WithMaximum(maximum float64, exclusive bool) *Schema { s.Maximum = &maximum s.ExclusiveMaximum = exclusive return s } -// WithMinimum sets a minimum number value +// WithMinimum sets a minimum number value. func (s *Schema) WithMinimum(minimum float64, exclusive bool) *Schema { s.Minimum = &minimum s.ExclusiveMinimum = exclusive return s } -// WithEnum sets a the enum values (replace) +// WithEnum sets a the enum values (replace). func (s *Schema) WithEnum(values ...any) *Schema { s.Enum = append([]any{}, values...) return s } -// WithMaxItems sets the max items +// WithMaxItems sets the max items. func (s *Schema) WithMaxItems(size int64) *Schema { s.MaxItems = &size return s } -// WithMinItems sets the min items +// WithMinItems sets the min items. func (s *Schema) WithMinItems(size int64) *Schema { s.MinItems = &size return s } -// UniqueValues dictates that this array can only have unique items +// UniqueValues dictates that this array can only have unique items. 
func (s *Schema) UniqueValues() *Schema { s.UniqueItems = true return s } -// AllowDuplicates this array can have duplicates +// AllowDuplicates this array can have duplicates. func (s *Schema) AllowDuplicates() *Schema { s.UniqueItems = false return s } -// AddToAllOf adds a schema to the allOf property +// AddToAllOf adds a schema to the allOf property. func (s *Schema) AddToAllOf(schemas ...Schema) *Schema { s.AllOf = append(s.AllOf, schemas...) return s } -// WithDiscriminator sets the name of the discriminator field +// WithDiscriminator sets the name of the discriminator field. func (s *Schema) WithDiscriminator(discriminator string) *Schema { s.Discriminator = discriminator return s } -// AsReadOnly flags this schema as readonly +// AsReadOnly flags this schema as readonly. func (s *Schema) AsReadOnly() *Schema { s.ReadOnly = true return s } -// AsWritable flags this schema as writeable (not read-only) +// AsWritable flags this schema as writeable (not read-only). func (s *Schema) AsWritable() *Schema { s.ReadOnly = false return s } -// WithExample sets the example for this schema +// WithExample sets the example for this schema. func (s *Schema) WithExample(example any) *Schema { s.Example = example return s @@ -440,7 +442,7 @@ func (s *Schema) WithExternalDocs(description, url string) *Schema { return s } -// WithXMLName sets the xml name for the object +// WithXMLName sets the xml name for the object. func (s *Schema) WithXMLName(name string) *Schema { if s.XML == nil { s.XML = new(XMLObject) @@ -449,7 +451,7 @@ func (s *Schema) WithXMLName(name string) *Schema { return s } -// WithXMLNamespace sets the xml namespace for the object +// WithXMLNamespace sets the xml namespace for the object. 
func (s *Schema) WithXMLNamespace(namespace string) *Schema { if s.XML == nil { s.XML = new(XMLObject) @@ -458,7 +460,7 @@ func (s *Schema) WithXMLNamespace(namespace string) *Schema { return s } -// WithXMLPrefix sets the xml prefix for the object +// WithXMLPrefix sets the xml prefix for the object. func (s *Schema) WithXMLPrefix(prefix string) *Schema { if s.XML == nil { s.XML = new(XMLObject) @@ -467,7 +469,7 @@ func (s *Schema) WithXMLPrefix(prefix string) *Schema { return s } -// AsXMLAttribute flags this object as xml attribute +// AsXMLAttribute flags this object as xml attribute. func (s *Schema) AsXMLAttribute() *Schema { if s.XML == nil { s.XML = new(XMLObject) @@ -476,7 +478,7 @@ func (s *Schema) AsXMLAttribute() *Schema { return s } -// AsXMLElement flags this object as an xml node +// AsXMLElement flags this object as an xml node. func (s *Schema) AsXMLElement() *Schema { if s.XML == nil { s.XML = new(XMLObject) @@ -485,7 +487,7 @@ func (s *Schema) AsXMLElement() *Schema { return s } -// AsWrappedXML flags this object as wrapped, this is mostly useful for array types +// AsWrappedXML flags this object as wrapped, this is mostly useful for array types. func (s *Schema) AsWrappedXML() *Schema { if s.XML == nil { s.XML = new(XMLObject) @@ -494,7 +496,7 @@ func (s *Schema) AsWrappedXML() *Schema { return s } -// AsUnwrappedXML flags this object as an xml node +// AsUnwrappedXML flags this object as an xml node. func (s *Schema) AsUnwrappedXML() *Schema { if s.XML == nil { s.XML = new(XMLObject) @@ -524,13 +526,13 @@ func (s *Schema) SetValidations(val SchemaValidations) { s.PatternProperties = val.PatternProperties } -// WithValidations is a fluent method to set schema validations +// WithValidations is a fluent method to set schema validations. 
func (s *Schema) WithValidations(val SchemaValidations) *Schema { s.SetValidations(val) return s } -// Validations returns a clone of the validations for this schema +// Validations returns a clone of the validations for this schema. func (s Schema) Validations() SchemaValidations { return SchemaValidations{ CommonValidations: CommonValidations{ @@ -553,40 +555,40 @@ func (s Schema) Validations() SchemaValidations { } } -// MarshalJSON marshal this to JSON +// MarshalJSON marshal this to JSON. func (s Schema) MarshalJSON() ([]byte, error) { b1, err := json.Marshal(s.SchemaProps) if err != nil { - return nil, fmt.Errorf("schema props %v: %w", err, ErrSpec) + return nil, fmt.Errorf("schema props %w: %w", err, ErrSpec) } b2, err := json.Marshal(s.VendorExtensible) if err != nil { - return nil, fmt.Errorf("vendor props %v: %w", err, ErrSpec) + return nil, fmt.Errorf("vendor props %w: %w", err, ErrSpec) } b3, err := s.Ref.MarshalJSON() if err != nil { - return nil, fmt.Errorf("ref prop %v: %w", err, ErrSpec) + return nil, fmt.Errorf("ref prop %w: %w", err, ErrSpec) } b4, err := s.Schema.MarshalJSON() if err != nil { - return nil, fmt.Errorf("schema prop %v: %w", err, ErrSpec) + return nil, fmt.Errorf("schema prop %w: %w", err, ErrSpec) } b5, err := json.Marshal(s.SwaggerSchemaProps) if err != nil { - return nil, fmt.Errorf("common validations %v: %w", err, ErrSpec) + return nil, fmt.Errorf("common validations %w: %w", err, ErrSpec) } var b6 []byte if s.ExtraProps != nil { jj, err := json.Marshal(s.ExtraProps) if err != nil { - return nil, fmt.Errorf("extra props %v: %w", err, ErrSpec) + return nil, fmt.Errorf("extra props %w: %w", err, ErrSpec) } b6 = jj } return jsonutils.ConcatJSON(b1, b2, b3, b4, b5, b6), nil } -// UnmarshalJSON marshal this from JSON +// UnmarshalJSON marshal this from JSON. 
func (s *Schema) UnmarshalJSON(data []byte) error { props := struct { SchemaProps diff --git a/vendor/github.com/go-openapi/spec/schema_loader.go b/vendor/github.com/go-openapi/spec/schema_loader.go index 8d4a9853256..0894c932c6b 100644 --- a/vendor/github.com/go-openapi/spec/schema_loader.go +++ b/vendor/github.com/go-openapi/spec/schema_loader.go @@ -24,7 +24,7 @@ import ( // NOTE: if you are using the go-openapi/loads package, it will override // this value with its own default (a loader to retrieve YAML documents as // well as JSON ones). -var PathLoader = func(pth string) (json.RawMessage, error) { +var PathLoader = func(pth string) (json.RawMessage, error) { //nolint:gochecknoglobals // package-level default loader, overridable by go-openapi/loads data, err := loading.LoadFromFileOrHTTP(pth) if err != nil { return nil, err @@ -76,7 +76,7 @@ type schemaLoader struct { // // If the schema the ref is referring to holds nested refs, Resolve doesn't resolve them. // -// If basePath is an empty string, ref is resolved against the root schema stored in the schemaLoader struct +// If basePath is an empty string, ref is resolved against the root schema stored in the schemaLoader struct. 
func (r *schemaLoader) Resolve(ref *Ref, target any, basePath string) error { return r.resolveRef(ref, target, basePath) } @@ -136,7 +136,7 @@ func (r *schemaLoader) resolveRef(ref *Ref, target any, basePath string) error { root := r.root if (ref.IsRoot() || ref.HasFragmentOnly) && root == nil && basePath != "" { if baseRef, erb := NewRef(basePath); erb == nil { - root, _, _, _ = r.load(baseRef.GetURL()) + root, _ = r.load(baseRef.GetURL()) } } @@ -144,7 +144,7 @@ func (r *schemaLoader) resolveRef(ref *Ref, target any, basePath string) error { data = root } else { baseRef := normalizeRef(ref, basePath) - data, _, _, err = r.load(baseRef.GetURL()) + data, err = r.load(baseRef.GetURL()) if err != nil { return err } @@ -160,33 +160,32 @@ func (r *schemaLoader) resolveRef(ref *Ref, target any, basePath string) error { return jsonutils.FromDynamicJSON(res, target) } -func (r *schemaLoader) load(refURL *url.URL) (any, url.URL, bool, error) { +func (r *schemaLoader) load(refURL *url.URL) (any, error) { debugLog("loading schema from url: %s", refURL) toFetch := *refURL toFetch.Fragment = "" - var err error pth := toFetch.String() normalized := normalizeBase(pth) debugLog("loading doc from: %s", normalized) data, fromCache := r.cache.Get(normalized) if fromCache { - return data, toFetch, fromCache, nil + return data, nil } b, err := r.context.loadDoc(normalized) if err != nil { - return nil, url.URL{}, false, err + return nil, err } var doc any if err := json.Unmarshal(b, &doc); err != nil { - return nil, url.URL{}, false, err + return nil, err } r.cache.Set(normalized, doc) - return doc, toFetch, fromCache, nil + return doc, nil } // isCircular detects cycles in sequences of $ref. 
@@ -293,8 +292,8 @@ func defaultSchemaLoader( root any, expandOptions *ExpandOptions, cache ResolutionCache, - context *resolverContext) *schemaLoader { - + context *resolverContext, +) *schemaLoader { if expandOptions == nil { expandOptions = &ExpandOptions{} } diff --git a/vendor/github.com/go-openapi/spec/security_scheme.go b/vendor/github.com/go-openapi/spec/security_scheme.go index 46a4a7e2f9f..6d9019e749c 100644 --- a/vendor/github.com/go-openapi/spec/security_scheme.go +++ b/vendor/github.com/go-openapi/spec/security_scheme.go @@ -20,17 +20,17 @@ const ( accessCode = "accessCode" ) -// BasicAuth creates a basic auth security scheme +// BasicAuth creates a basic auth security scheme. func BasicAuth() *SecurityScheme { return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: basic}} } -// APIKeyAuth creates an api key auth security scheme +// APIKeyAuth creates an api key auth security scheme. func APIKeyAuth(fieldName, valueSource string) *SecurityScheme { return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: apiKey, Name: fieldName, In: valueSource}} } -// OAuth2Implicit creates an implicit flow oauth2 security scheme +// OAuth2Implicit creates an implicit flow oauth2 security scheme. func OAuth2Implicit(authorizationURL string) *SecurityScheme { return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ Type: oauth2, @@ -39,7 +39,7 @@ func OAuth2Implicit(authorizationURL string) *SecurityScheme { }} } -// OAuth2Password creates a password flow oauth2 security scheme +// OAuth2Password creates a password flow oauth2 security scheme. func OAuth2Password(tokenURL string) *SecurityScheme { return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ Type: oauth2, @@ -48,7 +48,7 @@ func OAuth2Password(tokenURL string) *SecurityScheme { }} } -// OAuth2Application creates an application flow oauth2 security scheme +// OAuth2Application creates an application flow oauth2 security scheme. 
func OAuth2Application(tokenURL string) *SecurityScheme { return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ Type: oauth2, @@ -57,7 +57,7 @@ func OAuth2Application(tokenURL string) *SecurityScheme { }} } -// OAuth2AccessToken creates an access token flow oauth2 security scheme +// OAuth2AccessToken creates an access token flow oauth2 security scheme. func OAuth2AccessToken(authorizationURL, tokenURL string) *SecurityScheme { return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ Type: oauth2, @@ -67,7 +67,7 @@ func OAuth2AccessToken(authorizationURL, tokenURL string) *SecurityScheme { }} } -// SecuritySchemeProps describes a swagger security scheme in the securityDefinitions section +// SecuritySchemeProps describes a swagger security scheme in the securityDefinitions section. type SecuritySchemeProps struct { Description string `json:"description,omitempty"` Type string `json:"type"` @@ -79,7 +79,7 @@ type SecuritySchemeProps struct { Scopes map[string]string `json:"scopes,omitempty"` // oauth2 } -// AddScope adds a scope to this security scheme +// AddScope adds a scope to this security scheme. func (s *SecuritySchemeProps) AddScope(scope, description string) { if s.Scopes == nil { s.Scopes = make(map[string]string) @@ -97,7 +97,7 @@ type SecurityScheme struct { SecuritySchemeProps } -// JSONLookup implements an interface to customize json pointer lookup +// JSONLookup implements an interface to customize json pointer lookup. func (s SecurityScheme) JSONLookup(token string) (any, error) { if ex, ok := s.Extensions[token]; ok { return &ex, nil @@ -107,7 +107,7 @@ func (s SecurityScheme) JSONLookup(token string) (any, error) { return r, err } -// MarshalJSON marshal this to JSON +// MarshalJSON marshal this to JSON. 
func (s SecurityScheme) MarshalJSON() ([]byte, error) { var ( b1 []byte @@ -150,7 +150,7 @@ func (s SecurityScheme) MarshalJSON() ([]byte, error) { return jsonutils.ConcatJSON(b1, b2), nil } -// UnmarshalJSON marshal this from JSON +// UnmarshalJSON marshal this from JSON. func (s *SecurityScheme) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { return err diff --git a/vendor/github.com/go-openapi/spec/spec.go b/vendor/github.com/go-openapi/spec/spec.go index 0d0aaabc487..4eba04b2d1a 100644 --- a/vendor/github.com/go-openapi/spec/spec.go +++ b/vendor/github.com/go-openapi/spec/spec.go @@ -13,13 +13,13 @@ import ( //go:generate perl -pi -e s,Json,JSON,g bindata.go const ( - // SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs + // SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs. SwaggerSchemaURL = "http://swagger.io/v2/schema.json#" - // JSONSchemaURL the url for the json schema + // JSONSchemaURL the url for the json schema. JSONSchemaURL = "http://json-schema.org/draft-04/schema#" ) -// MustLoadJSONSchemaDraft04 panics when Swagger20Schema returns an error +// MustLoadJSONSchemaDraft04 panics when Swagger20Schema returns an error. func MustLoadJSONSchemaDraft04() *Schema { d, e := JSONSchemaDraft04() if e != nil { @@ -28,7 +28,7 @@ func MustLoadJSONSchemaDraft04() *Schema { return d } -// JSONSchemaDraft04 loads the json schema document for json schema draft04 +// JSONSchemaDraft04 loads the json schema document for json schema draft04. func JSONSchemaDraft04() (*Schema, error) { b, err := jsonschemaDraft04JSONBytes() if err != nil { @@ -42,7 +42,7 @@ func JSONSchemaDraft04() (*Schema, error) { return schema, nil } -// MustLoadSwagger20Schema panics when Swagger20Schema returns an error +// MustLoadSwagger20Schema panics when Swagger20Schema returns an error. 
func MustLoadSwagger20Schema() *Schema { d, e := Swagger20Schema() if e != nil { @@ -51,9 +51,8 @@ func MustLoadSwagger20Schema() *Schema { return d } -// Swagger20Schema loads the swagger 2.0 schema from the embedded assets +// Swagger20Schema loads the swagger 2.0 schema from the embedded assets. func Swagger20Schema() (*Schema, error) { - b, err := v2SchemaJSONBytes() if err != nil { return nil, err diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go index f7cd0f608c2..dbe32db8a33 100644 --- a/vendor/github.com/go-openapi/spec/swagger.go +++ b/vendor/github.com/go-openapi/spec/swagger.go @@ -25,7 +25,7 @@ type Swagger struct { SwaggerProps } -// JSONLookup look up a value by the json property name +// JSONLookup look up a value by the json property name. func (s Swagger) JSONLookup(token string) (any, error) { if ex, ok := s.Extensions[token]; ok { return &ex, nil @@ -34,7 +34,7 @@ func (s Swagger) JSONLookup(token string) (any, error) { return r, err } -// MarshalJSON marshals this swagger structure to json +// MarshalJSON marshals this swagger structure to json. func (s Swagger) MarshalJSON() ([]byte, error) { b1, err := json.Marshal(s.SwaggerProps) if err != nil { @@ -47,7 +47,7 @@ func (s Swagger) MarshalJSON() ([]byte, error) { return jsonutils.ConcatJSON(b1, b2), nil } -// UnmarshalJSON unmarshals a swagger spec from json +// UnmarshalJSON unmarshals a swagger spec from json. func (s *Swagger) UnmarshalJSON(data []byte) error { var sw Swagger if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil { @@ -60,7 +60,7 @@ func (s *Swagger) UnmarshalJSON(data []byte) error { return nil } -// GobEncode provides a safe gob encoder for Swagger, including extensions +// GobEncode provides a safe gob encoder for Swagger, including extensions. 
func (s Swagger) GobEncode() ([]byte, error) { var b bytes.Buffer raw := struct { @@ -74,7 +74,7 @@ func (s Swagger) GobEncode() ([]byte, error) { return b.Bytes(), err } -// GobDecode provides a safe gob decoder for Swagger, including extensions +// GobDecode provides a safe gob decoder for Swagger, including extensions. func (s *Swagger) GobDecode(b []byte) error { var raw struct { Props SwaggerProps @@ -95,7 +95,7 @@ func (s *Swagger) GobDecode(b []byte) error { // NOTE: validation rules // - the scheme, when present must be from [http, https, ws, wss] // - BasePath must start with a leading "/" -// - Paths is required +// - Paths is required. type SwaggerProps struct { ID string `json:"id,omitempty"` Consumes []string `json:"consumes,omitempty"` @@ -126,7 +126,7 @@ type gobSwaggerPropsAlias struct { SecurityIsEmpty bool } -// GobEncode provides a safe gob encoder for SwaggerProps, including empty security requirements +// GobEncode provides a safe gob encoder for SwaggerProps, including empty security requirements. func (o SwaggerProps) GobEncode() ([]byte, error) { raw := gobSwaggerPropsAlias{ Alias: (*swaggerPropsAlias)(&o), @@ -171,7 +171,7 @@ func (o SwaggerProps) GobEncode() ([]byte, error) { return b.Bytes(), err } -// GobDecode provides a safe gob decoder for SwaggerProps, including empty security requirements +// GobDecode provides a safe gob decoder for SwaggerProps, including empty security requirements. func (o *SwaggerProps) GobDecode(b []byte) error { var raw gobSwaggerPropsAlias @@ -207,16 +207,16 @@ func (o *SwaggerProps) GobDecode(b []byte) error { return nil } -// Dependencies represent a dependencies property +// Dependencies represent a dependencies property. type Dependencies map[string]SchemaOrStringArray -// SchemaOrBool represents a schema or boolean value, is biased towards true for the boolean property +// SchemaOrBool represents a schema or boolean value, is biased towards true for the boolean property. 
type SchemaOrBool struct { Allows bool Schema *Schema } -// JSONLookup implements an interface to customize json pointer lookup +// JSONLookup implements an interface to customize json pointer lookup. func (s SchemaOrBool) JSONLookup(token string) (any, error) { if token == "allows" { return s.Allows, nil @@ -225,10 +225,12 @@ func (s SchemaOrBool) JSONLookup(token string) (any, error) { return r, err } -var jsTrue = []byte("true") -var jsFalse = []byte("false") +var ( + jsTrue = []byte("true") //nolint:gochecknoglobals // constant-like byte slices for JSON marshaling + jsFalse = []byte("false") //nolint:gochecknoglobals // constant-like byte slices for JSON marshaling +) -// MarshalJSON convert this object to JSON +// MarshalJSON convert this object to JSON. func (s SchemaOrBool) MarshalJSON() ([]byte, error) { if s.Schema != nil { return json.Marshal(s.Schema) @@ -240,7 +242,7 @@ func (s SchemaOrBool) MarshalJSON() ([]byte, error) { return jsTrue, nil } -// UnmarshalJSON converts this bool or schema object from a JSON structure +// UnmarshalJSON converts this bool or schema object from a JSON structure. func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { var nw SchemaOrBool if len(data) > 0 { @@ -257,19 +259,19 @@ func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { return nil } -// SchemaOrStringArray represents a schema or a string array +// SchemaOrStringArray represents a schema or a string array. type SchemaOrStringArray struct { Schema *Schema Property []string } -// JSONLookup implements an interface to customize json pointer lookup +// JSONLookup implements an interface to customize json pointer lookup. func (s SchemaOrStringArray) JSONLookup(token string) (any, error) { r, _, err := jsonpointer.GetForToken(s.Schema, token) return r, err } -// MarshalJSON converts this schema object or array into JSON structure +// MarshalJSON converts this schema object or array into JSON structure. 
func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { if len(s.Property) > 0 { return json.Marshal(s.Property) @@ -280,7 +282,7 @@ func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { return []byte("null"), nil } -// UnmarshalJSON converts this schema object or array from a JSON structure +// UnmarshalJSON converts this schema object or array from a JSON structure. func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error { var first byte if len(data) > 1 { @@ -318,15 +320,15 @@ type Definitions map[string]Schema type SecurityDefinitions map[string]*SecurityScheme // StringOrArray represents a value that can either be a string -// or an array of strings. Mainly here for serialization purposes +// or an array of strings. Mainly here for serialization purposes. type StringOrArray []string -// Contains returns true when the value is contained in the slice +// Contains returns true when the value is contained in the slice. func (s StringOrArray) Contains(value string) bool { return slices.Contains(s, value) } -// JSONLookup implements an interface to customize json pointer lookup +// JSONLookup implements an interface to customize json pointer lookup. func (s SchemaOrArray) JSONLookup(token string) (any, error) { if _, err := strconv.Atoi(token); err == nil { r, _, err := jsonpointer.GetForToken(s.Schemas, token) @@ -336,7 +338,7 @@ func (s SchemaOrArray) JSONLookup(token string) (any, error) { return r, err } -// UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string +// UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string. func (s *StringOrArray) UnmarshalJSON(data []byte) error { var first byte if len(data) > 1 { @@ -368,7 +370,7 @@ func (s *StringOrArray) UnmarshalJSON(data []byte) error { } } -// MarshalJSON converts this string or array to a JSON array or JSON string +// MarshalJSON converts this string or array to a JSON array or JSON string. 
func (s StringOrArray) MarshalJSON() ([]byte, error) { if len(s) == 1 { return json.Marshal([]string(s)[0]) @@ -377,13 +379,13 @@ func (s StringOrArray) MarshalJSON() ([]byte, error) { } // SchemaOrArray represents a value that can either be a Schema -// or an array of Schema. Mainly here for serialization purposes +// or an array of Schema. Mainly here for serialization purposes. type SchemaOrArray struct { Schema *Schema Schemas []Schema } -// Len returns the number of schemas in this property +// Len returns the number of schemas in this property. func (s SchemaOrArray) Len() int { if s.Schema != nil { return 1 @@ -391,7 +393,7 @@ func (s SchemaOrArray) Len() int { return len(s.Schemas) } -// ContainsType returns true when one of the schemas is of the specified type +// ContainsType returns true when one of the schemas is of the specified type. func (s *SchemaOrArray) ContainsType(name string) bool { if s.Schema != nil { return s.Schema.Type != nil && s.Schema.Type.Contains(name) @@ -399,7 +401,7 @@ func (s *SchemaOrArray) ContainsType(name string) bool { return false } -// MarshalJSON converts this schema object or array into JSON structure +// MarshalJSON converts this schema object or array into JSON structure. func (s SchemaOrArray) MarshalJSON() ([]byte, error) { if len(s.Schemas) > 0 { return json.Marshal(s.Schemas) @@ -407,7 +409,7 @@ func (s SchemaOrArray) MarshalJSON() ([]byte, error) { return json.Marshal(s.Schema) } -// UnmarshalJSON converts this schema object or array from a JSON structure +// UnmarshalJSON converts this schema object or array from a JSON structure. 
func (s *SchemaOrArray) UnmarshalJSON(data []byte) error { var nw SchemaOrArray var first byte diff --git a/vendor/github.com/go-openapi/spec/tag.go b/vendor/github.com/go-openapi/spec/tag.go index ae98fd985fb..af3fb0a4e89 100644 --- a/vendor/github.com/go-openapi/spec/tag.go +++ b/vendor/github.com/go-openapi/spec/tag.go @@ -10,7 +10,7 @@ import ( "github.com/go-openapi/swag/jsonutils" ) -// TagProps describe a tag entry in the top level tags section of a swagger spec +// TagProps describe a tag entry in the top level tags section of a swagger spec. type TagProps struct { Description string `json:"description,omitempty"` Name string `json:"name,omitempty"` @@ -27,12 +27,12 @@ type Tag struct { TagProps } -// NewTag creates a new tag +// NewTag creates a new tag. func NewTag(name, description string, externalDocs *ExternalDocumentation) Tag { return Tag{TagProps: TagProps{Description: description, Name: name, ExternalDocs: externalDocs}} } -// JSONLookup implements an interface to customize json pointer lookup +// JSONLookup implements an interface to customize json pointer lookup. func (t Tag) JSONLookup(token string) (any, error) { if ex, ok := t.Extensions[token]; ok { return &ex, nil @@ -42,7 +42,7 @@ func (t Tag) JSONLookup(token string) (any, error) { return r, err } -// MarshalJSON marshal this to JSON +// MarshalJSON marshal this to JSON. func (t Tag) MarshalJSON() ([]byte, error) { b1, err := json.Marshal(t.TagProps) if err != nil { @@ -55,7 +55,7 @@ func (t Tag) MarshalJSON() ([]byte, error) { return jsonutils.ConcatJSON(b1, b2), nil } -// UnmarshalJSON marshal this from JSON +// UnmarshalJSON marshal this from JSON. 
func (t *Tag) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &t.TagProps); err != nil { return err diff --git a/vendor/github.com/go-openapi/spec/validations.go b/vendor/github.com/go-openapi/spec/validations.go index 2c0dc424793..a82c2ffe138 100644 --- a/vendor/github.com/go-openapi/spec/validations.go +++ b/vendor/github.com/go-openapi/spec/validations.go @@ -3,7 +3,7 @@ package spec -// CommonValidations describe common JSON-schema validations +// CommonValidations describe common JSON-schema validations. type CommonValidations struct { Maximum *float64 `json:"maximum,omitempty"` ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` @@ -143,22 +143,22 @@ func (v CommonValidations) Validations() SchemaValidations { } } -// HasNumberValidations indicates if the validations are for numbers or integers +// HasNumberValidations indicates if the validations are for numbers or integers. func (v CommonValidations) HasNumberValidations() bool { return v.Maximum != nil || v.Minimum != nil || v.MultipleOf != nil } -// HasStringValidations indicates if the validations are for strings +// HasStringValidations indicates if the validations are for strings. func (v CommonValidations) HasStringValidations() bool { return v.MaxLength != nil || v.MinLength != nil || v.Pattern != "" } -// HasArrayValidations indicates if the validations are for arrays +// HasArrayValidations indicates if the validations are for arrays. func (v CommonValidations) HasArrayValidations() bool { return v.MaxItems != nil || v.MinItems != nil || v.UniqueItems } -// HasEnum indicates if the validation includes some enum constraint +// HasEnum indicates if the validation includes some enum constraint. 
func (v CommonValidations) HasEnum() bool { return len(v.Enum) > 0 } @@ -175,12 +175,12 @@ type SchemaValidations struct { MinProperties *int64 `json:"minProperties,omitempty"` } -// HasObjectValidations indicates if the validations are for objects +// HasObjectValidations indicates if the validations are for objects. func (v SchemaValidations) HasObjectValidations() bool { return v.MaxProperties != nil || v.MinProperties != nil || v.PatternProperties != nil } -// SetValidations for schema validations +// SetValidations for schema validations. func (v *SchemaValidations) SetValidations(val SchemaValidations) { v.CommonValidations.SetValidations(val) v.PatternProperties = val.PatternProperties @@ -188,7 +188,7 @@ func (v *SchemaValidations) SetValidations(val SchemaValidations) { v.MinProperties = val.MinProperties } -// Validations for a schema +// Validations for a schema. func (v SchemaValidations) Validations() SchemaValidations { val := v.CommonValidations.Validations() val.PatternProperties = v.PatternProperties diff --git a/vendor/github.com/go-openapi/spec/xml_object.go b/vendor/github.com/go-openapi/spec/xml_object.go index bf2f8f18b24..07f7ef8ccd7 100644 --- a/vendor/github.com/go-openapi/spec/xml_object.go +++ b/vendor/github.com/go-openapi/spec/xml_object.go @@ -14,43 +14,43 @@ type XMLObject struct { Wrapped bool `json:"wrapped,omitempty"` } -// WithName sets the xml name for the object +// WithName sets the xml name for the object. func (x *XMLObject) WithName(name string) *XMLObject { x.Name = name return x } -// WithNamespace sets the xml namespace for the object +// WithNamespace sets the xml namespace for the object. func (x *XMLObject) WithNamespace(namespace string) *XMLObject { x.Namespace = namespace return x } -// WithPrefix sets the xml prefix for the object +// WithPrefix sets the xml prefix for the object. 
func (x *XMLObject) WithPrefix(prefix string) *XMLObject { x.Prefix = prefix return x } -// AsAttribute flags this object as xml attribute +// AsAttribute flags this object as xml attribute. func (x *XMLObject) AsAttribute() *XMLObject { x.Attribute = true return x } -// AsElement flags this object as an xml node +// AsElement flags this object as an xml node. func (x *XMLObject) AsElement() *XMLObject { x.Attribute = false return x } -// AsWrapped flags this object as wrapped, this is mostly useful for array types +// AsWrapped flags this object as wrapped, this is mostly useful for array types. func (x *XMLObject) AsWrapped() *XMLObject { x.Wrapped = true return x } -// AsUnwrapped flags this object as an xml node +// AsUnwrapped flags this object as an xml node. func (x *XMLObject) AsUnwrapped() *XMLObject { x.Wrapped = false return x diff --git a/vendor/github.com/go-openapi/strfmt/.codecov.yml b/vendor/github.com/go-openapi/strfmt/.codecov.yml new file mode 100644 index 00000000000..a5ba8e96d8e --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/.codecov.yml @@ -0,0 +1,9 @@ +codecov: + notify: + after_n_builds: 2 + +coverage: + status: + patch: + default: + target: 80% diff --git a/vendor/github.com/go-openapi/strfmt/.gitignore b/vendor/github.com/go-openapi/strfmt/.gitignore index dd91ed6a04e..885dc27ab0b 100644 --- a/vendor/github.com/go-openapi/strfmt/.gitignore +++ b/vendor/github.com/go-openapi/strfmt/.gitignore @@ -1,2 +1,6 @@ -secrets.yml -coverage.out +*.out +*.cov +.idea +.env +.mcp.json +.claude/ diff --git a/vendor/github.com/go-openapi/strfmt/.golangci.yml b/vendor/github.com/go-openapi/strfmt/.golangci.yml index 1ad5adf47e6..3c4cd489a1d 100644 --- a/vendor/github.com/go-openapi/strfmt/.golangci.yml +++ b/vendor/github.com/go-openapi/strfmt/.golangci.yml @@ -2,25 +2,11 @@ version: "2" linters: default: all disable: - - cyclop - depguard - - errchkjson - - errorlint - - exhaustruct - - forcetypeassert - funlen - - gochecknoglobals - - 
gochecknoinits - - gocognit - - godot + - gomoddirectives - godox - - gosmopolitan - - inamedparam - - intrange - - ireturn - - lll - - musttag - - nestif + - exhaustruct - nlreturn - nonamedreturns - noinlineerr @@ -29,7 +15,6 @@ linters: - testpackage - thelper - tparallel - - unparam - varnamelen - whitespace - wrapcheck @@ -41,8 +26,15 @@ linters: goconst: min-len: 2 min-occurrences: 3 + cyclop: + max-complexity: 20 gocyclo: - min-complexity: 45 + min-complexity: 20 + exhaustive: + default-signifies-exhaustive: true + default-case-required: true + lll: + line-length: 180 exclusions: generated: lax presets: @@ -58,6 +50,7 @@ formatters: enable: - gofmt - goimports + - gofumpt exclusions: generated: lax paths: diff --git a/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md index 9322b065e37..bac878f216a 100644 --- a/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md +++ b/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md @@ -23,7 +23,9 @@ include: Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or + advances + * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic @@ -55,7 +57,7 @@ further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All +reported by contacting the project team at . All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. @@ -68,7 +70,7 @@ members of the project's leadership. 
## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] +available at [][version] [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/strfmt/CONTRIBUTORS.md b/vendor/github.com/go-openapi/strfmt/CONTRIBUTORS.md new file mode 100644 index 00000000000..e49700d4d25 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/CONTRIBUTORS.md @@ -0,0 +1,52 @@ +# Contributors + +- Repository: ['go-openapi/strfmt'] + +| Total Contributors | Total Contributions | +| --- | --- | +| 40 | 225 | + +| Username | All Time Contribution Count | All Commits | +| --- | --- | --- | +| @casualjim | 88 | | +| @fredbi | 57 | | +| @youyuanwu | 13 | | +| @jlambatl | 9 | | +| @GlenDC | 5 | | +| @padamstx | 4 | | +| @dimovnike | 3 | | +| @carlv-stripe | 3 | | +| @Copilot | 3 | | +| @keramix | 3 | | +| @gregmarr | 2 | | +| @vadorovsky | 2 | | +| @Ompluscator | 2 | | +| @johnnyg | 2 | | +| @chakrit | 2 | | +| @bg451 | 2 | | +| @aleksandr-vin | 2 | | +| @ujjwalsh | 1 | | +| @kenjones-cisco | 1 | | +| @jwalter1-quest | 1 | | +| @ccoVeille | 1 | | +| @tylerb | 1 | | +| @tzneal | 1 | | +| @tklauser | 1 | | +| @SuperQ | 1 | | +| @srizzling | 1 | | +| @shawnps | 1 | | +| @prashantv | 1 | | +| @krnkl | 1 | | +| @mstoykov | 1 | | +| @maxatome | 1 | | +| @jerome-laforge | 1 | | +| @justincormack | 1 | | +| @elipavlov | 1 | | +| @gbjk | 1 | | +| @enesanbar | 1 | | +| @CodeLingoBot | 1 | | +| @Kunde21 | 1 | | +| @bvwells | 1 | | +| @ligustah | 1 | | + + _this file was generated by the [Contributors GitHub Action](https://github.com/github-community-projects/contributors)_ diff --git a/vendor/github.com/go-openapi/strfmt/README.md b/vendor/github.com/go-openapi/strfmt/README.md index de5afe13760..a0cf6427541 100644 --- a/vendor/github.com/go-openapi/strfmt/README.md +++ 
b/vendor/github.com/go-openapi/strfmt/README.md @@ -1,15 +1,61 @@ -# Strfmt [![Build Status](https://github.com/go-openapi/strfmt/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/strfmt/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/strfmt/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/strfmt) -[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/strfmt/master/LICENSE) -[![GoDoc](https://godoc.org/github.com/go-openapi/strfmt?status.svg)](http://godoc.org/github.com/go-openapi/strfmt) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/strfmt)](https://goreportcard.com/report/github.com/go-openapi/strfmt) +# strfmt + + +[![Tests][test-badge]][test-url] [![Coverage][cov-badge]][cov-url] [![CI vuln scan][vuln-scan-badge]][vuln-scan-url] [![CodeQL][codeql-badge]][codeql-url] + + + +[![Release][release-badge]][release-url] [![Go Report Card][gocard-badge]][gocard-url] [![CodeFactor Grade][codefactor-badge]][codefactor-url] [![License][license-badge]][license-url] + + +[![GoDoc][godoc-badge]][godoc-url] [![Discord Channel][discord-badge]][discord-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge] + +--- + +Golang support for string formats defined by JSON Schema and OpenAPI. + +## Announcements + +* **2025-12-19** : new community chat on discord + * a new discord community channel is available to be notified of changes and support users + * our venerable Slack channel remains open, and will be eventually discontinued on **2026-03-31** + +You may join the discord community by clicking the invite link on the discord badge (also above). 
[![Discord Channel][discord-badge]][discord-url] + +Or join our Slack channel: [![Slack Channel][slack-logo]![slack-badge]][slack-url] + +* **2026-03-07** : v0.26.0 **dropped dependency to the mongodb driver** + * mongodb users can still use this package without any change + * however, we have frozen the back-compatible support for mongodb driver at v2.5.0 + * users who want to keep-up with future evolutions (possibly incompatible) of this driver + can do so by adding a blank import in their program: `import _ "github.com/go-openapi/strfmt/enable/mongodb"`. + This will switch the behavior to the actual driver, which remains regularly updated as an independent module. + +## Status + +API is stable. + +## Import this library in your project + +```cmd +go get github.com/go-openapi/strfmt +``` + +## Contents This package exposes a registry of data types to support string formats in the go-openapi toolkit. -strfmt represents a well known string format such as credit card or email. The go toolkit for OpenAPI specifications knows how to deal with those. +`strfmt` represents a well known string format such as hostname or email. + +This package provides a few extra formats such as credit card (US), color, etc. + +Format types can serialize and deserialize JSON or from a SQL database. + +BSON is also supported (MongoDB). + +### Supported formats -## Supported data formats -go-openapi/strfmt follows the swagger 2.0 specification with the following formats +`go-openapi/strfmt` follows the swagger 2.0 specification with the following formats defined [here](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types). It also provides convenient extensions to go-openapi users. @@ -43,7 +89,7 @@ It also provides convenient extensions to go-openapi users. > It does not provide validation for numerical values with swagger format extension for JSON types "number" or > "integer" (e.g. float, double, int32...). 
-## Type conversion +### Type conversion All types defined here are stringers and may be converted to strings with `.String()`. Note that most types defined by this package may be converted directly to string like `string(Email{})`. @@ -51,13 +97,14 @@ Note that most types defined by this package may be converted directly to string `Date` and `DateTime` may be converted directly to `time.Time` like `time.Time(Time{})`. Similarly, you can convert `Duration` to `time.Duration` as in `time.Duration(Duration{})` -## Using pointers +### Using pointers The `conv` subpackage provides helpers to convert the types to and from pointers, just like `go-openapi/swag` does with primitive types. -## Format types -Types defined in strfmt expose marshaling and validation capabilities. +### Format types + +Types defined in `strfmt` expose marshaling and validation capabilities. List of defined types: - Base64 @@ -87,6 +134,97 @@ List of defined types: - [UUID7](https://www.rfc-editor.org/rfc/rfc9562.html#name-uuid-version-7) - [ULID](https://github.com/ulid/spec) +### Database support + +All format types implement the `database/sql` interfaces `sql.Scanner` and `driver.Valuer`, +so they work out of the box with Go's standard `database/sql` package and any SQL driver. + +All format types also implement BSON marshaling/unmarshaling for use with MongoDB. +By default, a built-in minimal codec is used (compatible with mongo-driver v2.5.0). +For full driver support, add `import _ "github.com/go-openapi/strfmt/enable/mongodb"`. + +> **MySQL / MariaDB caveat for `DateTime`:** +> The `go-sql-driver/mysql` driver has hard-coded handling for `time.Time` but does not +> intercept type redefinitions like `strfmt.DateTime`. As a result, `DateTime.Value()` sends +> an RFC 3339 string (e.g. `"2024-06-15T12:30:45.123Z"`) that MySQL/MariaDB rejects for +> `DATETIME` columns. 
+> +> Workaround: set `strfmt.MarshalFormat` to a MySQL-compatible format such as +> `strfmt.ISO8601LocalTime` and normalize to UTC before marshaling: +> +> ```go +> strfmt.MarshalFormat = strfmt.ISO8601LocalTime +> strfmt.NormalizeTimeForMarshal = func(t time.Time) time.Time { return t.UTC() } +> ``` +> +> See [#174](https://github.com/go-openapi/strfmt/issues/174) for details. + +Integration tests for MongoDB, MariaDB, and PostgreSQL run in CI to verify database roundtrip +compatibility for all format types. See [`internal/testintegration/`](internal/testintegration/). + +## Change log + +See + +## References + + + ## Licensing This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE). + +## Other documentation + +* [All-time contributors](./CONTRIBUTORS.md) +* [Contributing guidelines](.github/CONTRIBUTING.md) +* [Maintainers documentation](docs/MAINTAINERS.md) +* [Code style](docs/STYLE.md) + +## Cutting a new release + +Maintainers can cut a new release by either: + +* running [this workflow](https://github.com/go-openapi/strfmt/actions/workflows/bump-release.yml) +* or pushing a semver tag + * signed tags are preferred + * The tag message is prepended to release notes + + +[test-badge]: https://github.com/go-openapi/strfmt/actions/workflows/go-test.yml/badge.svg +[test-url]: https://github.com/go-openapi/strfmt/actions/workflows/go-test.yml +[cov-badge]: https://codecov.io/gh/go-openapi/strfmt/branch/master/graph/badge.svg +[cov-url]: https://codecov.io/gh/go-openapi/strfmt +[vuln-scan-badge]: https://github.com/go-openapi/strfmt/actions/workflows/scanner.yml/badge.svg +[vuln-scan-url]: https://github.com/go-openapi/strfmt/actions/workflows/scanner.yml +[codeql-badge]: https://github.com/go-openapi/strfmt/actions/workflows/codeql.yml/badge.svg +[codeql-url]: https://github.com/go-openapi/strfmt/actions/workflows/codeql.yml + +[release-badge]: https://badge.fury.io/gh/go-openapi%2Fstrfmt.svg +[release-url]: 
https://badge.fury.io/gh/go-openapi%2Fstrfmt +[gomod-badge]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Fstrfmt.svg +[gomod-url]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Fstrfmt + +[gocard-badge]: https://goreportcard.com/badge/github.com/go-openapi/strfmt +[gocard-url]: https://goreportcard.com/report/github.com/go-openapi/strfmt +[codefactor-badge]: https://img.shields.io/codefactor/grade/github/go-openapi/strfmt +[codefactor-url]: https://www.codefactor.io/repository/github/go-openapi/strfmt + +[doc-badge]: https://img.shields.io/badge/doc-site-blue?link=https%3A%2F%2Fgoswagger.io%2Fgo-openapi%2F +[doc-url]: https://goswagger.io/go-openapi +[godoc-badge]: https://pkg.go.dev/badge/github.com/go-openapi/strfmt +[godoc-url]: http://pkg.go.dev/github.com/go-openapi/strfmt +[slack-logo]: https://a.slack-edge.com/e6a93c1/img/icons/favicon-32.png +[slack-badge]: https://img.shields.io/badge/slack-blue?link=https%3A%2F%2Fgoswagger.slack.com%2Farchives%2FC04R30YM +[slack-url]: https://goswagger.slack.com/archives/C04R30YMU +[discord-badge]: https://img.shields.io/discord/1446918742398341256?logo=discord&label=discord&color=blue +[discord-url]: https://discord.gg/FfnFYaC3k5 + + +[license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg +[license-url]: https://github.com/go-openapi/strfmt/?tab=Apache-2.0-1-ov-file#readme + +[goversion-badge]: https://img.shields.io/github/go-mod/go-version/go-openapi/strfmt +[goversion-url]: https://github.com/go-openapi/strfmt/blob/master/go.mod +[top-badge]: https://img.shields.io/github/languages/top/go-openapi/strfmt +[commits-badge]: https://img.shields.io/github/commits-since/go-openapi/strfmt/latest diff --git a/vendor/github.com/go-openapi/strfmt/SECURITY.md b/vendor/github.com/go-openapi/strfmt/SECURITY.md new file mode 100644 index 00000000000..6ceb159ca22 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/SECURITY.md @@ -0,0 +1,37 @@ +# Security Policy + +This policy outlines the 
commitment and practices of the go-openapi maintainers regarding security. + +## Supported Versions + +| Version | Supported | +| ------- | ------------------ | +| 0.x | :white_check_mark: | + +## Vulnerability checks in place + +This repository uses automated vulnerability scans, at every merged commit and at least once a week. + +We use: + +* [`GitHub CodeQL`][codeql-url] +* [`trivy`][trivy-url] +* [`govulncheck`][govulncheck-url] + +Reports are centralized in github security reports and visible only to the maintainers. + +## Reporting a vulnerability + +If you become aware of a security vulnerability that affects the current repository, +**please report it privately to the maintainers** +rather than opening a publicly visible GitHub issue. + +Please follow the instructions provided by github to [Privately report a security vulnerability][github-guidance-url]. + +> [!NOTE] +> On Github, navigate to the project's "Security" tab then click on "Report a vulnerability". + +[codeql-url]: https://github.com/github/codeql +[trivy-url]: https://trivy.dev/docs/latest/getting-started +[govulncheck-url]: https://go.dev/blog/govulncheck +[github-guidance-url]: https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability diff --git a/vendor/github.com/go-openapi/strfmt/bson.go b/vendor/github.com/go-openapi/strfmt/bson.go index 0eec8f6432c..16a83f64086 100644 --- a/vendor/github.com/go-openapi/strfmt/bson.go +++ b/vendor/github.com/go-openapi/strfmt/bson.go @@ -5,62 +5,62 @@ package strfmt import ( "database/sql/driver" + "encoding/hex" + "encoding/json" "fmt" - - bsonprim "go.mongodb.org/mongo-driver/bson/primitive" ) -func init() { +func init() { //nolint:gochecknoinits // registers bsonobjectid format in the default registry var id ObjectId - // register this format in the default registry Default.Add("bsonobjectid", 
&id, IsBSONObjectID) } -// IsBSONObjectID returns true when the string is a valid BSON.ObjectId +// IsBSONObjectID returns true when the string is a valid BSON [ObjectId]. func IsBSONObjectID(str string) bool { - _, err := bsonprim.ObjectIDFromHex(str) + _, err := objectIDFromHex(str) return err == nil } -// ObjectId represents a BSON object ID (alias to go.mongodb.org/mongo-driver/bson/primitive.ObjectID) +// ObjectId represents a BSON object ID (a 12-byte unique identifier). // -// swagger:strfmt bsonobjectid -type ObjectId bsonprim.ObjectID //nolint:revive +// swagger:strfmt bsonobjectid. +type ObjectId [12]byte //nolint:revive + +// nilObjectID is the zero-value ObjectId. +var nilObjectID ObjectId //nolint:gochecknoglobals // package-level sentinel -// NewObjectId creates a ObjectId from a Hex String +// NewObjectId creates a [ObjectId] from a hexadecimal String. func NewObjectId(hex string) ObjectId { //nolint:revive - oid, err := bsonprim.ObjectIDFromHex(hex) + oid, err := objectIDFromHex(hex) if err != nil { panic(err) } - return ObjectId(oid) + return oid } -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (id ObjectId) MarshalText() ([]byte, error) { - oid := bsonprim.ObjectID(id) - if oid == bsonprim.NilObjectID { + if id == nilObjectID { return nil, nil } - return []byte(oid.Hex()), nil + return []byte(id.Hex()), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (id *ObjectId) UnmarshalText(data []byte) error { // validation is performed later on if len(data) == 0 { - *id = ObjectId(bsonprim.NilObjectID) + *id = nilObjectID return nil } - oidstr := string(data) - oid, err := bsonprim.ObjectIDFromHex(oidstr) + oid, err := objectIDFromHex(string(data)) if err != nil { return err } - *id = ObjectId(oid) + *id = oid return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. 
func (id *ObjectId) Scan(raw any) error { var data []byte switch v := raw.(type) { @@ -75,27 +75,36 @@ func (id *ObjectId) Scan(raw any) error { return id.UnmarshalText(data) } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (id ObjectId) Value() (driver.Value, error) { - return driver.Value(bsonprim.ObjectID(id).Hex()), nil + return driver.Value(id.Hex()), nil +} + +// Hex returns the hex string representation of the [ObjectId]. +func (id ObjectId) Hex() string { + return hex.EncodeToString(id[:]) } func (id ObjectId) String() string { - return bsonprim.ObjectID(id).Hex() + return id.Hex() } -// MarshalJSON returns the ObjectId as JSON +// MarshalJSON returns the [ObjectId] as JSON. func (id ObjectId) MarshalJSON() ([]byte, error) { - return bsonprim.ObjectID(id).MarshalJSON() + return json.Marshal(id.Hex()) } -// UnmarshalJSON sets the ObjectId from JSON +// UnmarshalJSON sets the [ObjectId] from JSON. func (id *ObjectId) UnmarshalJSON(data []byte) error { - var obj bsonprim.ObjectID - if err := obj.UnmarshalJSON(data); err != nil { + var hexStr string + if err := json.Unmarshal(data, &hexStr); err != nil { + return err + } + oid, err := objectIDFromHex(hexStr) + if err != nil { return err } - *id = ObjectId(obj) + *id = oid return nil } @@ -104,7 +113,7 @@ func (id *ObjectId) DeepCopyInto(out *ObjectId) { *out = *id } -// DeepCopy copies the receiver into a new ObjectId. +// DeepCopy copies the receiver into a new [ObjectId]. func (id *ObjectId) DeepCopy() *ObjectId { if id == nil { return nil @@ -113,3 +122,18 @@ func (id *ObjectId) DeepCopy() *ObjectId { id.DeepCopyInto(out) return out } + +// objectIDFromHex parses a 24-character hex string into an [ObjectId]. 
+func objectIDFromHex(s string) (ObjectId, error) { + const objectIDHexLen = 24 + if len(s) != objectIDHexLen { + return nilObjectID, fmt.Errorf("the provided hex string %q is not a valid ObjectID: %w", s, ErrFormat) + } + b, err := hex.DecodeString(s) + if err != nil { + return nilObjectID, fmt.Errorf("the provided hex string %q is not a valid ObjectID: %w", s, err) + } + var oid ObjectId + copy(oid[:], b) + return oid, nil +} diff --git a/vendor/github.com/go-openapi/strfmt/date.go b/vendor/github.com/go-openapi/strfmt/date.go index 8aa17b8ea55..59ee1f11216 100644 --- a/vendor/github.com/go-openapi/strfmt/date.go +++ b/vendor/github.com/go-openapi/strfmt/date.go @@ -10,35 +10,34 @@ import ( "time" ) -func init() { +func init() { //nolint:gochecknoinits // registers date format in the default registry d := Date{} - // register this format in the default registry Default.Add("date", &d, IsDate) } -// IsDate returns true when the string is a valid date +// IsDate returns true when the string is a valid date. func IsDate(str string) bool { _, err := time.Parse(RFC3339FullDate, str) return err == nil } const ( - // RFC3339FullDate represents a full-date as specified by RFC3339 + // RFC3339FullDate represents a full-date as specified by RFC3339. // See: http://goo.gl/xXOvVd RFC3339FullDate = "2006-01-02" ) -// Date represents a date from the API +// Date represents a date from the API. // -// swagger:strfmt date +// swagger:strfmt date. type Date time.Time -// String converts this date into a string +// String converts this date into a string. func (d Date) String() string { return time.Time(d).Format(RFC3339FullDate) } -// UnmarshalText parses a text representation into a date type +// UnmarshalText parses a text representation into a date type. 
func (d *Date) UnmarshalText(text []byte) error { if len(text) == 0 { return nil @@ -51,7 +50,7 @@ func (d *Date) UnmarshalText(text []byte) error { return nil } -// MarshalText serializes this date type to string +// MarshalText serializes this date type to string. func (d Date) MarshalText() ([]byte, error) { return []byte(d.String()), nil } @@ -79,12 +78,12 @@ func (d Date) Value() (driver.Value, error) { return driver.Value(d.String()), nil } -// MarshalJSON returns the Date as JSON +// MarshalJSON returns the Date as JSON. func (d Date) MarshalJSON() ([]byte, error) { return json.Marshal(time.Time(d).Format(RFC3339FullDate)) } -// UnmarshalJSON sets the Date from JSON +// UnmarshalJSON sets the Date from JSON. func (d *Date) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil @@ -126,12 +125,12 @@ func (d *Date) GobDecode(data []byte) error { return d.UnmarshalBinary(data) } -// MarshalBinary implements the encoding.BinaryMarshaler interface. +// MarshalBinary implements the encoding.[encoding.BinaryMarshaler] interface. func (d Date) MarshalBinary() ([]byte, error) { return time.Time(d).MarshalBinary() } -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +// UnmarshalBinary implements the encoding.[encoding.BinaryUnmarshaler] interface. func (d *Date) UnmarshalBinary(data []byte) error { var original time.Time @@ -145,7 +144,7 @@ func (d *Date) UnmarshalBinary(data []byte) error { return nil } -// Equal checks if two Date instances are equal +// Equal checks if two Date instances are equal. 
func (d Date) Equal(d2 Date) bool { return time.Time(d).Equal(time.Time(d2)) } diff --git a/vendor/github.com/go-openapi/strfmt/default.go b/vendor/github.com/go-openapi/strfmt/default.go index 8a80cfbdb8a..87d3856ad23 100644 --- a/vendor/github.com/go-openapi/strfmt/default.go +++ b/vendor/github.com/go-openapi/strfmt/default.go @@ -21,42 +21,48 @@ import ( ) const ( - // HostnamePattern http://json-schema.org/latest/json-schema-validation.html#anchor114. + // HostnamePattern http://[json]-schema.org/latest/[json]-schema-validation.html#anchor114. // // Deprecated: this package no longer uses regular expressions to validate hostnames. - HostnamePattern = `^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z0-9-\p{L}]){2,63})$` + HostnamePattern = `^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)` + + `|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z0-9-\p{L}]){2,63})$` - // json null type + // json null type. jsonNull = "null" ) const ( - // UUIDPattern Regex for UUID that allows uppercase + // UUIDPattern Regex for [UUID] that allows uppercase // - // Deprecated: strfmt no longer uses regular expressions to validate UUIDs. + // Deprecated: [strfmt] no longer uses regular expressions to validate UUIDs. UUIDPattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$)|(^[0-9a-f]{32}$)` - // UUID3Pattern Regex for UUID3 that allows uppercase + // UUID3Pattern Regex for [UUID3] that allows uppercase // - // Deprecated: strfmt no longer uses regular expressions to validate UUIDs. + // Deprecated: [strfmt] no longer uses regular expressions to validate UUIDs. 
UUID3Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$)|(^[0-9a-f]{12}3[0-9a-f]{3}?[0-9a-f]{16}$)` - // UUID4Pattern Regex for UUID4 that allows uppercase + // UUID4Pattern Regex for [UUID4] that allows uppercase // - // Deprecated: strfmt no longer uses regular expressions to validate UUIDs. + // Deprecated: [strfmt] no longer uses regular expressions to validate UUIDs. UUID4Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$)|(^[0-9a-f]{12}4[0-9a-f]{3}[89ab][0-9a-f]{15}$)` - // UUID5Pattern Regex for UUID5 that allows uppercase + // UUID5Pattern Regex for [UUID]5 that allows uppercase // - // Deprecated: strfmt no longer uses regular expressions to validate UUIDs. + // Deprecated: [strfmt] no longer uses regular expressions to validate UUIDs. UUID5Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$)|(^[0-9a-f]{12}5[0-9a-f]{3}[89ab][0-9a-f]{15}$)` - isbn10Pattern string = "^(?:[0-9]{9}X|[0-9]{10})$" - isbn13Pattern string = "^(?:[0-9]{13})$" - usCardPattern string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|(222[1-9]|22[3-9][0-9]|2[3-6][0-9]{2}|27[01][0-9]|2720)[0-9]{12}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11}|6[27][0-9]{14})$" + isbn10Pattern string = "^(?:[0-9]{9}X|[0-9]{10})$" + isbn13Pattern string = "^(?:[0-9]{13})$" + usCardPattern string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}" + + "|(222[1-9]|22[3-9][0-9]|2[3-6][0-9]{2}|27[01][0-9]|2720)[0-9]{12}" + + "|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}" + + "|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11}|6[27][0-9]{14})$" ssnPattern string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$` hexColorPattern string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$" - rgbColorPattern string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$" + 
rgbColorPattern string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*," + + "\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*," + + "\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$" ) const ( @@ -65,6 +71,7 @@ const ( decimalBase = 10 ) +//nolint:gochecknoglobals // package-level compiled patterns and validators var ( idnaHostChecker = idna.New( idna.ValidateForRegistration(), // shorthand for [idna.StrictDomainName], [idna.ValidateLabels], [idna.VerifyDNSLength], [idna.BidiRule] @@ -87,11 +94,12 @@ var ( // It supports IDNA rules regarding internationalized names with unicode. // // Besides: -// * the empty string is not a valid host name -// * a trailing dot is allowed in names and IPv4's (not IPv6) -// * a host name can be a valid IPv4 (with decimal, octal or hexadecimal numbers) or IPv6 address -// * IPv6 zones are disallowed -// * top-level domains can be unicode (cf. https://www.iana.org/domains/root/db). +// +// - the empty string is not a valid host name +// - a trailing dot is allowed in names and [IPv4]'s (not [IPv6]) +// - a host name can be a valid [IPv4] (with decimal, octal or hexadecimal numbers) or [IPv6] address +// - [IPv6] zones are disallowed +// - top-level domains can be unicode (cf. https://www.iana.org/domains/root/db). // // NOTE: this validator doesn't check top-level domains against the IANA root database. // It merely ensures that a top-level domain in a FQDN is at least 2 code points long. @@ -325,7 +333,7 @@ func isASCIIDigit(c byte) bool { return c >= '0' && c <= '9' } -// IsUUID returns true is the string matches a UUID (in any version, including v6 and v7), upper case is allowed +// IsUUID returns true if the string matches a [UUID] (in any version, including v6 and v7), upper case is allowed. 
func IsUUID(str string) bool { _, err := uuid.Parse(str) return err == nil @@ -338,25 +346,25 @@ const ( uuidV7 = 7 ) -// IsUUID3 returns true is the string matches a UUID v3, upper case is allowed +// IsUUID3 returns true if the string matches a [UUID] v3, upper case is allowed. func IsUUID3(str string) bool { id, err := uuid.Parse(str) return err == nil && id.Version() == uuid.Version(uuidV3) } -// IsUUID4 returns true is the string matches a UUID v4, upper case is allowed +// IsUUID4 returns true is the string matches a [UUID] v4, upper case is allowed. func IsUUID4(str string) bool { id, err := uuid.Parse(str) return err == nil && id.Version() == uuid.Version(uuidV4) } -// IsUUID5 returns true is the string matches a UUID v5, upper case is allowed +// IsUUID5 returns true if the string matches a [UUID] v5, upper case is allowed. func IsUUID5(str string) bool { id, err := uuid.Parse(str) return err == nil && id.Version() == uuid.Version(uuidV5) } -// IsUUID7 returns true is the string matches a UUID v7, upper case is allowed +// IsUUID7 returns true if the string matches a [UUID] v7, upper case is allowed. func IsUUID7(str string) bool { id, err := uuid.Parse(str) return err == nil && id.Version() == uuid.Version(uuidV7) @@ -368,7 +376,7 @@ func IsEmail(str string) bool { return e == nil && addr.Address != "" } -func init() { +func init() { //nolint:gochecknoinits // registers all default string formats in the registry // register formats in the default registry: // - byte // - creditcard @@ -455,12 +463,12 @@ func init() { Default.Add("password", &pw, func(_ string) bool { return true }) } -// Base64 represents a base64 encoded string, using URLEncoding alphabet +// Base64 represents a base64 encoded string, using URLEncoding alphabet. // -// swagger:strfmt byte +// swagger:strfmt byte. type Base64 []byte -// MarshalText turns this instance into text +// MarshalText turns this instance into text. 
func (b Base64) MarshalText() ([]byte, error) { enc := base64.URLEncoding src := []byte(b) @@ -469,7 +477,7 @@ func (b Base64) MarshalText() ([]byte, error) { return buf, nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (b *Base64) UnmarshalText(data []byte) error { // validation is performed later on enc := base64.URLEncoding dbuf := make([]byte, enc.DecodedLen(len(data))) @@ -483,7 +491,7 @@ func (b *Base64) UnmarshalText(data []byte) error { // validation is performed l return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (b *Base64) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -506,7 +514,7 @@ func (b *Base64) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (b Base64) Value() (driver.Value, error) { return driver.Value(b.String()), nil } @@ -515,12 +523,12 @@ func (b Base64) String() string { return base64.StdEncoding.EncodeToString([]byte(b)) } -// MarshalJSON returns the Base64 as JSON +// MarshalJSON returns the Base64 as JSON. func (b Base64) MarshalJSON() ([]byte, error) { return json.Marshal(b.String()) } -// UnmarshalJSON sets the Base64 from JSON +// UnmarshalJSON sets the Base64 from JSON. func (b *Base64) UnmarshalJSON(data []byte) error { var b64str string if err := json.Unmarshal(data, &b64str); err != nil { @@ -549,23 +557,23 @@ func (b *Base64) DeepCopy() *Base64 { return out } -// URI represents the uri string format as specified by the json schema spec +// URI represents the uri string format as specified by the [json] schema spec. // -// swagger:strfmt uri +// swagger:strfmt uri. type URI string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. 
func (u URI) MarshalText() ([]byte, error) { return []byte(string(u)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (u *URI) UnmarshalText(data []byte) error { // validation is performed later on *u = URI(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (u *URI) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -579,7 +587,7 @@ func (u *URI) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (u URI) Value() (driver.Value, error) { return driver.Value(string(u)), nil } @@ -588,12 +596,12 @@ func (u URI) String() string { return string(u) } -// MarshalJSON returns the URI as JSON +// MarshalJSON returns the [URI] as JSON. func (u URI) MarshalJSON() ([]byte, error) { return json.Marshal(string(u)) } -// UnmarshalJSON sets the URI from JSON +// UnmarshalJSON sets the [URI] from JSON. func (u *URI) UnmarshalJSON(data []byte) error { var uristr string if err := json.Unmarshal(data, &uristr); err != nil { @@ -608,7 +616,7 @@ func (u *URI) DeepCopyInto(out *URI) { *out = *u } -// DeepCopy copies the receiver into a new URI. +// DeepCopy copies the receiver into a new [URI]. func (u *URI) DeepCopy() *URI { if u == nil { return nil @@ -618,23 +626,23 @@ func (u *URI) DeepCopy() *URI { return out } -// Email represents the email string format as specified by the json schema spec +// Email represents the email string format as specified by the [json] schema spec. // -// swagger:strfmt email +// swagger:strfmt email. type Email string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (e Email) MarshalText() ([]byte, error) { return []byte(string(e)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. 
func (e *Email) UnmarshalText(data []byte) error { // validation is performed later on *e = Email(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (e *Email) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -648,7 +656,7 @@ func (e *Email) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (e Email) Value() (driver.Value, error) { return driver.Value(string(e)), nil } @@ -657,12 +665,12 @@ func (e Email) String() string { return string(e) } -// MarshalJSON returns the Email as JSON +// MarshalJSON returns the Email as JSON. func (e Email) MarshalJSON() ([]byte, error) { return json.Marshal(string(e)) } -// UnmarshalJSON sets the Email from JSON +// UnmarshalJSON sets the Email from JSON. func (e *Email) UnmarshalJSON(data []byte) error { var estr string if err := json.Unmarshal(data, &estr); err != nil { @@ -687,23 +695,23 @@ func (e *Email) DeepCopy() *Email { return out } -// Hostname represents the hostname string format as specified by the json schema spec +// Hostname represents the hostname string format as specified by the [json] schema spec. // -// swagger:strfmt hostname +// swagger:strfmt hostname. type Hostname string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (h Hostname) MarshalText() ([]byte, error) { return []byte(string(h)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (h *Hostname) UnmarshalText(data []byte) error { // validation is performed later on *h = Hostname(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. 
func (h *Hostname) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -717,7 +725,7 @@ func (h *Hostname) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (h Hostname) Value() (driver.Value, error) { return driver.Value(string(h)), nil } @@ -726,12 +734,12 @@ func (h Hostname) String() string { return string(h) } -// MarshalJSON returns the Hostname as JSON +// MarshalJSON returns the [Hostname] as JSON. func (h Hostname) MarshalJSON() ([]byte, error) { return json.Marshal(string(h)) } -// UnmarshalJSON sets the Hostname from JSON +// UnmarshalJSON sets the [Hostname] from JSON. func (h *Hostname) UnmarshalJSON(data []byte) error { var hstr string if err := json.Unmarshal(data, &hstr); err != nil { @@ -746,7 +754,7 @@ func (h *Hostname) DeepCopyInto(out *Hostname) { *out = *h } -// DeepCopy copies the receiver into a new Hostname. +// DeepCopy copies the receiver into a new [Hostname]. func (h *Hostname) DeepCopy() *Hostname { if h == nil { return nil @@ -756,23 +764,23 @@ func (h *Hostname) DeepCopy() *Hostname { return out } -// IPv4 represents an IP v4 address +// IPv4 represents an IP v4 address. // -// swagger:strfmt ipv4 +// swagger:strfmt ipv4. type IPv4 string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (u IPv4) MarshalText() ([]byte, error) { return []byte(string(u)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (u *IPv4) UnmarshalText(data []byte) error { // validation is performed later on *u = IPv4(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. 
func (u *IPv4) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -786,7 +794,7 @@ func (u *IPv4) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (u IPv4) Value() (driver.Value, error) { return driver.Value(string(u)), nil } @@ -795,12 +803,12 @@ func (u IPv4) String() string { return string(u) } -// MarshalJSON returns the IPv4 as JSON +// MarshalJSON returns the [IPv4] as JSON. func (u IPv4) MarshalJSON() ([]byte, error) { return json.Marshal(string(u)) } -// UnmarshalJSON sets the IPv4 from JSON +// UnmarshalJSON sets the [IPv4] from JSON. func (u *IPv4) UnmarshalJSON(data []byte) error { var ustr string if err := json.Unmarshal(data, &ustr); err != nil { @@ -815,7 +823,7 @@ func (u *IPv4) DeepCopyInto(out *IPv4) { *out = *u } -// DeepCopy copies the receiver into a new IPv4. +// DeepCopy copies the receiver into a new [IPv4]. func (u *IPv4) DeepCopy() *IPv4 { if u == nil { return nil @@ -825,23 +833,23 @@ func (u *IPv4) DeepCopy() *IPv4 { return out } -// IPv6 represents an IP v6 address +// IPv6 represents an IP v6 address. // -// swagger:strfmt ipv6 +// swagger:strfmt ipv6. type IPv6 string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (u IPv6) MarshalText() ([]byte, error) { return []byte(string(u)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (u *IPv6) UnmarshalText(data []byte) error { // validation is performed later on *u = IPv6(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (u *IPv6) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -855,7 +863,7 @@ func (u *IPv6) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. 
func (u IPv6) Value() (driver.Value, error) { return driver.Value(string(u)), nil } @@ -864,12 +872,12 @@ func (u IPv6) String() string { return string(u) } -// MarshalJSON returns the IPv6 as JSON +// MarshalJSON returns the [IPv6] as JSON. func (u IPv6) MarshalJSON() ([]byte, error) { return json.Marshal(string(u)) } -// UnmarshalJSON sets the IPv6 from JSON +// UnmarshalJSON sets the [IPv6] from JSON. func (u *IPv6) UnmarshalJSON(data []byte) error { var ustr string if err := json.Unmarshal(data, &ustr); err != nil { @@ -884,7 +892,7 @@ func (u *IPv6) DeepCopyInto(out *IPv6) { *out = *u } -// DeepCopy copies the receiver into a new IPv6. +// DeepCopy copies the receiver into a new [IPv6]. func (u *IPv6) DeepCopy() *IPv6 { if u == nil { return nil @@ -894,23 +902,23 @@ func (u *IPv6) DeepCopy() *IPv6 { return out } -// CIDR represents a Classless Inter-Domain Routing notation +// CIDR represents a Classless Inter-Domain Routing notation. // -// swagger:strfmt cidr +// swagger:strfmt cidr. type CIDR string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (u CIDR) MarshalText() ([]byte, error) { return []byte(string(u)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (u *CIDR) UnmarshalText(data []byte) error { // validation is performed later on *u = CIDR(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (u *CIDR) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -924,7 +932,7 @@ func (u *CIDR) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. 
func (u CIDR) Value() (driver.Value, error) { return driver.Value(string(u)), nil } @@ -933,12 +941,12 @@ func (u CIDR) String() string { return string(u) } -// MarshalJSON returns the CIDR as JSON +// MarshalJSON returns the [CIDR] as JSON. func (u CIDR) MarshalJSON() ([]byte, error) { return json.Marshal(string(u)) } -// UnmarshalJSON sets the CIDR from JSON +// UnmarshalJSON sets the [CIDR] from JSON. func (u *CIDR) UnmarshalJSON(data []byte) error { var ustr string if err := json.Unmarshal(data, &ustr); err != nil { @@ -953,7 +961,7 @@ func (u *CIDR) DeepCopyInto(out *CIDR) { *out = *u } -// DeepCopy copies the receiver into a new CIDR. +// DeepCopy copies the receiver into a new [CIDR]. func (u *CIDR) DeepCopy() *CIDR { if u == nil { return nil @@ -963,23 +971,23 @@ func (u *CIDR) DeepCopy() *CIDR { return out } -// MAC represents a 48 bit MAC address +// MAC represents a 48 bit MAC address. // -// swagger:strfmt mac +// swagger:strfmt mac. type MAC string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (u MAC) MarshalText() ([]byte, error) { return []byte(string(u)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (u *MAC) UnmarshalText(data []byte) error { // validation is performed later on *u = MAC(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (u *MAC) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -993,7 +1001,7 @@ func (u *MAC) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (u MAC) Value() (driver.Value, error) { return driver.Value(string(u)), nil } @@ -1002,12 +1010,12 @@ func (u MAC) String() string { return string(u) } -// MarshalJSON returns the MAC as JSON +// MarshalJSON returns the [MAC] as JSON. 
func (u MAC) MarshalJSON() ([]byte, error) { return json.Marshal(string(u)) } -// UnmarshalJSON sets the MAC from JSON +// UnmarshalJSON sets the [MAC] from JSON. func (u *MAC) UnmarshalJSON(data []byte) error { var ustr string if err := json.Unmarshal(data, &ustr); err != nil { @@ -1022,7 +1030,7 @@ func (u *MAC) DeepCopyInto(out *MAC) { *out = *u } -// DeepCopy copies the receiver into a new MAC. +// DeepCopy copies the receiver into a new [MAC]. func (u *MAC) DeepCopy() *MAC { if u == nil { return nil @@ -1032,23 +1040,23 @@ func (u *MAC) DeepCopy() *MAC { return out } -// UUID represents a uuid string format +// UUID represents a [uuid] string format // -// swagger:strfmt uuid +// swagger:strfmt uuid. type UUID string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (u UUID) MarshalText() ([]byte, error) { return []byte(string(u)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (u *UUID) UnmarshalText(data []byte) error { // validation is performed later on *u = UUID(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (u *UUID) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -1062,7 +1070,7 @@ func (u *UUID) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (u UUID) Value() (driver.Value, error) { return driver.Value(string(u)), nil } @@ -1071,12 +1079,12 @@ func (u UUID) String() string { return string(u) } -// MarshalJSON returns the UUID as JSON +// MarshalJSON returns the [UUID] as JSON. func (u UUID) MarshalJSON() ([]byte, error) { return json.Marshal(string(u)) } -// UnmarshalJSON sets the UUID from JSON +// UnmarshalJSON sets the [UUID] from JSON. 
func (u *UUID) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil @@ -1094,7 +1102,7 @@ func (u *UUID) DeepCopyInto(out *UUID) { *out = *u } -// DeepCopy copies the receiver into a new UUID. +// DeepCopy copies the receiver into a new [UUID]. func (u *UUID) DeepCopy() *UUID { if u == nil { return nil @@ -1104,23 +1112,23 @@ func (u *UUID) DeepCopy() *UUID { return out } -// UUID3 represents a uuid3 string format +// UUID3 represents a uuid3 string format. // -// swagger:strfmt uuid3 +// swagger:strfmt uuid3 type UUID3 string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (u UUID3) MarshalText() ([]byte, error) { return []byte(string(u)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (u *UUID3) UnmarshalText(data []byte) error { // validation is performed later on *u = UUID3(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (u *UUID3) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -1134,7 +1142,7 @@ func (u *UUID3) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (u UUID3) Value() (driver.Value, error) { return driver.Value(string(u)), nil } @@ -1143,12 +1151,12 @@ func (u UUID3) String() string { return string(u) } -// MarshalJSON returns the UUID as JSON +// MarshalJSON returns the [UUID3] as JSON. func (u UUID3) MarshalJSON() ([]byte, error) { return json.Marshal(string(u)) } -// UnmarshalJSON sets the UUID from JSON +// UnmarshalJSON sets the [UUID3] from JSON. func (u *UUID3) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil @@ -1166,7 +1174,7 @@ func (u *UUID3) DeepCopyInto(out *UUID3) { *out = *u } -// DeepCopy copies the receiver into a new UUID3. +// DeepCopy copies the receiver into a new [UUID3].
func (u *UUID3) DeepCopy() *UUID3 { if u == nil { return nil @@ -1176,23 +1184,23 @@ func (u *UUID3) DeepCopy() *UUID3 { return out } -// UUID4 represents a uuid4 string format +// UUID4 represents a uuid4 string format. // -// swagger:strfmt uuid4 +// swagger:strfmt uuid4. type UUID4 string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (u UUID4) MarshalText() ([]byte, error) { return []byte(string(u)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (u *UUID4) UnmarshalText(data []byte) error { // validation is performed later on *u = UUID4(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (u *UUID4) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -1206,7 +1214,7 @@ func (u *UUID4) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (u UUID4) Value() (driver.Value, error) { return driver.Value(string(u)), nil } @@ -1215,12 +1223,12 @@ func (u UUID4) String() string { return string(u) } -// MarshalJSON returns the UUID as JSON +// MarshalJSON returns the [UUID4] as JSON. func (u UUID4) MarshalJSON() ([]byte, error) { return json.Marshal(string(u)) } -// UnmarshalJSON sets the UUID from JSON +// UnmarshalJSON sets the [UUID4] from JSON. func (u *UUID4) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil @@ -1248,23 +1256,23 @@ func (u *UUID4) DeepCopy() *UUID4 { return out } -// UUID5 represents a uuid5 string format +// UUID5 represents a uuid5 string format. // -// swagger:strfmt uuid5 +// swagger:strfmt uuid5. type UUID5 string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. 
func (u UUID5) MarshalText() ([]byte, error) { return []byte(string(u)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (u *UUID5) UnmarshalText(data []byte) error { // validation is performed later on *u = UUID5(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (u *UUID5) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -1278,7 +1286,7 @@ func (u *UUID5) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (u UUID5) Value() (driver.Value, error) { return driver.Value(string(u)), nil } @@ -1287,12 +1295,12 @@ func (u UUID5) String() string { return string(u) } -// MarshalJSON returns the UUID as JSON +// MarshalJSON returns the [UUID5] as JSON. func (u UUID5) MarshalJSON() ([]byte, error) { return json.Marshal(string(u)) } -// UnmarshalJSON sets the UUID from JSON +// UnmarshalJSON sets the [UUID5] from JSON. func (u *UUID5) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil @@ -1310,7 +1318,7 @@ func (u *UUID5) DeepCopyInto(out *UUID5) { *out = *u } -// DeepCopy copies the receiver into a new UUID5. +// DeepCopy copies the receiver into a new [UUID5]. func (u *UUID5) DeepCopy() *UUID5 { if u == nil { return nil @@ -1320,23 +1328,23 @@ func (u *UUID5) DeepCopy() *UUID5 { return out } -// UUID7 represents a uuid7 string format +// UUID7 represents a uuid7 string format. // -// swagger:strfmt uuid7 +// swagger:strfmt uuid7. type UUID7 string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (u UUID7) MarshalText() ([]byte, error) { return []byte(string(u)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. 
func (u *UUID7) UnmarshalText(data []byte) error { // validation is performed later on *u = UUID7(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (u *UUID7) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -1350,7 +1358,7 @@ func (u *UUID7) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (u UUID7) Value() (driver.Value, error) { return driver.Value(string(u)), nil } @@ -1359,12 +1367,12 @@ func (u UUID7) String() string { return string(u) } -// MarshalJSON returns the UUID as JSON +// MarshalJSON returns the [UUID7] as JSON. func (u UUID7) MarshalJSON() ([]byte, error) { return json.Marshal(string(u)) } -// UnmarshalJSON sets the UUID from JSON +// UnmarshalJSON sets the [UUID7] from JSON. func (u *UUID7) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil @@ -1382,7 +1390,7 @@ func (u *UUID7) DeepCopyInto(out *UUID7) { *out = *u } -// DeepCopy copies the receiver into a new UUID7. +// DeepCopy copies the receiver into a new [UUID7]. func (u *UUID7) DeepCopy() *UUID7 { if u == nil { return nil @@ -1392,23 +1400,23 @@ func (u *UUID7) DeepCopy() *UUID7 { return out } -// ISBN represents an isbn string format +// ISBN represents an isbn string format. // -// swagger:strfmt isbn +// swagger:strfmt isbn type ISBN string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (u ISBN) MarshalText() ([]byte, error) { return []byte(string(u)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (u *ISBN) UnmarshalText(data []byte) error { // validation is performed later on *u = ISBN(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver.
func (u *ISBN) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -1422,7 +1430,7 @@ func (u *ISBN) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (u ISBN) Value() (driver.Value, error) { return driver.Value(string(u)), nil } @@ -1431,12 +1439,12 @@ func (u ISBN) String() string { return string(u) } -// MarshalJSON returns the ISBN as JSON +// MarshalJSON returns the [ISBN] as JSON. func (u ISBN) MarshalJSON() ([]byte, error) { return json.Marshal(string(u)) } -// UnmarshalJSON sets the ISBN from JSON +// UnmarshalJSON sets the [ISBN] from JSON. func (u *ISBN) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil @@ -1454,7 +1462,7 @@ func (u *ISBN) DeepCopyInto(out *ISBN) { *out = *u } -// DeepCopy copies the receiver into a new ISBN. +// DeepCopy copies the receiver into a new [ISBN]. func (u *ISBN) DeepCopy() *ISBN { if u == nil { return nil @@ -1464,23 +1472,23 @@ func (u *ISBN) DeepCopy() *ISBN { return out } -// ISBN10 represents an isbn 10 string format +// ISBN10 represents an isbn 10 string format. // -// swagger:strfmt isbn10 +// swagger:strfmt isbn10. type ISBN10 string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (u ISBN10) MarshalText() ([]byte, error) { return []byte(string(u)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (u *ISBN10) UnmarshalText(data []byte) error { // validation is performed later on *u = ISBN10(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (u *ISBN10) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -1494,7 +1502,7 @@ func (u *ISBN10) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. 
func (u ISBN10) Value() (driver.Value, error) { return driver.Value(string(u)), nil } @@ -1503,12 +1511,12 @@ func (u ISBN10) String() string { return string(u) } -// MarshalJSON returns the ISBN10 as JSON +// MarshalJSON returns the [ISBN10] as JSON. func (u ISBN10) MarshalJSON() ([]byte, error) { return json.Marshal(string(u)) } -// UnmarshalJSON sets the ISBN10 from JSON +// UnmarshalJSON sets the [ISBN10] from JSON. func (u *ISBN10) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil @@ -1526,7 +1534,7 @@ func (u *ISBN10) DeepCopyInto(out *ISBN10) { *out = *u } -// DeepCopy copies the receiver into a new ISBN10. +// DeepCopy copies the receiver into a new [ISBN10]. func (u *ISBN10) DeepCopy() *ISBN10 { if u == nil { return nil @@ -1536,23 +1544,23 @@ func (u *ISBN10) DeepCopy() *ISBN10 { return out } -// ISBN13 represents an isbn 13 string format +// ISBN13 represents an isbn 13 string format. // -// swagger:strfmt isbn13 +// swagger:strfmt isbn13. type ISBN13 string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (u ISBN13) MarshalText() ([]byte, error) { return []byte(string(u)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (u *ISBN13) UnmarshalText(data []byte) error { // validation is performed later on *u = ISBN13(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (u *ISBN13) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -1566,7 +1574,7 @@ func (u *ISBN13) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. 
func (u ISBN13) Value() (driver.Value, error) { return driver.Value(string(u)), nil } @@ -1575,12 +1583,12 @@ func (u ISBN13) String() string { return string(u) } -// MarshalJSON returns the ISBN13 as JSON +// MarshalJSON returns the [ISBN13] as JSON. func (u ISBN13) MarshalJSON() ([]byte, error) { return json.Marshal(string(u)) } -// UnmarshalJSON sets the ISBN13 from JSON +// UnmarshalJSON sets the [ISBN13] from JSON. func (u *ISBN13) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil @@ -1598,7 +1606,7 @@ func (u *ISBN13) DeepCopyInto(out *ISBN13) { *out = *u } -// DeepCopy copies the receiver into a new ISBN13. +// DeepCopy copies the receiver into a new [ISBN13]. func (u *ISBN13) DeepCopy() *ISBN13 { if u == nil { return nil @@ -1608,23 +1616,23 @@ func (u *ISBN13) DeepCopy() *ISBN13 { return out } -// CreditCard represents a credit card string format +// CreditCard represents a credit card string format. // -// swagger:strfmt creditcard +// swagger:strfmt creditcard. type CreditCard string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (u CreditCard) MarshalText() ([]byte, error) { return []byte(string(u)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (u *CreditCard) UnmarshalText(data []byte) error { // validation is performed later on *u = CreditCard(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (u *CreditCard) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -1638,7 +1646,7 @@ func (u *CreditCard) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. 
func (u CreditCard) Value() (driver.Value, error) { return driver.Value(string(u)), nil } @@ -1647,12 +1655,12 @@ func (u CreditCard) String() string { return string(u) } -// MarshalJSON returns the CreditCard as JSON +// MarshalJSON returns the [CreditCard] as JSON. func (u CreditCard) MarshalJSON() ([]byte, error) { return json.Marshal(string(u)) } -// UnmarshalJSON sets the CreditCard from JSON +// UnmarshalJSON sets the [CreditCard] from JSON. func (u *CreditCard) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil @@ -1670,7 +1678,7 @@ func (u *CreditCard) DeepCopyInto(out *CreditCard) { *out = *u } -// DeepCopy copies the receiver into a new CreditCard. +// DeepCopy copies the receiver into a new [CreditCard]. func (u *CreditCard) DeepCopy() *CreditCard { if u == nil { return nil @@ -1680,23 +1688,23 @@ func (u *CreditCard) DeepCopy() *CreditCard { return out } -// SSN represents a social security string format +// SSN represents a social security string format. // -// swagger:strfmt ssn +// swagger:strfmt ssn. type SSN string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (u SSN) MarshalText() ([]byte, error) { return []byte(string(u)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (u *SSN) UnmarshalText(data []byte) error { // validation is performed later on *u = SSN(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (u *SSN) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -1710,7 +1718,7 @@ func (u *SSN) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. 
func (u SSN) Value() (driver.Value, error) { return driver.Value(string(u)), nil } @@ -1719,12 +1727,12 @@ func (u SSN) String() string { return string(u) } -// MarshalJSON returns the SSN as JSON +// MarshalJSON returns the [SSN] as JSON. func (u SSN) MarshalJSON() ([]byte, error) { return json.Marshal(string(u)) } -// UnmarshalJSON sets the SSN from JSON +// UnmarshalJSON sets the [SSN] from JSON. func (u *SSN) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil @@ -1742,7 +1750,7 @@ func (u *SSN) DeepCopyInto(out *SSN) { *out = *u } -// DeepCopy copies the receiver into a new SSN. +// DeepCopy copies the receiver into a new [SSN]. func (u *SSN) DeepCopy() *SSN { if u == nil { return nil @@ -1752,23 +1760,23 @@ func (u *SSN) DeepCopy() *SSN { return out } -// HexColor represents a hex color string format +// HexColor represents a hex color string format. // -// swagger:strfmt hexcolor +// swagger:strfmt hexcolor. type HexColor string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (h HexColor) MarshalText() ([]byte, error) { return []byte(string(h)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (h *HexColor) UnmarshalText(data []byte) error { // validation is performed later on *h = HexColor(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (h *HexColor) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -1782,7 +1790,7 @@ func (h *HexColor) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (h HexColor) Value() (driver.Value, error) { return driver.Value(string(h)), nil } @@ -1791,12 +1799,12 @@ func (h HexColor) String() string { return string(h) } -// MarshalJSON returns the HexColor as JSON +// MarshalJSON returns the [HexColor] as JSON. 
func (h HexColor) MarshalJSON() ([]byte, error) { return json.Marshal(string(h)) } -// UnmarshalJSON sets the HexColor from JSON +// UnmarshalJSON sets the [HexColor] from JSON. func (h *HexColor) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil @@ -1814,7 +1822,7 @@ func (h *HexColor) DeepCopyInto(out *HexColor) { *out = *h } -// DeepCopy copies the receiver into a new HexColor. +// DeepCopy copies the receiver into a new [HexColor]. func (h *HexColor) DeepCopy() *HexColor { if h == nil { return nil @@ -1824,23 +1832,23 @@ func (h *HexColor) DeepCopy() *HexColor { return out } -// RGBColor represents a RGB color string format +// RGBColor represents a RGB color string format. // -// swagger:strfmt rgbcolor +// swagger:strfmt rgbcolor. type RGBColor string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (r RGBColor) MarshalText() ([]byte, error) { return []byte(string(r)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (r *RGBColor) UnmarshalText(data []byte) error { // validation is performed later on *r = RGBColor(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (r *RGBColor) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -1854,7 +1862,7 @@ func (r *RGBColor) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (r RGBColor) Value() (driver.Value, error) { return driver.Value(string(r)), nil } @@ -1863,12 +1871,12 @@ func (r RGBColor) String() string { return string(r) } -// MarshalJSON returns the RGBColor as JSON +// MarshalJSON returns the [RGBColor] as JSON. func (r RGBColor) MarshalJSON() ([]byte, error) { return json.Marshal(string(r)) } -// UnmarshalJSON sets the RGBColor from JSON +// UnmarshalJSON sets the [RGBColor] from JSON. 
func (r *RGBColor) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil @@ -1886,7 +1894,7 @@ func (r *RGBColor) DeepCopyInto(out *RGBColor) { *out = *r } -// DeepCopy copies the receiver into a new RGBColor. +// DeepCopy copies the receiver into a new [RGBColor]. func (r *RGBColor) DeepCopy() *RGBColor { if r == nil { return nil @@ -1899,21 +1907,21 @@ func (r *RGBColor) DeepCopy() *RGBColor { // Password represents a password. // This has no validations and is mainly used as a marker for UI components. // -// swagger:strfmt password +// swagger:strfmt password. type Password string -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (r Password) MarshalText() ([]byte, error) { return []byte(string(r)), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (r *Password) UnmarshalText(data []byte) error { // validation is performed later on *r = Password(string(data)) return nil } -// Scan read a value from a database driver +// Scan read a value from a database driver. func (r *Password) Scan(raw any) error { switch v := raw.(type) { case []byte: @@ -1927,7 +1935,7 @@ func (r *Password) Scan(raw any) error { return nil } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (r Password) Value() (driver.Value, error) { return driver.Value(string(r)), nil } @@ -1936,12 +1944,12 @@ func (r Password) String() string { return string(r) } -// MarshalJSON returns the Password as JSON +// MarshalJSON returns the Password as JSON. func (r Password) MarshalJSON() ([]byte, error) { return json.Marshal(string(r)) } -// UnmarshalJSON sets the Password from JSON +// UnmarshalJSON sets the Password from JSON. 
func (r *Password) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil @@ -1986,7 +1994,7 @@ func isIPv6(str string) bool { return ip != nil && strings.Contains(str, ":") } -// isCIDR checks if the string is an valid CIDR notiation (IPV4 & IPV6) +// isCIDR checks if the string is a valid CIDR notation (IPV4 & IPV6). func isCIDR(str string) bool { _, _, err := net.ParseCIDR(str) return err == nil @@ -1999,7 +2007,7 @@ func isCIDR(str string) bool { // 01-23-45-67-89-ab // 01-23-45-67-89-ab-cd-ef // 0123.4567.89ab -// 0123.4567.89ab.cdef +// 0123.4567.89ab.cdef. func isMAC(str string) bool { _, err := net.ParseMAC(str) return err == nil @@ -2084,7 +2092,7 @@ func isCreditCard(str string) bool { return (sum+lastDigit)%decimalBase == 0 } -// isSSN will validate the given string as a U.S. Social Security Number +// isSSN will validate the given string as a U.S. Social Security Number. func isSSN(str string) bool { if str == "" || len(str) != 11 { return false diff --git a/vendor/github.com/go-openapi/strfmt/doc.go b/vendor/github.com/go-openapi/strfmt/doc.go index 5825b72108e..6652521c535 100644 --- a/vendor/github.com/go-openapi/strfmt/doc.go +++ b/vendor/github.com/go-openapi/strfmt/doc.go @@ -1,7 +1,5 @@ // SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers // SPDX-License-Identifier: Apache-2.0 -// Package strfmt contains custom string formats -// -// TODO: add info on how to define and register a custom format +// Package strfmt contains custom string formats. 
package strfmt diff --git a/vendor/github.com/go-openapi/strfmt/duration.go b/vendor/github.com/go-openapi/strfmt/duration.go index 908c1b02f3c..b710bfbf53c 100644 --- a/vendor/github.com/go-openapi/strfmt/duration.go +++ b/vendor/github.com/go-openapi/strfmt/duration.go @@ -14,9 +14,8 @@ import ( "time" ) -func init() { +func init() { //nolint:gochecknoinits // registers duration format in the default registry d := Duration(0) - // register this format in the default registry Default.Add("duration", &d, IsDuration) } @@ -25,6 +24,7 @@ const ( daysInWeek = 7 ) +//nolint:gochecknoglobals // package-level lookup tables for duration parsing var ( timeUnits = [][]string{ {"ns", "nano"}, @@ -51,7 +51,7 @@ var ( durationMatcher = regexp.MustCompile(`^(((?:-\s?)?\d+)(\.\d+)?\s*([A-Za-zµ]+))`) ) -// IsDuration returns true if the provided string is a valid duration +// IsDuration returns true if the provided string is a valid duration. func IsDuration(str string) bool { _, err := ParseDuration(str) return err == nil @@ -60,17 +60,17 @@ func IsDuration(str string) bool { // Duration represents a duration // // Duration stores a period of time as a nanosecond count, with the largest -// repesentable duration being approximately 290 years. +// representable duration being approximately 290 years. // -// swagger:strfmt duration +// swagger:strfmt duration. type Duration time.Duration -// MarshalText turns this instance into text +// MarshalText turns this instance into text. func (d Duration) MarshalText() ([]byte, error) { return []byte(time.Duration(d).String()), nil } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. 
func (d *Duration) UnmarshalText(data []byte) error { // validation is performed later on dd, err := ParseDuration(string(data)) if err != nil { @@ -80,7 +80,7 @@ func (d *Duration) UnmarshalText(data []byte) error { // validation is performed return nil } -// ParseDuration parses a duration from a string, compatible with scala duration syntax +// ParseDuration parses a duration from a string, compatible with scala duration syntax. func ParseDuration(cand string) (time.Duration, error) { if dur, err := time.ParseDuration(cand); err == nil { return dur, nil @@ -143,7 +143,7 @@ func ParseDuration(cand string) (time.Duration, error) { // Scan reads a Duration value from database driver type. func (d *Duration) Scan(raw any) error { switch v := raw.(type) { - // TODO: case []byte: // ? + // Proposal for enhancement: case []byte: // ? case int64: *d = Duration(v) case float64: @@ -162,17 +162,17 @@ func (d Duration) Value() (driver.Value, error) { return driver.Value(int64(d)), nil } -// String converts this duration to a string +// String converts this duration to a string. func (d Duration) String() string { return time.Duration(d).String() } -// MarshalJSON returns the Duration as JSON +// MarshalJSON returns the Duration as JSON. func (d Duration) MarshalJSON() ([]byte, error) { return json.Marshal(time.Duration(d).String()) } -// UnmarshalJSON sets the Duration from JSON +// UnmarshalJSON sets the Duration from JSON. func (d *Duration) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil diff --git a/vendor/github.com/go-openapi/strfmt/errors.go b/vendor/github.com/go-openapi/strfmt/errors.go index 9faa37cf2e5..5ed519d2e63 100644 --- a/vendor/github.com/go-openapi/strfmt/errors.go +++ b/vendor/github.com/go-openapi/strfmt/errors.go @@ -5,7 +5,7 @@ package strfmt type strfmtError string -// ErrFormat is an error raised by the strfmt package +// ErrFormat is an error raised by the [strfmt] package. 
const ErrFormat strfmtError = "format error" func (e strfmtError) Error() string { diff --git a/vendor/github.com/go-openapi/strfmt/format.go b/vendor/github.com/go-openapi/strfmt/format.go index d9d9e04c208..e494dd7b83d 100644 --- a/vendor/github.com/go-openapi/strfmt/format.go +++ b/vendor/github.com/go-openapi/strfmt/format.go @@ -16,20 +16,20 @@ import ( "github.com/go-viper/mapstructure/v2" ) -// Default is the default formats registry -var Default = NewSeededFormats(nil, nil) +// Default is the default formats registry. +var Default = NewSeededFormats(nil, nil) //nolint:gochecknoglobals // package-level default registry, by design // Validator represents a validator for a string format. type Validator func(string) bool -// NewFormats creates a new formats registry seeded with the values from the default -func NewFormats() Registry { +// NewFormats creates a new formats registry seeded with the values from the default. +func NewFormats() Registry { //nolint:ireturn // factory function returns the Registry interface by design //nolint:forcetypeassert return NewSeededFormats(Default.(*defaultFormats).data, nil) } -// NewSeededFormats creates a new formats registry -func NewSeededFormats(seeds []knownFormat, normalizer NameNormalizer) Registry { +// NewSeededFormats creates a new formats registry. +func NewSeededFormats(seeds []knownFormat, normalizer NameNormalizer) Registry { //nolint:ireturn // factory function returns the Registry interface by design if normalizer == nil { normalizer = DefaultNameNormalizer } @@ -50,7 +50,7 @@ type knownFormat struct { // NameNormalizer is a function that normalizes a format name. type NameNormalizer func(string) string -// DefaultNameNormalizer removes all dashes +// DefaultNameNormalizer removes all dashes. 
func DefaultNameNormalizer(name string) string { return strings.ReplaceAll(name, "-", "") } @@ -62,8 +62,8 @@ type defaultFormats struct { normalizeName NameNormalizer } -// MapStructureHookFunc is a decode hook function for mapstructure -func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc { +// MapStructureHookFunc is a decode hook function for mapstructure. +func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc { //nolint:ireturn // returns interface required by mapstructure return func(from reflect.Type, to reflect.Type, obj any) (any, error) { if from.Kind() != reflect.String { return obj, nil @@ -76,83 +76,87 @@ func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc { for _, v := range f.data { tpe, _ := f.GetType(v.Name) if to == tpe { - switch v.Name { - case "date": - d, err := time.ParseInLocation(RFC3339FullDate, data, DefaultTimeLocation) - if err != nil { - return nil, err - } - return Date(d), nil - case "datetime": - input := data - if len(input) == 0 { - return nil, fmt.Errorf("empty string is an invalid datetime format: %w", ErrFormat) - } - return ParseDateTime(input) - case "duration": - dur, err := ParseDuration(data) - if err != nil { - return nil, err - } - return Duration(dur), nil - case "uri": - return URI(data), nil - case "email": - return Email(data), nil - case "uuid": - return UUID(data), nil - case "uuid3": - return UUID3(data), nil - case "uuid4": - return UUID4(data), nil - case "uuid5": - return UUID5(data), nil - case "uuid7": - return UUID7(data), nil - case "hostname": - return Hostname(data), nil - case "ipv4": - return IPv4(data), nil - case "ipv6": - return IPv6(data), nil - case "cidr": - return CIDR(data), nil - case "mac": - return MAC(data), nil - case "isbn": - return ISBN(data), nil - case "isbn10": - return ISBN10(data), nil - case "isbn13": - return ISBN13(data), nil - case "creditcard": - return CreditCard(data), nil - case "ssn": - return SSN(data), 
nil - case "hexcolor": - return HexColor(data), nil - case "rgbcolor": - return RGBColor(data), nil - case "byte": - return Base64(data), nil - case "password": - return Password(data), nil - case "ulid": - ulid, err := ParseULID(data) - if err != nil { - return nil, err - } - return ulid, nil - default: - return nil, errors.InvalidTypeName(v.Name) - } + return decodeFormatFromString(v.Name, data) } } return data, nil } } -// Add adds a new format, return true if this was a new item instead of a replacement +// decodeFormatFromString decodes a string into the appropriate format type by name. +func decodeFormatFromString(name, data string) (any, error) { //nolint:gocyclo,cyclop // flat switch over format names, no real complexity + switch name { + case "date": + d, err := time.ParseInLocation(RFC3339FullDate, data, DefaultTimeLocation) + if err != nil { + return nil, err + } + return Date(d), nil + case "datetime": + if len(data) == 0 { + return nil, fmt.Errorf("empty string is an invalid datetime format: %w", ErrFormat) + } + return ParseDateTime(data) + case "duration": + dur, err := ParseDuration(data) + if err != nil { + return nil, err + } + return Duration(dur), nil + case "uri": + return URI(data), nil + case "email": + return Email(data), nil + case "uuid": + return UUID(data), nil + case "uuid3": + return UUID3(data), nil + case "uuid4": + return UUID4(data), nil + case "uuid5": + return UUID5(data), nil + case "uuid7": + return UUID7(data), nil + case "hostname": + return Hostname(data), nil + case "ipv4": + return IPv4(data), nil + case "ipv6": + return IPv6(data), nil + case "cidr": + return CIDR(data), nil + case "mac": + return MAC(data), nil + case "isbn": + return ISBN(data), nil + case "isbn10": + return ISBN10(data), nil + case "isbn13": + return ISBN13(data), nil + case "creditcard": + return CreditCard(data), nil + case "ssn": + return SSN(data), nil + case "hexcolor": + return HexColor(data), nil + case "rgbcolor": + return RGBColor(data), nil + 
case "byte": + return Base64(data), nil + case "password": + return Password(data), nil + case "ulid": + ulid, err := ParseULID(data) + if err != nil { + return nil, err + } + return ulid, nil + default: + return nil, errors.InvalidTypeName(name) + } +} + +// Add adds a new format, return true if this was a new item instead of a replacement. func (f *defaultFormats) Add(name string, strfmt Format, validator Validator) bool { f.Lock() defer f.Unlock() @@ -178,7 +182,7 @@ func (f *defaultFormats) Add(name string, strfmt Format, validator Validator) bo return true } -// GetType gets the type for the specified name +// GetType gets the type for the specified name. func (f *defaultFormats) GetType(name string) (reflect.Type, bool) { f.Lock() defer f.Unlock() @@ -191,7 +195,7 @@ func (f *defaultFormats) GetType(name string) (reflect.Type, bool) { return nil, false } -// DelByName removes the format by the specified name, returns true when an item was actually removed +// DelByName removes the format by the specified name, returns true when an item was actually removed. func (f *defaultFormats) DelByName(name string) bool { f.Lock() defer f.Unlock() @@ -208,7 +212,7 @@ func (f *defaultFormats) DelByName(name string) bool { return false } -// DelByFormat removes the specified format, returns true when an item was actually removed +// DelByFormat removes the specified format, returns true when an item was actually removed. func (f *defaultFormats) DelByFormat(strfmt Format) bool { f.Lock() defer f.Unlock() @@ -228,7 +232,7 @@ func (f *defaultFormats) DelByFormat(strfmt Format) bool { return false } -// ContainsName returns true if this registry contains the specified name +// ContainsName returns true if this registry contains the specified name. 
func (f *defaultFormats) ContainsName(name string) bool { f.Lock() defer f.Unlock() @@ -241,7 +245,7 @@ func (f *defaultFormats) ContainsName(name string) bool { return false } -// ContainsFormat returns true if this registry contains the specified format +// ContainsFormat returns true if this registry contains the specified format. func (f *defaultFormats) ContainsFormat(strfmt Format) bool { f.Lock() defer f.Unlock() diff --git a/vendor/github.com/go-openapi/strfmt/go.work b/vendor/github.com/go-openapi/strfmt/go.work new file mode 100644 index 00000000000..288e7655d45 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/go.work @@ -0,0 +1,7 @@ +use ( + . + ./enable/mongodb + ./internal/testintegration +) + +go 1.24.0 diff --git a/vendor/github.com/go-openapi/strfmt/go.work.sum b/vendor/github.com/go-openapi/strfmt/go.work.sum new file mode 100644 index 00000000000..33dac969b64 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/go.work.sum @@ -0,0 +1,16 @@ +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30 h1:BHT1/DKsYDGkUgQ2jmMaozVcdk+sVfz0+1ZJq4zkWgw= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= +github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE= +golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c= +golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU= +golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= +golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg= +golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM= +golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc= +golang.org/x/tools v0.41.0/go.mod 
h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/vendor/github.com/go-openapi/strfmt/ifaces.go b/vendor/github.com/go-openapi/strfmt/ifaces.go index 1b9e72c64eb..6252ae98a81 100644 --- a/vendor/github.com/go-openapi/strfmt/ifaces.go +++ b/vendor/github.com/go-openapi/strfmt/ifaces.go @@ -13,7 +13,7 @@ import ( // Format represents a string format. // // All implementations of Format provide a string representation and text -// marshaling/unmarshaling interface to be used by encoders (e.g. encoding/json). +// marshaling/unmarshaling interface to be used by encoders (e.g. encoding/[json]). type Format interface { String() string encoding.TextMarshaler @@ -22,11 +22,11 @@ type Format interface { // Registry is a registry of string formats, with a validation method. 
type Registry interface { - Add(string, Format, Validator) bool - DelByName(string) bool - GetType(string) (reflect.Type, bool) - ContainsName(string) bool - Validates(string, string) bool - Parse(string, string) (any, error) + Add(name string, strfmt Format, validator Validator) bool + DelByName(name string) bool + GetType(name string) (reflect.Type, bool) + ContainsName(name string) bool + Validates(name, data string) bool + Parse(name, data string) (any, error) MapStructureHookFunc() mapstructure.DecodeHookFunc } diff --git a/vendor/github.com/go-openapi/strfmt/internal/bsonlite/codec.go b/vendor/github.com/go-openapi/strfmt/internal/bsonlite/codec.go new file mode 100644 index 00000000000..424f45466c7 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/internal/bsonlite/codec.go @@ -0,0 +1,71 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package bsonlite provides a minimal BSON codec for strfmt types. +// +// This codec produces BSON output compatible with go.mongodb.org/mongo-driver/v2 +// (v2.5.0). It handles only the exact BSON patterns used by strfmt: +// single-key {"data": value} documents with string, DateTime, or ObjectID values. +// +// This package is intended to provide a backward-compatible API to users of +// go-openapi/strfmt. It is not intended to be maintained or to follow the +// evolutions of the official MongoDB drivers. For up-to-date MongoDB support, +// import "github.com/go-openapi/strfmt/enable/mongodb" to replace this codec +// with one backed by the real driver. +package bsonlite + +import "time" + +// Codec provides BSON document marshal/unmarshal for strfmt types. +// +// MarshalDoc encodes a single-key BSON document {"data": value}. +// The value must be one of: string, time.Time, or [12]byte (ObjectID). +// +// UnmarshalDoc decodes a BSON document and returns the "data" field's value. 
+// Returns one of: string, time.Time, or [12]byte depending on the BSON type. +type Codec interface { + MarshalDoc(value any) ([]byte, error) + UnmarshalDoc(data []byte) (any, error) +} + +// C is the active BSON codec. +// +//nolint:gochecknoglobals // replaceable codec, by design +var C Codec = liteCodec{} + +// Replace swaps the active BSON codec with the provided implementation. +// This is intended to be called from enable/mongodb's init(). +// +// Since [Replace] affects the global state of the package, it is not intended for concurrent use. +func Replace(c Codec) { + C = c +} + +// BSON type tags (from the BSON specification). +const ( + TypeString byte = 0x02 + TypeObjectID byte = 0x07 + TypeDateTime byte = 0x09 + TypeNull byte = 0x0A +) + +// ObjectIDSize is the size of a BSON ObjectID in bytes. +const ObjectIDSize = 12 + +// DateTimeToMillis converts a time.Time to BSON DateTime milliseconds. +func DateTimeToMillis(t time.Time) int64 { + const ( + millisec = 1000 + microsec = 1_000_000 + ) + return t.Unix()*millisec + int64(t.Nanosecond())/microsec +} + +// MillisToTime converts BSON DateTime milliseconds to time.Time. 
+func MillisToTime(millis int64) time.Time { + const ( + millisec = 1000 + nanosPerMs = 1_000_000 + ) + return time.Unix(millis/millisec, millis%millisec*nanosPerMs) +} diff --git a/vendor/github.com/go-openapi/strfmt/internal/bsonlite/lite.go b/vendor/github.com/go-openapi/strfmt/internal/bsonlite/lite.go new file mode 100644 index 00000000000..6b0e0e1c55a --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/internal/bsonlite/lite.go @@ -0,0 +1,213 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package bsonlite + +import ( + "encoding/binary" + "errors" + "fmt" + "time" +) + +// liteCodec is a minimal BSON codec that handles only the patterns used by strfmt: +// single-key documents of the form {"data": } where value is a string, +// BSON DateTime (time.Time), or BSON ObjectID ([12]byte). +type liteCodec struct{} + +var _ Codec = liteCodec{} + +func (liteCodec) MarshalDoc(value any) ([]byte, error) { + switch v := value.(type) { + case string: + return marshalStringDoc(v), nil + case time.Time: + return marshalDateTimeDoc(v), nil + case [ObjectIDSize]byte: + return marshalObjectIDDoc(v), nil + default: + return nil, fmt.Errorf("bsonlite: unsupported value type %T: %w", value, errUnsupportedType) + } +} + +func (liteCodec) UnmarshalDoc(data []byte) (any, error) { + return unmarshalDoc(data) +} + +// BSON wire format helpers. 
+// +// Document: int32(size) + elements + 0x00 +// Element: byte(type) + cstring(key) + value +// String: int32(len+1) + bytes + 0x00 +// DateTime: int64 (LE, millis since epoch) +// ObjectID: [12]byte + +const dataKey = "data\x00" + +func marshalStringDoc(s string) []byte { + sBytes := []byte(s) + // doc_size(4) + type(1) + key("data\0"=5) + strlen(4) + string + \0(1) + doc_term(1) + docSize := 4 + 1 + len(dataKey) + 4 + len(sBytes) + 1 + 1 + + buf := make([]byte, docSize) + pos := 0 + + binary.LittleEndian.PutUint32(buf[pos:], uint32(docSize)) //nolint:gosec // size is computed from input, cannot overflow + pos += 4 + + buf[pos] = TypeString + pos++ + + pos += copy(buf[pos:], dataKey) + + binary.LittleEndian.PutUint32(buf[pos:], uint32(len(sBytes)+1)) //nolint:gosec // string length cannot overflow uint32 + pos += 4 + + pos += copy(buf[pos:], sBytes) + buf[pos] = 0 // string null terminator + pos++ + + buf[pos] = 0 // document terminator + + return buf +} + +func marshalDateTimeDoc(t time.Time) []byte { + // doc_size(4) + type(1) + key("data\0"=5) + int64(8) + doc_term(1) + const docSize = 4 + 1 + 5 + 8 + 1 + + buf := make([]byte, docSize) + pos := 0 + + binary.LittleEndian.PutUint32(buf[pos:], docSize) + pos += 4 + + buf[pos] = TypeDateTime + pos++ + + pos += copy(buf[pos:], dataKey) + + millis := DateTimeToMillis(t) + binary.LittleEndian.PutUint64(buf[pos:], uint64(millis)) //nolint:gosec // negative datetime millis are valid + // pos += 8 + + buf[docSize-1] = 0 // document terminator + + return buf +} + +func marshalObjectIDDoc(oid [ObjectIDSize]byte) []byte { + // doc_size(4) + type(1) + key("data\0"=5) + objectid(12) + doc_term(1) + const docSize = 4 + 1 + 5 + ObjectIDSize + 1 + + buf := make([]byte, docSize) + pos := 0 + + binary.LittleEndian.PutUint32(buf[pos:], docSize) + pos += 4 + + buf[pos] = TypeObjectID + pos++ + + pos += copy(buf[pos:], dataKey) + + copy(buf[pos:], oid[:]) + // pos += ObjectIDSize + + buf[docSize-1] = 0 // document terminator + + 
return buf +} + +var ( + errUnsupportedType = errors.New("bsonlite: unsupported type") + errDocTooShort = errors.New("bsonlite: document too short") + errDocSize = errors.New("bsonlite: document size mismatch") + errNoTerminator = errors.New("bsonlite: missing key terminator") + errTruncated = errors.New("bsonlite: truncated value") + errDataNotFound = errors.New("bsonlite: \"data\" field not found") +) + +func unmarshalDoc(raw []byte) (any, error) { + const minDocSize = 5 // int32(size) + terminator + + if len(raw) < minDocSize { + return nil, errDocTooShort + } + + docSize := int(binary.LittleEndian.Uint32(raw[:4])) + if docSize != len(raw) { + return nil, errDocSize + } + + pos := 4 + + for pos < docSize-1 { + if pos >= len(raw) { + return nil, errTruncated + } + typeByte := raw[pos] + pos++ + + // Read key (cstring: bytes until 0x00). + keyStart := pos + for pos < len(raw) && raw[pos] != 0 { + pos++ + } + if pos >= len(raw) { + return nil, errNoTerminator + } + key := string(raw[keyStart:pos]) + pos++ // skip null terminator + + val, newPos, err := readValue(typeByte, raw, pos) + if err != nil { + return nil, err + } + pos = newPos + + if key == "data" { + return val, nil + } + } + + return nil, errDataNotFound +} + +func readValue(typeByte byte, raw []byte, pos int) (any, int, error) { + switch typeByte { + case TypeString: + if pos+4 > len(raw) { + return nil, 0, errTruncated + } + strLen := int(binary.LittleEndian.Uint32(raw[pos:])) + pos += 4 + if pos+strLen > len(raw) || strLen < 1 { + return nil, 0, errTruncated + } + s := string(raw[pos : pos+strLen-1]) // exclude null terminator + return s, pos + strLen, nil + + case TypeObjectID: + if pos+ObjectIDSize > len(raw) { + return nil, 0, errTruncated + } + var oid [ObjectIDSize]byte + copy(oid[:], raw[pos:pos+ObjectIDSize]) + return oid, pos + ObjectIDSize, nil + + case TypeDateTime: + const dateTimeSize = 8 + if pos+dateTimeSize > len(raw) { + return nil, 0, errTruncated + } + millis := 
int64(binary.LittleEndian.Uint64(raw[pos:])) //nolint:gosec // negative datetime millis are valid + return MillisToTime(millis), pos + dateTimeSize, nil + + case TypeNull: + return nil, pos, nil + + default: + return nil, 0, fmt.Errorf("bsonlite: unsupported BSON type 0x%02x: %w", typeByte, errUnsupportedType) + } +} diff --git a/vendor/github.com/go-openapi/strfmt/mongo.go b/vendor/github.com/go-openapi/strfmt/mongo.go index 641fed9b1a6..be904ffa5de 100644 --- a/vendor/github.com/go-openapi/strfmt/mongo.go +++ b/vendor/github.com/go-openapi/strfmt/mongo.go @@ -9,70 +9,89 @@ import ( "fmt" "time" - "github.com/oklog/ulid" - "go.mongodb.org/mongo-driver/bson" - "go.mongodb.org/mongo-driver/bson/bsontype" - bsonprim "go.mongodb.org/mongo-driver/bson/primitive" + "github.com/go-openapi/strfmt/internal/bsonlite" + "github.com/oklog/ulid/v2" ) +// bsonMarshaler is satisfied by types implementing MarshalBSON. +type bsonMarshaler interface { + MarshalBSON() ([]byte, error) +} + +// bsonUnmarshaler is satisfied by types implementing UnmarshalBSON. +type bsonUnmarshaler interface { + UnmarshalBSON(data []byte) error +} + +// bsonValueMarshaler is satisfied by types implementing MarshalBSONValue. +type bsonValueMarshaler interface { + MarshalBSONValue() (byte, []byte, error) +} + +// bsonValueUnmarshaler is satisfied by types implementing UnmarshalBSONValue. +type bsonValueUnmarshaler interface { + UnmarshalBSONValue(tpe byte, data []byte) error +} + +// Compile-time interface checks. 
var ( - _ bson.Marshaler = Date{} - _ bson.Unmarshaler = &Date{} - _ bson.Marshaler = Base64{} - _ bson.Unmarshaler = &Base64{} - _ bson.Marshaler = Duration(0) - _ bson.Unmarshaler = (*Duration)(nil) - _ bson.Marshaler = DateTime{} - _ bson.Unmarshaler = &DateTime{} - _ bson.Marshaler = ULID{} - _ bson.Unmarshaler = &ULID{} - _ bson.Marshaler = URI("") - _ bson.Unmarshaler = (*URI)(nil) - _ bson.Marshaler = Email("") - _ bson.Unmarshaler = (*Email)(nil) - _ bson.Marshaler = Hostname("") - _ bson.Unmarshaler = (*Hostname)(nil) - _ bson.Marshaler = IPv4("") - _ bson.Unmarshaler = (*IPv4)(nil) - _ bson.Marshaler = IPv6("") - _ bson.Unmarshaler = (*IPv6)(nil) - _ bson.Marshaler = CIDR("") - _ bson.Unmarshaler = (*CIDR)(nil) - _ bson.Marshaler = MAC("") - _ bson.Unmarshaler = (*MAC)(nil) - _ bson.Marshaler = Password("") - _ bson.Unmarshaler = (*Password)(nil) - _ bson.Marshaler = UUID("") - _ bson.Unmarshaler = (*UUID)(nil) - _ bson.Marshaler = UUID3("") - _ bson.Unmarshaler = (*UUID3)(nil) - _ bson.Marshaler = UUID4("") - _ bson.Unmarshaler = (*UUID4)(nil) - _ bson.Marshaler = UUID5("") - _ bson.Unmarshaler = (*UUID5)(nil) - _ bson.Marshaler = UUID7("") - _ bson.Unmarshaler = (*UUID7)(nil) - _ bson.Marshaler = ISBN("") - _ bson.Unmarshaler = (*ISBN)(nil) - _ bson.Marshaler = ISBN10("") - _ bson.Unmarshaler = (*ISBN10)(nil) - _ bson.Marshaler = ISBN13("") - _ bson.Unmarshaler = (*ISBN13)(nil) - _ bson.Marshaler = CreditCard("") - _ bson.Unmarshaler = (*CreditCard)(nil) - _ bson.Marshaler = SSN("") - _ bson.Unmarshaler = (*SSN)(nil) - _ bson.Marshaler = HexColor("") - _ bson.Unmarshaler = (*HexColor)(nil) - _ bson.Marshaler = RGBColor("") - _ bson.Unmarshaler = (*RGBColor)(nil) - _ bson.Marshaler = ObjectId{} - _ bson.Unmarshaler = &ObjectId{} - - _ bson.ValueMarshaler = DateTime{} - _ bson.ValueUnmarshaler = &DateTime{} - _ bson.ValueMarshaler = ObjectId{} - _ bson.ValueUnmarshaler = &ObjectId{} + _ bsonMarshaler = Date{} + _ bsonUnmarshaler = &Date{} + _ 
bsonMarshaler = Base64{} + _ bsonUnmarshaler = &Base64{} + _ bsonMarshaler = Duration(0) + _ bsonUnmarshaler = (*Duration)(nil) + _ bsonMarshaler = DateTime{} + _ bsonUnmarshaler = &DateTime{} + _ bsonMarshaler = ULID{} + _ bsonUnmarshaler = &ULID{} + _ bsonMarshaler = URI("") + _ bsonUnmarshaler = (*URI)(nil) + _ bsonMarshaler = Email("") + _ bsonUnmarshaler = (*Email)(nil) + _ bsonMarshaler = Hostname("") + _ bsonUnmarshaler = (*Hostname)(nil) + _ bsonMarshaler = IPv4("") + _ bsonUnmarshaler = (*IPv4)(nil) + _ bsonMarshaler = IPv6("") + _ bsonUnmarshaler = (*IPv6)(nil) + _ bsonMarshaler = CIDR("") + _ bsonUnmarshaler = (*CIDR)(nil) + _ bsonMarshaler = MAC("") + _ bsonUnmarshaler = (*MAC)(nil) + _ bsonMarshaler = Password("") + _ bsonUnmarshaler = (*Password)(nil) + _ bsonMarshaler = UUID("") + _ bsonUnmarshaler = (*UUID)(nil) + _ bsonMarshaler = UUID3("") + _ bsonUnmarshaler = (*UUID3)(nil) + _ bsonMarshaler = UUID4("") + _ bsonUnmarshaler = (*UUID4)(nil) + _ bsonMarshaler = UUID5("") + _ bsonUnmarshaler = (*UUID5)(nil) + _ bsonMarshaler = UUID7("") + _ bsonUnmarshaler = (*UUID7)(nil) + _ bsonMarshaler = ISBN("") + _ bsonUnmarshaler = (*ISBN)(nil) + _ bsonMarshaler = ISBN10("") + _ bsonUnmarshaler = (*ISBN10)(nil) + _ bsonMarshaler = ISBN13("") + _ bsonUnmarshaler = (*ISBN13)(nil) + _ bsonMarshaler = CreditCard("") + _ bsonUnmarshaler = (*CreditCard)(nil) + _ bsonMarshaler = SSN("") + _ bsonUnmarshaler = (*SSN)(nil) + _ bsonMarshaler = HexColor("") + _ bsonUnmarshaler = (*HexColor)(nil) + _ bsonMarshaler = RGBColor("") + _ bsonUnmarshaler = (*RGBColor)(nil) + _ bsonMarshaler = ObjectId{} + _ bsonUnmarshaler = &ObjectId{} + + _ bsonValueMarshaler = DateTime{} + _ bsonValueUnmarshaler = &DateTime{} + _ bsonValueMarshaler = ObjectId{} + _ bsonValueUnmarshaler = &ObjectId{} ) const ( @@ -82,99 +101,105 @@ const ( ) func (d Date) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": d.String()}) + return bsonlite.C.MarshalDoc(d.String()) } func (d 
*Date) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + v, err := bsonlite.C.UnmarshalDoc(data) + if err != nil { return err } - if data, ok := m["data"].(string); ok { - rd, err := time.ParseInLocation(RFC3339FullDate, data, DefaultTimeLocation) - if err != nil { - return err - } - *d = Date(rd) - return nil + s, ok := v.(string) + if !ok { + return fmt.Errorf("couldn't unmarshal bson bytes value as Date: %w", ErrFormat) } - return fmt.Errorf("couldn't unmarshal bson bytes value as Date: %w", ErrFormat) + rd, err := time.ParseInLocation(RFC3339FullDate, s, DefaultTimeLocation) + if err != nil { + return err + } + *d = Date(rd) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (b Base64) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": b.String()}) + return bsonlite.C.MarshalDoc(b.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. 
func (b *Base64) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + v, err := bsonlite.C.UnmarshalDoc(data) + if err != nil { return err } - if bd, ok := m["data"].(string); ok { - vb, err := base64.StdEncoding.DecodeString(bd) - if err != nil { - return err - } - *b = Base64(vb) - return nil + s, ok := v.(string) + if !ok { + return fmt.Errorf("couldn't unmarshal bson bytes as base64: %w", ErrFormat) + } + + vb, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return err } - return fmt.Errorf("couldn't unmarshal bson bytes as base64: %w", ErrFormat) + *b = Base64(vb) + return nil } func (d Duration) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": d.String()}) + return bsonlite.C.MarshalDoc(d.String()) } func (d *Duration) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + v, err := bsonlite.C.UnmarshalDoc(data) + if err != nil { return err } - if data, ok := m["data"].(string); ok { - rd, err := ParseDuration(data) - if err != nil { - return err - } - *d = Duration(rd) - return nil + s, ok := v.(string) + if !ok { + return fmt.Errorf("couldn't unmarshal bson bytes value as Duration: %w", ErrFormat) } - return fmt.Errorf("couldn't unmarshal bson bytes value as Date: %w", ErrFormat) + rd, err := ParseDuration(s) + if err != nil { + return err + } + *d = Duration(rd) + return nil } -// MarshalBSON renders the DateTime as a BSON document +// MarshalBSON renders the [DateTime] as a BSON document. func (t DateTime) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": t}) + tNorm := NormalizeTimeForMarshal(time.Time(t)) + return bsonlite.C.MarshalDoc(tNorm) } -// UnmarshalBSON reads the DateTime from a BSON document +// UnmarshalBSON reads the [DateTime] from a BSON document. 
func (t *DateTime) UnmarshalBSON(data []byte) error { - var obj struct { - Data DateTime - } - - if err := bson.Unmarshal(data, &obj); err != nil { + v, err := bsonlite.C.UnmarshalDoc(data) + if err != nil { return err } - *t = obj.Data - + tv, ok := v.(time.Time) + if !ok { + return fmt.Errorf("couldn't unmarshal bson bytes value as DateTime: %w", ErrFormat) + } + *t = DateTime(tv) return nil } +// MarshalBSONValue marshals a [DateTime] as a BSON DateTime value (type 0x09), +// an int64 representing milliseconds since epoch. +// // MarshalBSONValue is an interface implemented by types that can marshal themselves -// into a BSON document represented as bytes. The bytes returned must be a valid -// BSON document if the error is nil. +// into a BSON document represented as bytes. // -// Marshals a DateTime as a bson.TypeDateTime, an int64 representing -// milliseconds since epoch. -func (t DateTime) MarshalBSONValue() (bsontype.Type, []byte, error) { +// The bytes returned must be a valid BSON document if the error is nil. +func (t DateTime) MarshalBSONValue() (byte, []byte, error) { // UnixNano cannot be used directly, the result of calling UnixNano on the zero // Time is undefined. Thats why we use time.Nanosecond() instead. @@ -183,15 +208,12 @@ func (t DateTime) MarshalBSONValue() (bsontype.Type, []byte, error) { buf := make([]byte, bsonDateTimeSize) binary.LittleEndian.PutUint64(buf, uint64(i64)) //nolint:gosec // it's okay to handle negative int64 this way - return bson.TypeDateTime, buf, nil + return bsonlite.TypeDateTime, buf, nil } -// UnmarshalBSONValue is an interface implemented by types that can unmarshal a -// BSON value representation of themselves. The BSON bytes and type can be -// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it -// wishes to retain the data after returning. 
-func (t *DateTime) UnmarshalBSONValue(tpe bsontype.Type, data []byte) error { - if tpe == bson.TypeNull { +// UnmarshalBSONValue unmarshals a BSON DateTime value into this [DateTime]. +func (t *DateTime) UnmarshalBSONValue(tpe byte, data []byte) error { + if tpe == bsonlite.TypeNull { *t = DateTime{} return nil } @@ -206,440 +228,373 @@ func (t *DateTime) UnmarshalBSONValue(tpe bsontype.Type, data []byte) error { return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (u ULID) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": u.String()}) + return bsonlite.C.MarshalDoc(u.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (u *ULID) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + v, err := bsonlite.C.UnmarshalDoc(data) + if err != nil { return err } - if ud, ok := m["data"].(string); ok { - id, err := ulid.ParseStrict(ud) - if err != nil { - return fmt.Errorf("couldn't parse bson bytes as ULID: %w: %w", err, ErrFormat) - } - u.ULID = id - return nil + s, ok := v.(string) + if !ok { + return fmt.Errorf("couldn't unmarshal bson bytes as ULID: %w", ErrFormat) } - return fmt.Errorf("couldn't unmarshal bson bytes as ULID: %w", ErrFormat) + + id, err := ulid.ParseStrict(s) + if err != nil { + return fmt.Errorf("couldn't parse bson bytes as ULID: %w: %w", err, ErrFormat) + } + u.ULID = id + return nil } -// MarshalBSON document from this value +// unmarshalBSONString is a helper for string-based strfmt types. +func unmarshalBSONString(data []byte, typeName string) (string, error) { + v, err := bsonlite.C.UnmarshalDoc(data) + if err != nil { + return "", err + } + s, ok := v.(string) + if !ok { + return "", fmt.Errorf("couldn't unmarshal bson bytes as %s: %w", typeName, ErrFormat) + } + return s, nil +} + +// MarshalBSON document from this value. 
func (u URI) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": u.String()}) + return bsonlite.C.MarshalDoc(u.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (u *URI) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "uri") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *u = URI(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as uri: %w", ErrFormat) + *u = URI(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (e Email) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": e.String()}) + return bsonlite.C.MarshalDoc(e.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (e *Email) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "email") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *e = Email(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as email: %w", ErrFormat) + *e = Email(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (h Hostname) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": h.String()}) + return bsonlite.C.MarshalDoc(h.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. 
func (h *Hostname) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "hostname") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *h = Hostname(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as hostname: %w", ErrFormat) + *h = Hostname(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (u IPv4) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": u.String()}) + return bsonlite.C.MarshalDoc(u.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (u *IPv4) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "ipv4") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *u = IPv4(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as ipv4: %w", ErrFormat) + *u = IPv4(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (u IPv6) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": u.String()}) + return bsonlite.C.MarshalDoc(u.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (u *IPv6) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "ipv6") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *u = IPv6(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as ipv6: %w", ErrFormat) + *u = IPv6(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. 
func (u CIDR) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": u.String()}) + return bsonlite.C.MarshalDoc(u.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (u *CIDR) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "CIDR") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *u = CIDR(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as CIDR: %w", ErrFormat) + *u = CIDR(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (u MAC) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": u.String()}) + return bsonlite.C.MarshalDoc(u.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (u *MAC) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "MAC") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *u = MAC(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as MAC: %w", ErrFormat) + *u = MAC(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (r Password) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": r.String()}) + return bsonlite.C.MarshalDoc(r.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. 
func (r *Password) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "Password") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *r = Password(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as Password: %w", ErrFormat) + *r = Password(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (u UUID) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": u.String()}) + return bsonlite.C.MarshalDoc(u.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (u *UUID) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "UUID") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *u = UUID(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as UUID: %w", ErrFormat) + *u = UUID(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (u UUID3) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": u.String()}) + return bsonlite.C.MarshalDoc(u.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (u *UUID3) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "UUID3") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *u = UUID3(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as UUID3: %w", ErrFormat) + *u = UUID3(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. 
func (u UUID4) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": u.String()}) + return bsonlite.C.MarshalDoc(u.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (u *UUID4) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "UUID4") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *u = UUID4(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as UUID4: %w", ErrFormat) + *u = UUID4(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (u UUID5) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": u.String()}) + return bsonlite.C.MarshalDoc(u.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (u *UUID5) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "UUID5") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *u = UUID5(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as UUID5: %w", ErrFormat) + *u = UUID5(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (u UUID7) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": u.String()}) + return bsonlite.C.MarshalDoc(u.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. 
func (u *UUID7) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "UUID7") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *u = UUID7(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as UUID7: %w", ErrFormat) + *u = UUID7(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (u ISBN) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": u.String()}) + return bsonlite.C.MarshalDoc(u.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (u *ISBN) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "ISBN") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *u = ISBN(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as ISBN: %w", ErrFormat) + *u = ISBN(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (u ISBN10) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": u.String()}) + return bsonlite.C.MarshalDoc(u.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (u *ISBN10) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "ISBN10") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *u = ISBN10(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as ISBN10: %w", ErrFormat) + *u = ISBN10(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. 
func (u ISBN13) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": u.String()}) + return bsonlite.C.MarshalDoc(u.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (u *ISBN13) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "ISBN13") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *u = ISBN13(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as ISBN13: %w", ErrFormat) + *u = ISBN13(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (u CreditCard) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": u.String()}) + return bsonlite.C.MarshalDoc(u.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (u *CreditCard) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "CreditCard") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *u = CreditCard(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as CreditCard: %w", ErrFormat) + *u = CreditCard(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (u SSN) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": u.String()}) + return bsonlite.C.MarshalDoc(u.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. 
func (u *SSN) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "SSN") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *u = SSN(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as SSN: %w", ErrFormat) + *u = SSN(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (h HexColor) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": h.String()}) + return bsonlite.C.MarshalDoc(h.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (h *HexColor) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "HexColor") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *h = HexColor(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as HexColor: %w", ErrFormat) + *h = HexColor(s) + return nil } -// MarshalBSON document from this value +// MarshalBSON document from this value. func (r RGBColor) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": r.String()}) + return bsonlite.C.MarshalDoc(r.String()) } -// UnmarshalBSON document into this value +// UnmarshalBSON document into this value. func (r *RGBColor) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { + s, err := unmarshalBSONString(data, "RGBColor") + if err != nil { return err } - - if ud, ok := m["data"].(string); ok { - *r = RGBColor(ud) - return nil - } - return fmt.Errorf("couldn't unmarshal bson bytes as RGBColor: %w", ErrFormat) + *r = RGBColor(s) + return nil } -// MarshalBSON renders the object id as a BSON document +// MarshalBSON renders the object id as a BSON document. 
func (id ObjectId) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": bsonprim.ObjectID(id)}) + return bsonlite.C.MarshalDoc([12]byte(id)) } -// UnmarshalBSON reads the objectId from a BSON document +// UnmarshalBSON reads the objectId from a BSON document. func (id *ObjectId) UnmarshalBSON(data []byte) error { - var obj struct { - Data bsonprim.ObjectID - } - if err := bson.Unmarshal(data, &obj); err != nil { + v, err := bsonlite.C.UnmarshalDoc(data) + if err != nil { return err } - *id = ObjectId(obj.Data) + + oid, ok := v.([12]byte) + if !ok { + return fmt.Errorf("couldn't unmarshal bson bytes as ObjectId: %w", ErrFormat) + } + *id = ObjectId(oid) return nil } -// MarshalBSONValue is an interface implemented by types that can marshal themselves -// into a BSON document represented as bytes. The bytes returned must be a valid -// BSON document if the error is nil. -func (id ObjectId) MarshalBSONValue() (bsontype.Type, []byte, error) { - oid := bsonprim.ObjectID(id) - return bson.TypeObjectID, oid[:], nil -} - -// UnmarshalBSONValue is an interface implemented by types that can unmarshal a -// BSON value representation of themselves. The BSON bytes and type can be -// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it -// wishes to retain the data after returning. -func (id *ObjectId) UnmarshalBSONValue(_ bsontype.Type, data []byte) error { - var oid bsonprim.ObjectID +// MarshalBSONValue marshals the [ObjectId] as a raw BSON ObjectID value. +func (id ObjectId) MarshalBSONValue() (byte, []byte, error) { + oid := [12]byte(id) + return bsonlite.TypeObjectID, oid[:], nil +} + +// UnmarshalBSONValue unmarshals a raw BSON ObjectID value into this [ObjectId]. 
+func (id *ObjectId) UnmarshalBSONValue(_ byte, data []byte) error { + var oid [12]byte copy(oid[:], data) *id = ObjectId(oid) return nil diff --git a/vendor/github.com/go-openapi/strfmt/time.go b/vendor/github.com/go-openapi/strfmt/time.go index 8085aaf6965..1fde8c6b119 100644 --- a/vendor/github.com/go-openapi/strfmt/time.go +++ b/vendor/github.com/go-openapi/strfmt/time.go @@ -12,14 +12,12 @@ import ( "time" ) -var ( - // UnixZero sets the zero unix UTC timestamp we want to compare against. - // - // Unix 0 for an EST timezone is not equivalent to a UTC timezone. - UnixZero = time.Unix(0, 0).UTC() -) +// UnixZero sets the zero unix UTC timestamp we want to compare against. +// +// Unix 0 for an EST timezone is not equivalent to a UTC timezone. +var UnixZero = time.Unix(0, 0).UTC() //nolint:gochecknoglobals // package-level sentinel value for unix epoch -func init() { +func init() { //nolint:gochecknoinits // registers datetime format in the default registry dt := DateTime{} Default.Add("datetime", &dt, IsDateTime) } @@ -50,38 +48,48 @@ func IsDateTime(str string) bool { } const ( - // RFC3339Millis represents a ISO8601 format to millis instead of to nanos + // RFC3339Millis represents a ISO8601 format to millis instead of to nanos. RFC3339Millis = "2006-01-02T15:04:05.000Z07:00" - // RFC3339MillisNoColon represents a ISO8601 format to millis instead of to nanos + // RFC3339MillisNoColon represents a ISO8601 format to millis instead of to nanos. RFC3339MillisNoColon = "2006-01-02T15:04:05.000Z0700" - // RFC3339Micro represents a ISO8601 format to micro instead of to nano + // RFC3339Micro represents a ISO8601 format to micro instead of to nano. RFC3339Micro = "2006-01-02T15:04:05.000000Z07:00" - // RFC3339MicroNoColon represents a ISO8601 format to micro instead of to nano + // RFC3339MicroNoColon represents a ISO8601 format to micro instead of to nano. 
RFC3339MicroNoColon = "2006-01-02T15:04:05.000000Z0700" - // ISO8601LocalTime represents a ISO8601 format to ISO8601 in local time (no timezone) + // ISO8601LocalTime represents a ISO8601 format to ISO8601 in local time (no timezone). ISO8601LocalTime = "2006-01-02T15:04:05" - // ISO8601TimeWithReducedPrecision represents a ISO8601 format with reduced precision (dropped secs) + // ISO8601TimeWithReducedPrecision represents a ISO8601 format with reduced precision (dropped secs). ISO8601TimeWithReducedPrecision = "2006-01-02T15:04Z" - // ISO8601TimeWithReducedPrecisionLocaltime represents a ISO8601 format with reduced precision and no timezone (dropped seconds + no timezone) + // ISO8601TimeWithReducedPrecisionLocaltime represents a ISO8601 format with reduced precision and no timezone (dropped seconds + no timezone). ISO8601TimeWithReducedPrecisionLocaltime = "2006-01-02T15:04" // ISO8601TimeUniversalSortableDateTimePattern represents a ISO8601 universal sortable date time pattern. ISO8601TimeUniversalSortableDateTimePattern = "2006-01-02 15:04:05" - // ISO8601TimeUniversalSortableDateTimePatternShortForm is the short form of ISO8601TimeUniversalSortableDateTimePattern + // ISO8601TimeUniversalSortableDateTimePatternShortForm is the short form of [ISO8601TimeUniversalSortableDateTimePattern]. 
ISO8601TimeUniversalSortableDateTimePatternShortForm = "2006-01-02" // DateTimePattern pattern to match for the date-time format from http://tools.ietf.org/html/rfc3339#section-5.6 DateTimePattern = `^([0-9]{2}):([0-9]{2}):([0-9]{2})(.[0-9]+)?(z|([+-][0-9]{2}:[0-9]{2}))$` ) +//nolint:gochecknoglobals // package-level configuration for datetime parsing and marshaling var ( rxDateTime = regexp.MustCompile(DateTimePattern) - // DateTimeFormats is the collection of formats used by ParseDateTime() - DateTimeFormats = []string{RFC3339Micro, RFC3339MicroNoColon, RFC3339Millis, RFC3339MillisNoColon, time.RFC3339, time.RFC3339Nano, ISO8601LocalTime, ISO8601TimeWithReducedPrecision, ISO8601TimeWithReducedPrecisionLocaltime, ISO8601TimeUniversalSortableDateTimePattern, ISO8601TimeUniversalSortableDateTimePatternShortForm} + // DateTimeFormats is the collection of formats used by [ParseDateTime](). + DateTimeFormats = []string{ + RFC3339Micro, RFC3339MicroNoColon, + RFC3339Millis, RFC3339MillisNoColon, + time.RFC3339, time.RFC3339Nano, + ISO8601LocalTime, + ISO8601TimeWithReducedPrecision, + ISO8601TimeWithReducedPrecisionLocaltime, + ISO8601TimeUniversalSortableDateTimePattern, + ISO8601TimeUniversalSortableDateTimePatternShortForm, + } - // MarshalFormat sets the time resolution format used for marshaling time (set to milliseconds) + // MarshalFormat sets the time resolution format used for marshaling time (set to milliseconds). MarshalFormat = RFC3339Millis - // NormalizeTimeForMarshal provides a normalization function on time before marshalling (e.g. time.UTC). + // NormalizeTimeForMarshal provides a normalization function on time before marshaling (e.g. [time.UTC]). // By default, the time value is not changed. 
NormalizeTimeForMarshal = func(t time.Time) time.Time { return t } @@ -89,7 +97,7 @@ var ( DefaultTimeLocation = time.UTC ) -// ParseDateTime parses a string that represents an ISO8601 time or a unix epoch +// ParseDateTime parses a string that represents an ISO8601 time or a unix epoch. func ParseDateTime(data string) (DateTime, error) { if data == "" { return NewDateTime(), nil @@ -112,46 +120,46 @@ func ParseDateTime(data string) (DateTime, error) { // Most APIs we encounter want either millisecond or second precision times. // This just tries to make it worry-free. // -// swagger:strfmt date-time +// swagger:strfmt date-time. type DateTime time.Time // NewDateTime is a representation of the UNIX epoch (January 1, 1970 00:00:00 UTC) for the [DateTime] type. // // Notice that this is not the zero value of the [DateTime] type. // -// You may use [DateTime.IsUNIXZero] to check against this value. +// You may use [DateTime.IsUnixZero] to check against this value. func NewDateTime() DateTime { return DateTime(time.Unix(0, 0).UTC()) } // MakeDateTime is a representation of the zero value of the [DateTime] type (January 1, year 1, 00:00:00 UTC). // -// You may use [Datetime.IsZero] to check against this value. +// You may use [DateTime.IsZero] to check against this value. func MakeDateTime() DateTime { return DateTime(time.Time{}) } -// String converts this time to a string +// String converts this time to a string. func (t DateTime) String() string { return NormalizeTimeForMarshal(time.Time(t)).Format(MarshalFormat) } -// IsZero returns whether the date time is a zero value +// IsZero returns whether the date time is a zero value. func (t DateTime) IsZero() bool { return time.Time(t).IsZero() } -// IsUnixZero returns whether the date time is equivalent to time.Unix(0, 0).UTC(). +// IsUnixZero returns whether the date time is equivalent to [time.Unix](0, 0).UTC(). 
func (t DateTime) IsUnixZero() bool { return time.Time(t).Equal(UnixZero) } -// MarshalText implements the text marshaller interface +// MarshalText implements the text marshaler interface. func (t DateTime) MarshalText() ([]byte, error) { return []byte(t.String()), nil } -// UnmarshalText implements the text unmarshaller interface +// UnmarshalText implements the text unmarshaler interface. func (t *DateTime) UnmarshalText(text []byte) error { tt, err := ParseDateTime(string(text)) if err != nil { @@ -161,9 +169,9 @@ func (t *DateTime) UnmarshalText(text []byte) error { return nil } -// Scan scans a DateTime value from database driver type. +// Scan scans a [DateTime] value from database driver type. func (t *DateTime) Scan(raw any) error { - // TODO: case int64: and case float64: ? + // Proposal for enhancement: case int64: and case float64: ? switch v := raw.(type) { case []byte: return t.UnmarshalText(v) @@ -180,17 +188,17 @@ func (t *DateTime) Scan(raw any) error { return nil } -// Value converts DateTime to a primitive value ready to written to a database. +// Value converts [DateTime] to a primitive value ready to written to a database. func (t DateTime) Value() (driver.Value, error) { return driver.Value(t.String()), nil } -// MarshalJSON returns the DateTime as JSON +// MarshalJSON returns the [DateTime] as JSON. func (t DateTime) MarshalJSON() ([]byte, error) { return json.Marshal(NormalizeTimeForMarshal(time.Time(t)).Format(MarshalFormat)) } -// UnmarshalJSON sets the DateTime from JSON +// UnmarshalJSON sets the [DateTime] from JSON. func (t *DateTime) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil @@ -213,7 +221,7 @@ func (t *DateTime) DeepCopyInto(out *DateTime) { *out = *t } -// DeepCopy copies the receiver into a new DateTime. +// DeepCopy copies the receiver into a new [DateTime]. 
func (t *DateTime) DeepCopy() *DateTime { if t == nil { return nil @@ -233,12 +241,12 @@ func (t *DateTime) GobDecode(data []byte) error { return t.UnmarshalBinary(data) } -// MarshalBinary implements the encoding.BinaryMarshaler interface. +// MarshalBinary implements the encoding.[encoding.BinaryMarshaler] interface. func (t DateTime) MarshalBinary() ([]byte, error) { return NormalizeTimeForMarshal(time.Time(t)).MarshalBinary() } -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +// UnmarshalBinary implements the encoding.[encoding.BinaryUnmarshaler] interface. func (t *DateTime) UnmarshalBinary(data []byte) error { var original time.Time @@ -252,7 +260,7 @@ func (t *DateTime) UnmarshalBinary(data []byte) error { return nil } -// Equal checks if two DateTime instances are equal using time.Time's Equal method +// Equal checks if two [DateTime] instances are equal using [time.Time]'s Equal method. func (t DateTime) Equal(t2 DateTime) bool { return time.Time(t).Equal(time.Time(t2)) } diff --git a/vendor/github.com/go-openapi/strfmt/ulid.go b/vendor/github.com/go-openapi/strfmt/ulid.go index 85c5b53e6c7..f05d22c5187 100644 --- a/vendor/github.com/go-openapi/strfmt/ulid.go +++ b/vendor/github.com/go-openapi/strfmt/ulid.go @@ -11,23 +11,25 @@ import ( "io" "sync" - "github.com/oklog/ulid" + "github.com/oklog/ulid/v2" ) -// ULID represents a ulid string format -// ref: +// ULID represents a [ulid] string format. +// +// # Reference // // https://github.com/ulid/spec // -// impl: +// # Implementation // // https://github.com/oklog/ulid // -// swagger:strfmt ulid +// swagger:strfmt ulid. type ULID struct { ulid.ULID } +//nolint:gochecknoglobals // package-level ULID configuration and overridable scan/value functions var ( ulidEntropyPool = sync.Pool{ New: func() any { @@ -35,6 +37,7 @@ var ( }, } + // ULIDScanDefaultFunc is the default implementation for scanning a [ULID] from a database driver value. 
ULIDScanDefaultFunc = func(raw any) (ULID, error) { u := NewULIDZero() switch x := raw.(type) { @@ -54,45 +57,44 @@ var ( return u, fmt.Errorf("cannot sql.Scan() strfmt.ULID from: %#v: %w", raw, ulid.ErrScanValue) } - // ULIDScanOverrideFunc allows you to override the Scan method of the ULID type + // ULIDScanOverrideFunc allows you to override the Scan method of the [ULID] type. ULIDScanOverrideFunc = ULIDScanDefaultFunc + // ULIDValueDefaultFunc is the default implementation for converting a [ULID] to a database driver value. ULIDValueDefaultFunc = func(u ULID) (driver.Value, error) { return driver.Value(u.String()), nil } - // ULIDValueOverrideFunc allows you to override the Value method of the ULID type + // ULIDValueOverrideFunc allows you to override the Value method of the [ULID] type. ULIDValueOverrideFunc = ULIDValueDefaultFunc ) -func init() { - // register formats in the default registry: - // - ulid +func init() { //nolint:gochecknoinits // registers ulid format in the default registry ulid := ULID{} Default.Add("ulid", &ulid, IsULID) } -// IsULID checks if provided string is ULID format -// Be noticed that this function considers overflowed ULID as non-ulid. -// For more details see https://github.com/ulid/spec +// IsULID checks if provided string is [ULID] format +// Be noticed that this function considers overflowed [ULID] as non-[ulid]. +// For more details see https://github.com/[ulid]/spec func IsULID(str string) bool { _, err := ulid.ParseStrict(str) return err == nil } -// ParseULID parses a string that represents an valid ULID +// ParseULID parses a string that represents an valid [ULID]. func ParseULID(str string) (ULID, error) { var u ULID return u, u.UnmarshalText([]byte(str)) } -// NewULIDZero returns a zero valued ULID type +// NewULIDZero returns a zero valued [ULID] type. 
func NewULIDZero() ULID { return ULID{} } -// NewULID generates new unique ULID value and a error if any +// NewULID generates new unique [ULID] value and a error if any. func NewULID() (ULID, error) { var u ULID @@ -112,22 +114,22 @@ func NewULID() (ULID, error) { return u, nil } -// GetULID returns underlying instance of ULID +// GetULID returns underlying instance of [ULID]. func (u *ULID) GetULID() any { return u.ULID } -// MarshalText returns this instance into text +// MarshalText returns this instance into text. func (u ULID) MarshalText() ([]byte, error) { return u.ULID.MarshalText() } -// UnmarshalText hydrates this instance from text +// UnmarshalText hydrates this instance from text. func (u *ULID) UnmarshalText(data []byte) error { // validation is performed later on return u.ULID.UnmarshalText(data) } -// Scan reads a value from a database driver +// Scan reads a value from a database driver. func (u *ULID) Scan(raw any) error { ul, err := ULIDScanOverrideFunc(raw) if err == nil { @@ -136,7 +138,7 @@ func (u *ULID) Scan(raw any) error { return err } -// Value converts a value to a database driver value +// Value converts a value to a database driver value. func (u ULID) Value() (driver.Value, error) { return ULIDValueOverrideFunc(u) } @@ -145,12 +147,12 @@ func (u ULID) String() string { return u.ULID.String() } -// MarshalJSON returns the ULID as JSON +// MarshalJSON returns the [ULID] as JSON. func (u ULID) MarshalJSON() ([]byte, error) { return json.Marshal(u.String()) } -// UnmarshalJSON sets the ULID from JSON +// UnmarshalJSON sets the [ULID] from JSON. func (u *ULID) UnmarshalJSON(data []byte) error { if string(data) == jsonNull { return nil @@ -172,7 +174,7 @@ func (u *ULID) DeepCopyInto(out *ULID) { *out = *u } -// DeepCopy copies the receiver into a new ULID. +// DeepCopy copies the receiver into a new [ULID]. 
func (u *ULID) DeepCopy() *ULID { if u == nil { return nil @@ -192,17 +194,17 @@ func (u *ULID) GobDecode(data []byte) error { return u.ULID.UnmarshalBinary(data) } -// MarshalBinary implements the encoding.BinaryMarshaler interface. +// MarshalBinary implements the encoding.[encoding.BinaryMarshaler] interface. func (u ULID) MarshalBinary() ([]byte, error) { return u.ULID.MarshalBinary() } -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +// UnmarshalBinary implements the encoding.[encoding.BinaryUnmarshaler] interface. func (u *ULID) UnmarshalBinary(data []byte) error { return u.ULID.UnmarshalBinary(data) } -// Equal checks if two ULID instances are equal by their underlying type +// Equal checks if two [ULID] instances are equal by their underlying type. func (u ULID) Equal(other ULID) bool { return u.ULID == other.ULID } diff --git a/vendor/github.com/go-openapi/swag/.gitignore b/vendor/github.com/go-openapi/swag/.gitignore index c4b1b64f04e..a0a95a96b3f 100644 --- a/vendor/github.com/go-openapi/swag/.gitignore +++ b/vendor/github.com/go-openapi/swag/.gitignore @@ -3,3 +3,5 @@ vendor Godeps .idea *.out +.mcp.json +.claude/ diff --git a/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md index 9322b065e37..bac878f216a 100644 --- a/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md +++ b/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md @@ -23,7 +23,9 @@ include: Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or + advances + * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic @@ -55,7 +57,7 @@ further defined and clarified by project maintainers. 
## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All +reported by contacting the project team at . All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. @@ -68,7 +70,7 @@ members of the project's leadership. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] +available at [][version] [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/swag/CONTRIBUTORS.md b/vendor/github.com/go-openapi/swag/CONTRIBUTORS.md new file mode 100644 index 00000000000..bc76fe820c0 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/CONTRIBUTORS.md @@ -0,0 +1,36 @@ +# Contributors + +- Repository: ['go-openapi/swag'] + +| Total Contributors | Total Contributions | +| --- | --- | +| 24 | 235 | + +| Username | All Time Contribution Count | All Commits | +| --- | --- | --- | +| @fredbi | 105 | | +| @casualjim | 98 | | +| @alexandear | 4 | | +| @orisano | 3 | | +| @reinerRubin | 2 | | +| @n-inja | 2 | | +| @nitinmohan87 | 2 | | +| @Neo2308 | 2 | | +| @michaelbowler-form3 | 2 | | +| @ujjwalsh | 1 | | +| @griffin-stewie | 1 | | +| @POD666 | 1 | | +| @pytlesk4 | 1 | | +| @shirou | 1 | | +| @seanprince | 1 | | +| @petrkotas | 1 | | +| @mszczygiel | 1 | | +| @sosiska | 1 | | +| @kzys | 1 | | +| @faguirre1 | 1 | | +| @posener | 1 | | +| @diego-fu-hs | 1 | | +| @davidalpert | 1 | | +| @Xe | 1 | | + + _this file was generated by the [Contributors GitHub Action](https://github.com/github/contributors)_ diff --git a/vendor/github.com/go-openapi/swag/README.md 
b/vendor/github.com/go-openapi/swag/README.md index 371fd55fdc3..834eb2ffb9c 100644 --- a/vendor/github.com/go-openapi/swag/README.md +++ b/vendor/github.com/go-openapi/swag/README.md @@ -1,26 +1,60 @@ -# Swag [![Build Status](https://github.com/go-openapi/swag/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/swag/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) +# Swag + + +[![Tests][test-badge]][test-url] [![Coverage][cov-badge]][cov-url] [![CI vuln scan][vuln-scan-badge]][vuln-scan-url] [![CodeQL][codeql-badge]][codeql-url] + + + +[![Release][release-badge]][release-url] [![Go Report Card][gocard-badge]][gocard-url] [![CodeFactor Grade][codefactor-badge]][codefactor-url] [![License][license-badge]][license-url] + + +[![GoDoc][godoc-badge]][godoc-url] [![Discord Channel][discord-badge]][discord-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge] -[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) -[![license](https://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) -[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/swag.svg)](https://pkg.go.dev/github.com/go-openapi/swag) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/swag)](https://goreportcard.com/report/github.com/go-openapi/swag) +--- -Package `swag` contains a bunch of helper functions for go-openapi and go-swagger projects. +A bunch of helper functions for go-openapi and go-swagger projects. You may also use it standalone for your projects. > **NOTE** > `swag` is one of the foundational building blocks of the go-openapi initiative. +> > Most repositories in `github.com/go-openapi/...` depend on it in some way. 
> And so does our CLI tool `github.com/go-swagger/go-swagger`, > as well as the code generated by this tool. * [Contents](#contents) * [Dependencies](#dependencies) -* [Release Notes](#release-notes) +* [Change log](#change-log) * [Licensing](#licensing) * [Note to contributors](#note-to-contributors) -* [TODOs, suggestions and plans](#todos-suggestions-and-plans) +* [Roadmap](#roadmap) + +## Announcements + +* **2025-12-19** : new community chat on discord + * a new discord community channel is available to be notified of changes and support users + * our venerable Slack channel remains open, and will be eventually discontinued on **2026-03-31** + +You may join the discord community by clicking the invite link on the discord badge (also above). [![Discord Channel][discord-badge]][discord-url] + +Or join our Slack channel: [![Slack Channel][slack-logo]![slack-badge]][slack-url] + +## Status + +API is stable. + +## Import this library in your project + +```cmd +go get github.com/go-openapi/swag/{module} +``` + +Or for backward compatibility: + +```cmd +go get github.com/go-openapi/swag +``` ## Contents @@ -36,7 +70,7 @@ Child modules will continue to evolve and some new ones may be added in the futu | `cmdutils` | utilities to work with CLIs || | `conv` | type conversion utilities | convert between values and pointers for any types
convert from string to builtin types (wraps `strconv`)
require `./typeutils` (test dependency)
| | `fileutils` | file utilities | | -| `jsonname` | JSON utilities | infer JSON names from `go` properties
| +| `jsonname` | JSON utilities | infer JSON names from `go` properties
| | `jsonutils` | JSON utilities | fast json concatenation
read and write JSON from and to dynamic `go` data structures
~require `github.com/mailru/easyjson`~
| | `loading` | file loading | load from file or http
require `./yamlutils`
| | `mangling` | safe name generation | name mangling for `go`
| @@ -49,84 +83,19 @@ Child modules will continue to evolve and some new ones may be added in the futu ## Dependencies -The root module `github.com/go-openapi/swag` at the repo level maintains a few +The root module `github.com/go-openapi/swag` at the repo level maintains a few dependencies outside of the standard library. * YAML utilities depend on `go.yaml.in/yaml/v3` * JSON utilities depend on their registered adapter module: - * by default, only the standard library is used - * `github.com/mailru/easyjson` is now only a dependency for module - `github.com/go-openapi/swag/jsonutils/adapters/easyjson/json`, - for users willing to import that module. - * integration tests and benchmarks use all the dependencies are published as their own module + * by default, only the standard library is used + * `github.com/mailru/easyjson` is now only a dependency for module + `github.com/go-openapi/swag/jsonutils/adapters/easyjson/json`, + for users willing to import that module. + * integration tests and benchmarks use all the dependencies are published as their own module * other dependencies are test dependencies drawn from `github.com/stretchr/testify` -## Release notes - -### v0.25.4 - -** mangling** - -Bug fix - -* [x] mangler may panic with pluralized overlapping initialisms - -Tests - -* [x] introduced fuzz tests - -### v0.25.3 - -** mangling** - -Bug fix - -* [x] mangler may panic with pluralized initialisms - -### v0.25.2 - -Minor changes due to internal maintenance that don't affect the behavior of the library. - -* [x] removed indirect test dependencies by switching all tests to `go-openapi/testify`, - a fork of `stretch/testify` with zero-dependencies. -* [x] improvements to CI to catch test reports. -* [x] modernized licensing annotations in source code, using the more compact SPDX annotations - rather than the full license terms. 
-* [x] simplified a bit JSON & YAML testing by using newly available assertions -* started the journey to an OpenSSF score card badge: - * [x] explicited permissions in CI workflows - * [x] published security policy - * pinned dependencies to github actions - * introduced fuzzing in tests - -### v0.25.1 - -* fixes a data race that could occur when using the standard library implementation of a JSON ordered map - -### v0.25.0 - -**New with this release**: - -* requires `go1.24`, as iterators are being introduced -* removes the dependency to `mailru/easyjson` by default (#68) - * functionality remains the same, but performance may somewhat degrade for applications - that relied on `easyjson` - * users of the JSON or YAML utilities who want to use `easyjson` as their preferred JSON serializer library - will be able to do so by registering this the corresponding JSON adapter at runtime. See below. - * ordered keys in JSON and YAML objects: this feature used to rely solely on `easyjson`. - With this release, an implementation relying on the standard `encoding/json` is provided. - * an independent [benchmark](./jsonutils/adapters/testintegration/benchmarks/README.md) to compare the different adapters -* improves the "float is integer" check (`conv.IsFloat64AJSONInteger`) (#59) -* removes the _direct_ dependency to `gopkg.in/yaml.v3` (indirect dependency is still incurred through `stretchr/testify`) (#127) -* exposed `conv.IsNil()` (previously kept private): a safe nil check (accounting for the "non-nil interface with nil value" nonsensical go trick) - -**What coming next?** - -Moving forward, we want to : -* provide an implementation of the JSON adapter based on `encoding/json/v2`, for `go1.25` builds. -* provide similar implementations for `goccy/go-json` and `jsoniterator/go`, and perhaps some other - similar libraries may be interesting too. - +## Usage **How to explicitly register a dependency at runtime**? @@ -150,90 +119,106 @@ or fallback to the standard library. 
For more details, you may also look at our [integration tests](jsonutils/adapters/testintegration/integration_suite_test.go#29). -### v0.24.0 +--- -With this release, we have largely modernized the API of `swag`: +## Note to contributors -* The traditional `swag` API is still supported: code that imports `swag` will still - compile and work the same. -* A deprecation notice is published to encourage consumers of this library to adopt - the newer API -* **Deprecation notice** - * configuration through global variables is now deprecated, in favor of options passed as parameters - * all helper functions are moved to more specialized packages, which are exposed as - go modules. Importing such a module would reduce the footprint of dependencies. - * _all_ functions, variables, constants exposed by the deprecated API have now moved, so - that consumers of the new API no longer need to import github.com/go-openapi/swag, but - should import the desired sub-module(s). +All kinds of contributions are welcome. -**New with this release**: +This repo is a go mono-repo. See [docs](docs/MAINTAINERS.md). -* [x] type converters and pointer to value helpers now support generic types -* [x] name mangling now support pluralized initialisms (issue #46) - Strings like "contact IDs" are now recognized as such a plural form and mangled as a linter would expect. -* [x] performance: small improvements to reduce the overhead of convert/format wrappers (see issues #110, or PR #108) -* [x] performance: name mangling utilities run ~ 10% faster (PR #115) +More general guidelines are available [here](.github/CONTRIBUTING.md). ---- +## Roadmap -## Licensing +See the current [TODO list](docs/TODOS.md) -This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE). +## Change log -## Note to contributors +See -A mono-repo structure comes with some unavoidable extra pains... +For pre-v0.26.0 releases, see [release notes](./docs/NOTES.md). 
-* Testing +**What coming next?** -> The usual `go test ./...` command, run from the root of this repo won't work any longer to test all submodules. -> -> Each module constitutes an independant unit of test. So you have to run `go test` inside each module. -> Or you may take a look at how this is achieved by CI -> [here] https://github.com/go-openapi/swag/blob/master/.github/workflows/go-test.yml). -> -> There are also some alternative tricks using `go work`, for local development, if you feel comfortable with -> go workspaces. Perhaps some day, we'll have a `go work test` to run all tests without any hack. +Moving forward, we want to : -* Releasing +* provide an implementation of the JSON adapter based on `encoding/json/v2`, for `go1.25` builds. +* provide similar implementations for `goccy/go-json` and `jsoniterator/go`, and perhaps some other + similar libraries may be interesting too. -> Each module follows its own independant module versioning. -> -> So you have tags like `mangling/v0.24.0`, `fileutils/v0.24.0` etc that are used by `go mod` and `go get` -> to refer to the tagged version of each module specifically. -> -> This means we may release patches etc to each module independently. -> -> We'd like to adopt the rule that modules in this repo would only differ by a patch version -> (e.g. `v0.24.5` vs `v0.24.3`), and we'll level all modules whenever a minor version is introduced. -> -> A script in `./hack` is provided to tag all modules with the same version in one go. + -## Todos, suggestions and plans +## Licensing -All kinds of contributions are welcome. +This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE). 
-A few ideas: - -* [x] Complete the split of dependencies to isolate easyjson from the rest -* [x] Improve CI to reduce needed tests -* [x] Replace dependency to `gopkg.in/yaml.v3` (`yamlutil`) -* [ ] Improve mangling utilities (improve readability, support for capitalized words, - better word substitution for non-letter symbols...) -* [ ] Move back to this common shared pot a few of the technical features introduced by go-swagger independently - (e.g. mangle go package names, search package with go modules support, ...) -* [ ] Apply a similar mono-repo approach to go-openapi/strfmt which suffer from similar woes: bloated API, - imposed dependency to some database driver. -* [ ] Adapt `go-swagger` (incl. generated code) to the new `swag` API. -* [ ] Factorize some tests, as there is a lot of redundant testing code in `jsonutils` -* [ ] Benchmark & profiling: publish independently the tool built to analyze and chart benchmarks (e.g. similar to `benchvisual`) -* [ ] more thorough testing for nil / null case -* [ ] ci pipeline to manage releases -* [ ] cleaner mockery generation (doesn't work out of the box for all sub-modules) + + + + +## Other documentation + +* [All-time contributors](./CONTRIBUTORS.md) +* [Contributing guidelines](.github/CONTRIBUTING.md) +* [Maintainers documentation](docs/MAINTAINERS.md) +* [Code style](docs/STYLE.md) + +## Cutting a new release + +Maintainers can cut a new release by either: + +* running [this workflow](https://github.com/go-openapi/swag/actions/workflows/bump-release.yml) +* or pushing a semver tag + * signed tags are preferred + * The tag message is prepended to release notes + + +[test-badge]: https://github.com/go-openapi/swag/actions/workflows/go-test.yml/badge.svg +[test-url]: https://github.com/go-openapi/swag/actions/workflows/go-test.yml +[cov-badge]: https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg +[cov-url]: https://codecov.io/gh/go-openapi/swag +[vuln-scan-badge]: 
https://github.com/go-openapi/swag/actions/workflows/scanner.yml/badge.svg +[vuln-scan-url]: https://github.com/go-openapi/swag/actions/workflows/scanner.yml +[codeql-badge]: https://github.com/go-openapi/swag/actions/workflows/codeql.yml/badge.svg +[codeql-url]: https://github.com/go-openapi/swag/actions/workflows/codeql.yml + +[release-badge]: https://badge.fury.io/gh/go-openapi%2Fswag.svg +[release-url]: https://badge.fury.io/gh/go-openapi%2Fswag +[gomod-badge]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Fswag.svg +[gomod-url]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Fswag + +[gocard-badge]: https://goreportcard.com/badge/github.com/go-openapi/swag +[gocard-url]: https://goreportcard.com/report/github.com/go-openapi/swag +[codefactor-badge]: https://img.shields.io/codefactor/grade/github/go-openapi/swag +[codefactor-url]: https://www.codefactor.io/repository/github/go-openapi/swag + +[doc-badge]: https://img.shields.io/badge/doc-site-blue?link=https%3A%2F%2Fgoswagger.io%2Fgo-openapi%2F +[doc-url]: https://goswagger.io/go-openapi +[godoc-badge]: https://pkg.go.dev/badge/github.com/go-openapi/swag +[godoc-url]: http://pkg.go.dev/github.com/go-openapi/swag +[slack-logo]: https://a.slack-edge.com/e6a93c1/img/icons/favicon-32.png +[slack-badge]: https://img.shields.io/badge/slack-blue?link=https%3A%2F%2Fgoswagger.slack.com%2Farchives%2FC04R30YM +[slack-url]: https://goswagger.slack.com/archives/C04R30YMU +[discord-badge]: https://img.shields.io/discord/1446918742398341256?logo=discord&label=discord&color=blue +[discord-url]: https://discord.gg/twZ9BwT3 + + +[license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg +[license-url]: https://github.com/go-openapi/swag/?tab=Apache-2.0-1-ov-file#readme + +[goversion-badge]: https://img.shields.io/github/go-mod/go-version/go-openapi/swag +[goversion-url]: https://github.com/go-openapi/swag/blob/master/go.mod +[top-badge]: https://img.shields.io/github/languages/top/go-openapi/swag 
+[commits-badge]: https://img.shields.io/github/commits-since/go-openapi/swag/latest diff --git a/vendor/github.com/go-openapi/swag/jsonutils/README.md b/vendor/github.com/go-openapi/swag/jsonutils/README.md index d745cdb466e..07a2ca1d714 100644 --- a/vendor/github.com/go-openapi/swag/jsonutils/README.md +++ b/vendor/github.com/go-openapi/swag/jsonutils/README.md @@ -1,11 +1,11 @@ - # jsonutils +# jsonutils `jsonutils` exposes a few tools to work with JSON: - a fast, simple `Concat` to concatenate (not merge) JSON objects and arrays - `FromDynamicJSON` to convert a data structure into a "dynamic JSON" data structure - `ReadJSON` and `WriteJSON` behave like `json.Unmarshal` and `json.Marshal`, - with the ability to use another underlying serialization library through an `Adapter` + with the ability to use another underlying serialization library through an `Adapter` configured at runtime - a `JSONMapSlice` structure that may be used to store JSON objects with the order of keys maintained @@ -64,7 +64,7 @@ find a registered implementation that support ordered keys in objects. Our standard library implementation supports this. As of `v0.25.0`, we support through such an adapter the popular `mailru/easyjson` -library, which kicks in when the passed values support the `easyjson.Unmarshaler` +library, which kicks in when the passed values support the `easyjson.Unmarshaler` or `easyjson.Marshaler` interfaces. In the future, we plan to add more similar libraries that compete on the go JSON @@ -77,8 +77,9 @@ In package `github.com/go-openapi/swag/easyjson/adapters`, several adapters are Each adapter is an independent go module. Hence you'll pick its dependencies only if you import it. 
At this moment we provide: -* `stdlib`: JSON adapter based on the standard library -* `easyjson`: JSON adapter based on the `github.com/mailru/easyjson` + +- `stdlib`: JSON adapter based on the standard library +- `easyjson`: JSON adapter based on the `github.com/mailru/easyjson` The adapters provide the basic `Marshal` and `Unmarshal` capabilities, plus an implementation of the `MapSlice` pattern. diff --git a/vendor/github.com/go-openapi/swag/mangling/BENCHMARK.md b/vendor/github.com/go-openapi/swag/mangling/BENCHMARK.md index 6674c63b729..abe6e9533ea 100644 --- a/vendor/github.com/go-openapi/swag/mangling/BENCHMARK.md +++ b/vendor/github.com/go-openapi/swag/mangling/BENCHMARK.md @@ -4,7 +4,7 @@ go test -bench XXX -run XXX -benchtime 30s ``` -## Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df +## Benchmarks at `b3e7a5386f996177e4808f11acb2aa93a0f660df` ``` goos: linux @@ -49,7 +49,7 @@ BenchmarkToXXXName/ToHumanNameLower-16 18599661 1946 ns/op 92 B/op BenchmarkToXXXName/ToHumanNameTitle-16 17581353 2054 ns/op 105 B/op 6 allocs/op ``` -## Benchmarks at d7d2d1b895f5b6747afaff312dd2a402e69e818b +## Benchmarks at `d7d2d1b895f5b6747afaff312dd2a402e69e818b` go1.24 diff --git a/vendor/github.com/go-openapi/validate/.gitignore b/vendor/github.com/go-openapi/validate/.gitignore index fea8b84eca9..d8f4186fe59 100644 --- a/vendor/github.com/go-openapi/validate/.gitignore +++ b/vendor/github.com/go-openapi/validate/.gitignore @@ -1,5 +1,5 @@ -secrets.yml -coverage.out -*.cov *.out -playground +*.cov +.idea +.env +.mcp.json diff --git a/vendor/github.com/go-openapi/validate/.golangci.yml b/vendor/github.com/go-openapi/validate/.golangci.yml index 10c513342fc..4d6b36e4722 100644 --- a/vendor/github.com/go-openapi/validate/.golangci.yml +++ b/vendor/github.com/go-openapi/validate/.golangci.yml @@ -2,27 +2,15 @@ version: "2" linters: default: all disable: - - cyclop - depguard - - errchkjson - - errorlint - - exhaustruct - - forcetypeassert - funlen - gochecknoglobals - 
gochecknoinits - - gocognit - - godot - godox - - gomoddirectives - - gosmopolitan - - inamedparam - - intrange + - exhaustruct - ireturn - - lll - - musttag - - nestif - nlreturn + - nestif - nonamedreturns - noinlineerr - paralleltest @@ -30,7 +18,6 @@ linters: - testpackage - thelper - tparallel - - unparam - varnamelen - whitespace - wrapcheck @@ -42,8 +29,17 @@ linters: goconst: min-len: 2 min-occurrences: 3 + cyclop: + max-complexity: 40 gocyclo: - min-complexity: 45 + min-complexity: 40 + gocognit: + min-complexity: 40 + exhaustive: + default-signifies-exhaustive: true + default-case-required: true + lll: + line-length: 180 exclusions: generated: lax presets: @@ -59,6 +55,7 @@ formatters: enable: - gofmt - goimports + - gofumpt exclusions: generated: lax paths: diff --git a/vendor/github.com/go-openapi/validate/BENCHMARK.md b/vendor/github.com/go-openapi/validate/BENCHMARK.md index 79cf6a077ba..0353eae5bda 100644 --- a/vendor/github.com/go-openapi/validate/BENCHMARK.md +++ b/vendor/github.com/go-openapi/validate/BENCHMARK.md @@ -3,6 +3,7 @@ Validating the Kubernetes Swagger API ## v0.22.6: 60,000,000 allocs + ``` goos: linux goarch: amd64 @@ -11,7 +12,8 @@ cpu: AMD Ryzen 7 5800X 8-Core Processor Benchmark_KubernetesSpec/validating_kubernetes_API-16 1 8549863982 ns/op 7067424936 B/op 59583275 allocs/op ``` -## After refact PR: minor but noticable improvements: 25,000,000 allocs +## After refact PR: minor but noticeable improvements: 25,000,000 allocs + ``` go test -bench Spec goos: linux @@ -22,6 +24,7 @@ Benchmark_KubernetesSpec/validating_kubernetes_API-16 1 40645355 ``` ## After reduce GC pressure PR: 17,000,000 allocs + ``` goos: linux goarch: amd64 diff --git a/vendor/github.com/go-openapi/validate/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/validate/CODE_OF_CONDUCT.md index 9322b065e37..bac878f216a 100644 --- a/vendor/github.com/go-openapi/validate/CODE_OF_CONDUCT.md +++ b/vendor/github.com/go-openapi/validate/CODE_OF_CONDUCT.md @@ -23,7 +23,9 @@ 
include: Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or + advances + * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic @@ -55,7 +57,7 @@ further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All +reported by contacting the project team at . All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. @@ -68,7 +70,7 @@ members of the project's leadership. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] +available at [][version] [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/validate/CONTRIBUTORS.md b/vendor/github.com/go-openapi/validate/CONTRIBUTORS.md new file mode 100644 index 00000000000..7b79b765dc3 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/CONTRIBUTORS.md @@ -0,0 +1,43 @@ +# Contributors + +- Repository: ['go-openapi/validate'] + +| Total Contributors | Total Contributions | +| --- | --- | +| 31 | 295 | + +| Username | All Time Contribution Count | All Commits | +| --- | --- | --- | +| @casualjim | 169 | | +| @fredbi | 58 | | +| @sttts | 11 | | +| @youyuanwu | 9 | | +| @keramix | 8 | | +| @jerome-laforge | 4 | | +| @GlenDC | 4 | | +| @galaxie | 3 | | +| @tossmilestone | 2 | | +| @EleanorRigby | 2 | | +| @jiuker | 2 | | +| @pytlesk4 | 2 | | +| @dimovnike | 2 | | +| @gbjk | 2 
| | +| @ujjwalsh | 1 | | +| @key-amb | 1 | | +| @caglar10ur | 1 | | +| @petrkotas | 1 | | +| @dolmen | 1 | | +| @nikhita | 1 | | +| @koron | 1 | | +| @liggitt | 1 | | +| @ilyakaznacheev | 1 | | +| @hypnoglow | 1 | | +| @gautierdelorme | 1 | | +| @flavioribeiro | 1 | | +| @pheepi | 1 | | +| @carvind | 1 | | +| @artemseleznev | 1 | | +| @dadgar | 1 | | +| @elakito | 1 | | + + _this file was generated by the [Contributors GitHub Action](https://github.com/github/contributors)_ diff --git a/vendor/github.com/go-openapi/validate/README.md b/vendor/github.com/go-openapi/validate/README.md index 73d87ce4f01..fec42b7c6ed 100644 --- a/vendor/github.com/go-openapi/validate/README.md +++ b/vendor/github.com/go-openapi/validate/README.md @@ -1,15 +1,42 @@ -# Validation helpers [![Build Status](https://github.com/go-openapi/validate/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/validate/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/validate/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/validate) +# validate -[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/validate/master/LICENSE) -[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/validate.svg)](https://pkg.go.dev/github.com/go-openapi/validate) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/validate)](https://goreportcard.com/report/github.com/go-openapi/validate) + +[![Tests][test-badge]][test-url] [![Coverage][cov-badge]][cov-url] [![CI vuln scan][vuln-scan-badge]][vuln-scan-url] [![CodeQL][codeql-badge]][codeql-url] + + + +[![Release][release-badge]][release-url] [![Go Report Card][gocard-badge]][gocard-url] [![CodeFactor Grade][codefactor-badge]][codefactor-url] [![License][license-badge]][license-url] + + +[![GoDoc][godoc-badge]][godoc-url] 
[![Discord Channel][discord-badge]][discord-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge] -This package provides helpers to validate Swagger 2.0. specification (aka OpenAPI 2.0). +--- + +A validator for OpenAPI v2 specifications and JSON schema draft 4. + +## Announcements + +* **2025-12-19** : new community chat on discord + * a new discord community channel is available to be notified of changes and support users + * our venerable Slack channel remains open, and will be eventually discontinued on **2026-03-31** + +You may join the discord community by clicking the invite link on the discord badge (also above). [![Discord Channel][discord-badge]][discord-url] + +Or join our Slack channel: [![Slack Channel][slack-logo]![slack-badge]][slack-url] + +## Status + +API is stable. -Reference can be found here: https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md. +## Import this library in your project -## What's inside? +```cmd +go get github.com/go-openapi/validate +``` + +## Contents + +This package provides helpers to validate Swagger 2.0. specification (aka OpenAPI 2.0). * A validator for Swagger specifications * A validator for JSON schemas draft4 @@ -22,13 +49,7 @@ Reference can be found here: https://github.com/OAI/OpenAPI-Specification/blob/m * Minimum, Maximum, MultipleOf * FormatOf -[Documentation](https://pkg.go.dev/github.com/go-openapi/validate) - -## Licensing - -This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE). - -## FAQ +### FAQ * Does this library support OpenAPI 3? @@ -37,4 +58,67 @@ This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE). > There is no plan to make it evolve toward supporting OpenAPI 3.x. > This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story. 
> -> An early attempt to support Swagger 3 may be found at: https://github.com/go-openapi/spec3 +> An early attempt to support Swagger 3 may be found at: + +## Change log + +See + +## References + + + +## Licensing + +This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE). + +## Other documentation + +* [All-time contributors](./CONTRIBUTORS.md) +* [Contributing guidelines](.github/CONTRIBUTING.md) +* [Maintainers documentation](docs/MAINTAINERS.md) +* [Code style](docs/STYLE.md) + +## Cutting a new release + +Maintainers can cut a new release by either: + +* running [this workflow](https://github.com/go-openapi/validate/actions/workflows/bump-release.yml) +* or pushing a semver tag + * signed tags are preferred + * The tag message is prepended to release notes + + +[test-badge]: https://github.com/go-openapi/validate/actions/workflows/go-test.yml/badge.svg +[test-url]: https://github.com/go-openapi/validate/actions/workflows/go-test.yml +[cov-badge]: https://codecov.io/gh/go-openapi/validate/branch/master/graph/badge.svg +[cov-url]: https://codecov.io/gh/go-openapi/validate +[vuln-scan-badge]: https://github.com/go-openapi/validate/actions/workflows/scanner.yml/badge.svg +[vuln-scan-url]: https://github.com/go-openapi/validate/actions/workflows/scanner.yml +[codeql-badge]: https://github.com/go-openapi/validate/actions/workflows/codeql.yml/badge.svg +[codeql-url]: https://github.com/go-openapi/validate/actions/workflows/codeql.yml + +[release-badge]: https://badge.fury.io/gh/go-openapi%2Fvalidate.svg +[release-url]: https://badge.fury.io/gh/go-openapi%2Fvalidate + +[gocard-badge]: https://goreportcard.com/badge/github.com/go-openapi/validate +[gocard-url]: https://goreportcard.com/report/github.com/go-openapi/validate +[codefactor-badge]: https://img.shields.io/codefactor/grade/github/go-openapi/validate +[codefactor-url]: https://www.codefactor.io/repository/github/go-openapi/validate + +[godoc-badge]: 
https://pkg.go.dev/badge/github.com/go-openapi/validate +[godoc-url]: http://pkg.go.dev/github.com/go-openapi/validate +[slack-logo]: https://a.slack-edge.com/e6a93c1/img/icons/favicon-32.png +[slack-badge]: https://img.shields.io/badge/slack-blue?link=https%3A%2F%2Fgoswagger.slack.com%2Farchives%2FC04R30YM +[slack-url]: https://goswagger.slack.com/archives/C04R30YMU +[discord-badge]: https://img.shields.io/discord/1446918742398341256?logo=discord&label=discord&color=blue +[discord-url]: https://discord.gg/twZ9BwT3 + + +[license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg +[license-url]: https://github.com/go-openapi/validate/?tab=Apache-2.0-1-ov-file#readme + +[goversion-badge]: https://img.shields.io/github/go-mod/go-version/go-openapi/validate +[goversion-url]: https://github.com/go-openapi/validate/blob/master/go.mod +[top-badge]: https://img.shields.io/github/languages/top/go-openapi/validate +[commits-badge]: https://img.shields.io/github/commits-since/go-openapi/validate/latest diff --git a/vendor/github.com/go-openapi/validate/SECURITY.md b/vendor/github.com/go-openapi/validate/SECURITY.md new file mode 100644 index 00000000000..6ceb159ca22 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/SECURITY.md @@ -0,0 +1,37 @@ +# Security Policy + +This policy outlines the commitment and practices of the go-openapi maintainers regarding security. + +## Supported Versions + +| Version | Supported | +| ------- | ------------------ | +| 0.x | :white_check_mark: | + +## Vulnerability checks in place + +This repository uses automated vulnerability scans, at every merged commit and at least once a week. + +We use: + +* [`GitHub CodeQL`][codeql-url] +* [`trivy`][trivy-url] +* [`govulncheck`][govulncheck-url] + +Reports are centralized in github security reports and visible only to the maintainers. 
+ +## Reporting a vulnerability + +If you become aware of a security vulnerability that affects the current repository, +**please report it privately to the maintainers** +rather than opening a publicly visible GitHub issue. + +Please follow the instructions provided by github to [Privately report a security vulnerability][github-guidance-url]. + +> [!NOTE] +> On Github, navigate to the project's "Security" tab then click on "Report a vulnerability". + +[codeql-url]: https://github.com/github/codeql +[trivy-url]: https://trivy.dev/docs/latest/getting-started +[govulncheck-url]: https://go.dev/blog/govulncheck +[github-guidance-url]: https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability diff --git a/vendor/github.com/go-openapi/validate/context.go b/vendor/github.com/go-openapi/validate/context.go index b4587dcd560..7f295f97139 100644 --- a/vendor/github.com/go-openapi/validate/context.go +++ b/vendor/github.com/go-openapi/validate/context.go @@ -7,7 +7,7 @@ import ( "context" ) -// validateCtxKey is the key type of context key in this pkg +// validateCtxKey is the key type of context key in this pkg. type validateCtxKey string const ( @@ -25,13 +25,13 @@ const ( var operationTypeEnum = []operationType{request, response, none} // WithOperationRequest returns a new context with operationType request -// in context value +// in context value. func WithOperationRequest(ctx context.Context) context.Context { return withOperation(ctx, request) } // WithOperationResponse returns a new context with operationType response -// in context value +// in context value. 
func WithOperationResponse(ctx context.Context) context.Context { return withOperation(ctx, response) } @@ -41,7 +41,7 @@ func withOperation(ctx context.Context, operation operationType) context.Context } // extractOperationType extracts the operation type from ctx -// if not specified or of unknown value, return none operation type +// if not specified or of unknown value, return none operation type. func extractOperationType(ctx context.Context) operationType { v := ctx.Value(operationTypeKey) if v == nil { diff --git a/vendor/github.com/go-openapi/validate/debug.go b/vendor/github.com/go-openapi/validate/debug.go index 79145a4495d..c890d1280f8 100644 --- a/vendor/github.com/go-openapi/validate/debug.go +++ b/vendor/github.com/go-openapi/validate/debug.go @@ -15,7 +15,7 @@ var ( // Debug is true when the SWAGGER_DEBUG env var is not empty. // It enables a more verbose logging of validators. Debug = os.Getenv("SWAGGER_DEBUG") != "" - // validateLogger is a debug logger for this package + // validateLogger is a debug logger for this package. validateLogger *log.Logger ) diff --git a/vendor/github.com/go-openapi/validate/default_validator.go b/vendor/github.com/go-openapi/validate/default_validator.go index 79a431677e4..ebcd8071374 100644 --- a/vendor/github.com/go-openapi/validate/default_validator.go +++ b/vendor/github.com/go-openapi/validate/default_validator.go @@ -18,7 +18,7 @@ type defaultValidator struct { schemaOptions *SchemaValidatorOptions } -// Validate validates the default values declared in the swagger spec +// Validate validates the default values declared in the swagger spec. func (d *defaultValidator) Validate() *Result { errs := pools.poolOfResults.BorrowResult() // will redeem when merged @@ -30,7 +30,7 @@ func (d *defaultValidator) Validate() *Result { return errs } -// resetVisited resets the internal state of visited schemas +// resetVisited resets the internal state of visited schemas. 
func (d *defaultValidator) resetVisited() { if d.visitedSchemas == nil { d.visitedSchemas = make(map[string]struct{}) @@ -38,7 +38,7 @@ func (d *defaultValidator) resetVisited() { return } - // TODO(go1.21): clear(ex.visitedSchemas) + // NOTE(go1.21): clear(ex.visitedSchemas) for k := range d.visitedSchemas { delete(d.visitedSchemas, k) } @@ -73,16 +73,17 @@ func isVisited(path string, visitedSchemas map[string]struct{}) bool { return false } -// beingVisited asserts a schema is being visited +// beingVisited asserts a schema is being visited. func (d *defaultValidator) beingVisited(path string) { d.visitedSchemas[path] = struct{}{} } -// isVisited tells if a path has already been visited +// isVisited tells if a path has already been visited. func (d *defaultValidator) isVisited(path string) bool { return isVisited(path, d.visitedSchemas) } +//nolint:gocognit // refactor in a forthcoming PR func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result { // every default value that is specified must validate against the schema for that property // headers, items, parameters, schema @@ -272,7 +273,7 @@ func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in stri return res } -// TODO: Temporary duplicated code. Need to refactor with examples +// NOTE: Temporary duplicated code. 
Need to refactor with examples func (d *defaultValidator) validateDefaultValueItemsAgainstSchema(path, in string, root any, items *spec.Items) *Result { res := pools.poolOfResults.BorrowResult() diff --git a/vendor/github.com/go-openapi/validate/doc.go b/vendor/github.com/go-openapi/validate/doc.go index a99893e1a38..5218ec85c63 100644 --- a/vendor/github.com/go-openapi/validate/doc.go +++ b/vendor/github.com/go-openapi/validate/doc.go @@ -1,76 +1,76 @@ // SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers // SPDX-License-Identifier: Apache-2.0 -/* -Package validate provides methods to validate a swagger specification, -as well as tools to validate data against their schema. - -This package follows Swagger 2.0. specification (aka OpenAPI 2.0). Reference -can be found here: https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md. - -# Validating a specification - -Validates a spec document (from JSON or YAML) against the JSON schema for swagger, -then checks a number of extra rules that can't be expressed in JSON schema. - -Entry points: - - Spec() - - NewSpecValidator() - - SpecValidator.Validate() - -Reported as errors: - - [x] definition can't declare a property that's already defined by one of its ancestors - [x] definition's ancestor can't be a descendant of the same model - [x] path uniqueness: each api path should be non-verbatim (account for path param names) unique per method. Validation can be laxed by disabling StrictPathParamUniqueness. 
- [x] each security reference should contain only unique scopes - [x] each security scope in a security definition should be unique - [x] parameters in path must be unique - [x] each path parameter must correspond to a parameter placeholder and vice versa - [x] each referenceable definition must have references - [x] each definition property listed in the required array must be defined in the properties of the model - [x] each parameter should have a unique `name` and `type` combination - [x] each operation should have only 1 parameter of type body - [x] each reference must point to a valid object - [x] every default value that is specified must validate against the schema for that property - [x] items property is required for all schemas/definitions of type `array` - [x] path parameters must be declared a required - [x] headers must not contain $ref - [x] schema and property examples provided must validate against their respective object's schema - [x] examples provided must validate their schema - -Reported as warnings: - - [x] path parameters should not contain any of [{,},\w] - [x] empty path - [x] unused definitions - [x] unsupported validation of examples on non-JSON media types - [x] examples in response without schema - [x] readOnly properties should not be required - -# Validating a schema - -The schema validation toolkit validates data against JSON-schema-draft 04 schema. - -It is tested against the full json-schema-testing-suite (https://github.com/json-schema-org/JSON-Schema-Test-Suite), -except for the optional part (bignum, ECMA regexp, ...). - -It supports the complete JSON-schema vocabulary, including keywords not supported by Swagger (e.g. additionalItems, ...) - -Entry points: - - AgainstSchema() - - ... 
- -# Known limitations - -With the current version of this package, the following aspects of swagger are not yet supported: - - [ ] errors and warnings are not reported with key/line number in spec - [ ] default values and examples on responses only support application/json producer type - [ ] invalid numeric constraints (such as Minimum, etc..) are not checked except for default and example values - [ ] rules for collectionFormat are not implemented - [ ] no validation rule for polymorphism support (discriminator) [not done here] - [ ] valid js ECMA regexp not supported by Go regexp engine are considered invalid - [ ] arbitrary large numbers are not supported: max is math.MaxFloat64 -*/ +// Package validate provides methods to validate a swagger specification, +// as well as tools to validate data against their schema. +// +// This package follows Swagger 2.0. specification (aka OpenAPI 2.0). Reference. +// can be found here: https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md. +// +// # Validating a specification +// +// Validates a spec document (from JSON or YAML) against the JSON schema for swagger, +// then checks a number of extra rules that can't be expressed in JSON schema. +// +// Entry points: +// +// - Spec() +// - [NewSpecValidator]() +// - [SpecValidator].Validate() +// +// Reported as errors: +// +// [x] definition can't declare a property that's already defined by one of its ancestors +// [x] definition's ancestor can't be a descendant of the same model +// [x] path uniqueness: each api path should be non-verbatim (account for path param names) unique per method. Validation can be laxed by disabling StrictPathParamUniqueness. 
+// [x] each security reference should contain only unique scopes +// [x] each security scope in a security definition should be unique +// [x] parameters in path must be unique +// [x] each path parameter must correspond to a parameter placeholder and vice versa +// [x] each referenceable definition must have references +// [x] each definition property listed in the required array must be defined in the properties of the model +// [x] each parameter should have a unique `name` and `type` combination +// [x] each operation should have only 1 parameter of type body +// [x] each reference must point to a valid object +// [x] every default value that is specified must validate against the schema for that property +// [x] items property is required for all schemas/definitions of type `array` +// [x] path parameters must be declared a required +// [x] headers must not contain $ref +// [x] schema and property examples provided must validate against their respective object's schema +// [x] examples provided must validate their schema +// +// Reported as warnings: +// +// [x] path parameters should not contain any of [{,},\w] +// [x] empty path +// [x] unused definitions +// [x] unsupported validation of examples on non-JSON media types +// [x] examples in response without schema +// [x] readOnly properties should not be required +// +// # Validating a schema +// +// The schema validation toolkit validates data against JSON-schema-draft 04 schema. +// +// It is tested against the full json-schema-testing-suite (https://github.com/json-schema-org/JSON-Schema-Test-Suite), +// except for the optional part (bignum, ECMA regexp, ...). +// +// It supports the complete JSON-schema vocabulary, including keywords not supported by Swagger (e.g. additionalItems, ...) +// +// Entry points: +// +// - [AgainstSchema]() +// - ... 
+// +// # Known limitations +// +// With the current version of this package, the following aspects of swagger are not yet supported: +// +// [ ] errors and warnings are not reported with key/line number in spec +// [ ] default values and examples on responses only support application/json producer type +// [ ] invalid numeric constraints (such as Minimum, etc..) are not checked except for default and example values +// [ ] rules for collectionFormat are not implemented +// [ ] no validation rule for polymorphism support (discriminator) [not done here] +// [ ] valid js ECMA regexp not supported by Go regexp engine are considered invalid +// [ ] arbitrary large numbers are not supported: max is math.MaxFloat64 package validate diff --git a/vendor/github.com/go-openapi/validate/example_validator.go b/vendor/github.com/go-openapi/validate/example_validator.go index e4ef52c6dc1..eb6b5ee5c70 100644 --- a/vendor/github.com/go-openapi/validate/example_validator.go +++ b/vendor/github.com/go-openapi/validate/example_validator.go @@ -9,7 +9,7 @@ import ( "github.com/go-openapi/spec" ) -// ExampleValidator validates example values defined in a spec +// ExampleValidator validates example values defined in a spec. type exampleValidator struct { SpecValidator *SpecValidator visitedSchemas map[string]struct{} @@ -35,7 +35,7 @@ func (ex *exampleValidator) Validate() *Result { return errs } -// resetVisited resets the internal state of visited schemas +// resetVisited resets the internal state of visited schemas. func (ex *exampleValidator) resetVisited() { if ex.visitedSchemas == nil { ex.visitedSchemas = make(map[string]struct{}) @@ -43,22 +43,23 @@ func (ex *exampleValidator) resetVisited() { return } - // TODO(go1.21): clear(ex.visitedSchemas) + // NOTE(go1.21): clear(ex.visitedSchemas) for k := range ex.visitedSchemas { delete(ex.visitedSchemas, k) } } -// beingVisited asserts a schema is being visited +// beingVisited asserts a schema is being visited. 
func (ex *exampleValidator) beingVisited(path string) { ex.visitedSchemas[path] = struct{}{} } -// isVisited tells if a path has already been visited +// isVisited tells if a path has already been visited. func (ex *exampleValidator) isVisited(path string) bool { return isVisited(path, ex.visitedSchemas) } +//nolint:gocognit // refactor in a forthcoming PR func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result { // every example value that is specified must validate against the schema for that property // in: schemas, properties, object, items @@ -205,7 +206,7 @@ func (ex *exampleValidator) validateExampleInResponse(resp *spec.Response, respo newSchemaValidator(response.Schema, s.spec.Spec(), path+".examples", s.KnownFormats, s.schemaOptions).Validate(example), ) } else { - // TODO: validate other media types too + // Proposal for enhancement: validate other media types too res.AddWarnings(examplesMimeNotSupportedMsg(operationID, responseName)) } } else { @@ -264,7 +265,7 @@ func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in str return res } -// TODO: Temporary duplicated code. Need to refactor with examples +// NOTE: Temporary duplicated code. 
Need to refactor with examples // func (ex *exampleValidator) validateExampleValueItemsAgainstSchema(path, in string, root any, items *spec.Items) *Result { diff --git a/vendor/github.com/go-openapi/validate/formats.go b/vendor/github.com/go-openapi/validate/formats.go index 85ee6349418..eab26153762 100644 --- a/vendor/github.com/go-openapi/validate/formats.go +++ b/vendor/github.com/go-openapi/validate/formats.go @@ -76,7 +76,12 @@ func (f *formatValidator) Validate(val any) *Result { result = new(Result) } - if err := FormatOf(f.Path, f.In, f.Format, val.(string), f.KnownFormats); err != nil { + str, ok := val.(string) + if !ok { + return result + } + + if err := FormatOf(f.Path, f.In, f.Format, str, f.KnownFormats); err != nil { result.AddErrors(err) } diff --git a/vendor/github.com/go-openapi/validate/helpers.go b/vendor/github.com/go-openapi/validate/helpers.go index 49b130473a9..8a1a2312830 100644 --- a/vendor/github.com/go-openapi/validate/helpers.go +++ b/vendor/github.com/go-openapi/validate/helpers.go @@ -3,7 +3,7 @@ package validate -// TODO: define this as package validate/internal +// Proposal for enhancement: define this as package validate/internal // This must be done while keeping CI intact with all tests and test coverage import ( @@ -36,7 +36,7 @@ const ( jsonProperties = "properties" jsonItems = "items" jsonType = "type" - // jsonSchema = "schema" + // jsonSchema = "schema". jsonDefault = "default" ) @@ -45,7 +45,7 @@ const ( stringFormatDateTime = "date-time" stringFormatPassword = "password" stringFormatByte = "byte" - // stringFormatBinary = "binary" + // stringFormatBinary = "binary". stringFormatCreditCard = "creditcard" stringFormatDuration = "duration" stringFormatEmail = "email" @@ -77,7 +77,7 @@ const ( numberFormatDouble = "double" ) -// Helpers available at the package level +// Helpers available at the package level. 
var ( pathHelp *pathHelper valueHelp *valueHelper @@ -126,10 +126,11 @@ func (h *pathHelper) stripParametersInPath(path string) string { // Regexp to extract parameters from path, with surrounding {}. // NOTE: important non-greedy modifier rexParsePathParam := mustCompileRegexp(`{[^{}]+?}`) - strippedSegments := []string{} + segments := strings.Split(path, "/") + strippedSegments := make([]string, len(segments)) - for segment := range strings.SplitSeq(path, "/") { - strippedSegments = append(strippedSegments, rexParsePathParam.ReplaceAllString(segment, "X")) + for i, segment := range segments { + strippedSegments[i] = rexParsePathParam.ReplaceAllString(segment, "X") } return strings.Join(strippedSegments, "/") } @@ -154,7 +155,7 @@ func (h *valueHelper) asInt64(val any) int64 { // Number conversion function for int64, without error checking // (implements an implicit type upgrade). v := reflect.ValueOf(val) - switch v.Kind() { //nolint:exhaustive + switch v.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return v.Int() case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: @@ -171,7 +172,7 @@ func (h *valueHelper) asUint64(val any) uint64 { // Number conversion function for uint64, without error checking // (implements an implicit type upgrade). v := reflect.ValueOf(val) - switch v.Kind() { //nolint:exhaustive + switch v.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return uint64(v.Int()) //nolint:gosec case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: @@ -184,12 +185,12 @@ func (h *valueHelper) asUint64(val any) uint64 { } } -// Same for unsigned floats +// Same for unsigned floats. func (h *valueHelper) asFloat64(val any) float64 { // Number conversion function for float64, without error checking // (implements an implicit type upgrade). 
v := reflect.ValueOf(val) - switch v.Kind() { //nolint:exhaustive + switch v.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return float64(v.Int()) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: @@ -244,7 +245,6 @@ func (h *paramHelper) resolveParam(path, method, operationID string, param *spec err = spec.ExpandParameterWithRoot(param, s.spec.Spec(), nil) } else { err = spec.ExpandParameter(param, s.spec.SpecFilePath()) - } if err != nil { // Safeguard // NOTE: we may enter here when the whole parameter is an unresolved $ref @@ -288,7 +288,8 @@ type responseHelper struct { func (r *responseHelper) expandResponseRef( response *spec.Response, - path string, s *SpecValidator) (*spec.Response, *Result) { + path string, s *SpecValidator, +) (*spec.Response, *Result) { // Ensure response is expanded var err error res := new(Result) @@ -309,7 +310,8 @@ func (r *responseHelper) expandResponseRef( func (r *responseHelper) responseMsgVariants( responseType string, - responseCode int) (responseName, responseCodeAsStr string) { + responseCode int, +) (responseName, responseCodeAsStr string) { // Path variants for messages if responseType == jsonDefault { responseCodeAsStr = jsonDefault diff --git a/vendor/github.com/go-openapi/validate/object_validator.go b/vendor/github.com/go-openapi/validate/object_validator.go index cf98ed377d5..e651b3f70f0 100644 --- a/vendor/github.com/go-openapi/validate/object_validator.go +++ b/vendor/github.com/go-openapi/validate/object_validator.go @@ -31,7 +31,8 @@ type objectValidator struct { func newObjectValidator(path, in string, maxProperties, minProperties *int64, required []string, properties spec.SchemaProperties, additionalProperties *spec.SchemaOrBool, patternProperties spec.SchemaProperties, - root any, formats strfmt.Registry, opts *SchemaValidatorOptions) *objectValidator { + root any, formats strfmt.Registry, opts *SchemaValidatorOptions, +) *objectValidator { 
if opts == nil { opts = new(SchemaValidatorOptions) } @@ -104,7 +105,7 @@ func (o *objectValidator) Validate(data any) *Result { o.validatePropertiesSchema(val, res) // Check patternProperties - // TODO: it looks like we have done that twice in many cases + // NOTE: it looks like we have done that twice in many cases for key, value := range val { _, regularProperty := o.Properties[key] matched, _, patterns := o.validatePatternProperty(key, value, res) // applies to regular properties as well @@ -115,7 +116,7 @@ func (o *objectValidator) Validate(data any) *Result { for _, pName := range patterns { if v, ok := o.PatternProperties[pName]; ok { r := newSchemaValidator(&v, o.Root, o.Path+"."+key, o.KnownFormats, o.Options).Validate(value) - res.mergeForField(data.(map[string]any), key, r) + res.mergeForField(data.(map[string]any), key, r) //nolint:forcetypeassert // data is always map[string]any at this point } } } @@ -129,7 +130,7 @@ func (o *objectValidator) SetPath(path string) { } func (o *objectValidator) Applies(source any, kind reflect.Kind) bool { - // TODO: this should also work for structs + // NOTE: this should also work for structs // there is a problem in the type validator where it will be unhappy about null values // so that requires more testing _, isSchema := source.(*spec.Schema) @@ -285,7 +286,7 @@ func (o *objectValidator) validateNoAdditionalProperties(val map[string]any, res /* case "$ref": if val[k] != nil { - // TODO: check context of that ref: warn about siblings, check against invalid context + // Proposal for enhancement: check context of that ref: warn about siblings, check against invalid context } */ } @@ -377,7 +378,7 @@ func (o *objectValidator) validatePropertiesSchema(val map[string]any, res *Resu } } -// TODO: succeededOnce is not used anywhere +// NOTE: succeededOnce is not used anywhere. 
func (o *objectValidator) validatePatternProperty(key string, value any, result *Result) (bool, bool, []string) { if len(o.PatternProperties) == 0 { return false, false, nil diff --git a/vendor/github.com/go-openapi/validate/options.go b/vendor/github.com/go-openapi/validate/options.go index f5e7f7131c7..6a6891a6ec7 100644 --- a/vendor/github.com/go-openapi/validate/options.go +++ b/vendor/github.com/go-openapi/validate/options.go @@ -5,7 +5,7 @@ package validate import "sync" -// Opts specifies validation options for a SpecValidator. +// Opts specifies validation options for a [SpecValidator]. // // NOTE: other options might be needed, for example a go-swagger specific mode. type Opts struct { diff --git a/vendor/github.com/go-openapi/validate/pools.go b/vendor/github.com/go-openapi/validate/pools.go index 1e734be493b..c8936bd10ba 100644 --- a/vendor/github.com/go-openapi/validate/pools.go +++ b/vendor/github.com/go-openapi/validate/pools.go @@ -245,7 +245,7 @@ type ( ) func (p schemaValidatorsPool) BorrowValidator() *SchemaValidator { - return p.Get().(*SchemaValidator) + return p.Get().(*SchemaValidator) //nolint:forcetypeassert // pool New always returns this type } func (p schemaValidatorsPool) RedeemValidator(s *SchemaValidator) { @@ -254,7 +254,7 @@ func (p schemaValidatorsPool) RedeemValidator(s *SchemaValidator) { } func (p objectValidatorsPool) BorrowValidator() *objectValidator { - return p.Get().(*objectValidator) + return p.Get().(*objectValidator) //nolint:forcetypeassert // pool New always returns this type } func (p objectValidatorsPool) RedeemValidator(s *objectValidator) { @@ -262,7 +262,7 @@ func (p objectValidatorsPool) RedeemValidator(s *objectValidator) { } func (p sliceValidatorsPool) BorrowValidator() *schemaSliceValidator { - return p.Get().(*schemaSliceValidator) + return p.Get().(*schemaSliceValidator) //nolint:forcetypeassert // pool New always returns this type } func (p sliceValidatorsPool) RedeemValidator(s *schemaSliceValidator) { @@ 
-270,7 +270,7 @@ func (p sliceValidatorsPool) RedeemValidator(s *schemaSliceValidator) { } func (p itemsValidatorsPool) BorrowValidator() *itemsValidator { - return p.Get().(*itemsValidator) + return p.Get().(*itemsValidator) //nolint:forcetypeassert // pool New always returns this type } func (p itemsValidatorsPool) RedeemValidator(s *itemsValidator) { @@ -278,7 +278,7 @@ func (p itemsValidatorsPool) RedeemValidator(s *itemsValidator) { } func (p basicCommonValidatorsPool) BorrowValidator() *basicCommonValidator { - return p.Get().(*basicCommonValidator) + return p.Get().(*basicCommonValidator) //nolint:forcetypeassert // pool New always returns this type } func (p basicCommonValidatorsPool) RedeemValidator(s *basicCommonValidator) { @@ -286,7 +286,7 @@ func (p basicCommonValidatorsPool) RedeemValidator(s *basicCommonValidator) { } func (p headerValidatorsPool) BorrowValidator() *HeaderValidator { - return p.Get().(*HeaderValidator) + return p.Get().(*HeaderValidator) //nolint:forcetypeassert // pool New always returns this type } func (p headerValidatorsPool) RedeemValidator(s *HeaderValidator) { @@ -294,7 +294,7 @@ func (p headerValidatorsPool) RedeemValidator(s *HeaderValidator) { } func (p paramValidatorsPool) BorrowValidator() *ParamValidator { - return p.Get().(*ParamValidator) + return p.Get().(*ParamValidator) //nolint:forcetypeassert // pool New always returns this type } func (p paramValidatorsPool) RedeemValidator(s *ParamValidator) { @@ -302,7 +302,7 @@ func (p paramValidatorsPool) RedeemValidator(s *ParamValidator) { } func (p basicSliceValidatorsPool) BorrowValidator() *basicSliceValidator { - return p.Get().(*basicSliceValidator) + return p.Get().(*basicSliceValidator) //nolint:forcetypeassert // pool New always returns this type } func (p basicSliceValidatorsPool) RedeemValidator(s *basicSliceValidator) { @@ -310,7 +310,7 @@ func (p basicSliceValidatorsPool) RedeemValidator(s *basicSliceValidator) { } func (p numberValidatorsPool) BorrowValidator() 
*numberValidator { - return p.Get().(*numberValidator) + return p.Get().(*numberValidator) //nolint:forcetypeassert // pool New always returns this type } func (p numberValidatorsPool) RedeemValidator(s *numberValidator) { @@ -318,7 +318,7 @@ func (p numberValidatorsPool) RedeemValidator(s *numberValidator) { } func (p stringValidatorsPool) BorrowValidator() *stringValidator { - return p.Get().(*stringValidator) + return p.Get().(*stringValidator) //nolint:forcetypeassert // pool New always returns this type } func (p stringValidatorsPool) RedeemValidator(s *stringValidator) { @@ -326,7 +326,7 @@ func (p stringValidatorsPool) RedeemValidator(s *stringValidator) { } func (p schemaPropsValidatorsPool) BorrowValidator() *schemaPropsValidator { - return p.Get().(*schemaPropsValidator) + return p.Get().(*schemaPropsValidator) //nolint:forcetypeassert // pool New always returns this type } func (p schemaPropsValidatorsPool) RedeemValidator(s *schemaPropsValidator) { @@ -334,7 +334,7 @@ func (p schemaPropsValidatorsPool) RedeemValidator(s *schemaPropsValidator) { } func (p formatValidatorsPool) BorrowValidator() *formatValidator { - return p.Get().(*formatValidator) + return p.Get().(*formatValidator) //nolint:forcetypeassert // pool New always returns this type } func (p formatValidatorsPool) RedeemValidator(s *formatValidator) { @@ -342,7 +342,7 @@ func (p formatValidatorsPool) RedeemValidator(s *formatValidator) { } func (p typeValidatorsPool) BorrowValidator() *typeValidator { - return p.Get().(*typeValidator) + return p.Get().(*typeValidator) //nolint:forcetypeassert // pool New always returns this type } func (p typeValidatorsPool) RedeemValidator(s *typeValidator) { @@ -350,7 +350,7 @@ func (p typeValidatorsPool) RedeemValidator(s *typeValidator) { } func (p schemasPool) BorrowSchema() *spec.Schema { - return p.Get().(*spec.Schema) + return p.Get().(*spec.Schema) //nolint:forcetypeassert // pool New always returns this type } func (p schemasPool) RedeemSchema(s 
*spec.Schema) { @@ -358,7 +358,7 @@ func (p schemasPool) RedeemSchema(s *spec.Schema) { } func (p resultsPool) BorrowResult() *Result { - return p.Get().(*Result).cleared() + return p.Get().(*Result).cleared() //nolint:forcetypeassert // pool New always returns *Result } func (p resultsPool) RedeemResult(s *Result) { diff --git a/vendor/github.com/go-openapi/validate/result.go b/vendor/github.com/go-openapi/validate/result.go index 69219e99823..ede945503d7 100644 --- a/vendor/github.com/go-openapi/validate/result.go +++ b/vendor/github.com/go-openapi/validate/result.go @@ -25,7 +25,7 @@ var emptyResult = &Result{MatchCount: 1} // schema validation. Results from the validation branch // with most matches get eventually selected. // -// TODO: keep path of key originating the error +// Proposal for enhancement: keep path of key originating the error. type Result struct { Errors []error Warnings []error @@ -66,7 +66,7 @@ func NewFieldKey(obj map[string]any, field string) FieldKey { // Object returns the underlying object of this key. func (fk *FieldKey) Object() map[string]any { - return fk.object.Interface().(map[string]any) + return fk.object.Interface().(map[string]any) //nolint:forcetypeassert // object is always map[string]any } // Field returns the underlying field of this key. @@ -81,7 +81,7 @@ func NewItemKey(slice any, i int) ItemKey { // Slice returns the underlying slice of this key. func (ik *ItemKey) Slice() []any { - return ik.slice.Interface().([]any) + return ik.slice.Interface().([]any) //nolint:forcetypeassert // slice is always []any } // Index returns the underlying index of this key. @@ -283,14 +283,14 @@ func (r *Result) HasErrorsOrWarnings() bool { return len(r.Errors) > 0 || len(r.Warnings) > 0 } -// Inc increments the match count +// Inc increments the match count. func (r *Result) Inc() { r.MatchCount++ } -// AsError renders this result as an error interface +// AsError renders this result as an error interface. 
// -// TODO: reporting / pretty print with path ordered and indented +// Proposal for enhancement: reporting / pretty print with path ordered and indented. func (r *Result) AsError() error { if r.IsValid() { return nil @@ -425,7 +425,7 @@ func stripImportantTag(err error) error { } func (r *Result) keepRelevantErrors() *Result { - // TODO: this one is going to disapear... + // NOTE: this one is going to disapear... // keepRelevantErrors strips a result from standard errors and keeps // the ones which are supposedly more accurate. // diff --git a/vendor/github.com/go-openapi/validate/rexp.go b/vendor/github.com/go-openapi/validate/rexp.go index 795f148d0cf..478036c0876 100644 --- a/vendor/github.com/go-openapi/validate/rexp.go +++ b/vendor/github.com/go-openapi/validate/rexp.go @@ -10,7 +10,7 @@ import ( "sync/atomic" ) -// Cache for compiled regular expressions +// Cache for compiled regular expressions. var ( cacheMutex = &sync.Mutex{} reDict = atomic.Value{} // map[string]*re.Regexp diff --git a/vendor/github.com/go-openapi/validate/schema.go b/vendor/github.com/go-openapi/validate/schema.go index 375a98765d7..b72a47bc331 100644 --- a/vendor/github.com/go-openapi/validate/schema.go +++ b/vendor/github.com/go-openapi/validate/schema.go @@ -13,7 +13,7 @@ import ( "github.com/go-openapi/swag/jsonutils" ) -// SchemaValidator validates data against a JSON schema +// SchemaValidator validates data against a JSON schema. type SchemaValidator struct { Path string in string @@ -26,7 +26,7 @@ type SchemaValidator struct { // AgainstSchema validates the specified data against the provided schema, using a registry of supported formats. // -// When no pre-parsed *spec.Schema structure is provided, it uses a JSON schema as default. See example. +// When no pre-parsed *[spec.Schema] structure is provided, it uses a JSON schema as default. See example. 
func AgainstSchema(schema *spec.Schema, data any, formats strfmt.Registry, options ...Option) error { res := NewSchemaValidator(schema, nil, "", formats, append(options, WithRecycleValidators(true), withRecycleResults(true))..., @@ -103,18 +103,20 @@ func newSchemaValidator(schema *spec.Schema, rootSchema any, root string, format return s } -// SetPath sets the path for this schema valdiator +// SetPath sets the path for this schema validator. func (s *SchemaValidator) SetPath(path string) { s.Path = path } -// Applies returns true when this schema validator applies +// Applies returns true when this schema validator applies. func (s *SchemaValidator) Applies(source any, _ reflect.Kind) bool { _, ok := source.(*spec.Schema) return ok } -// Validate validates the data against the schema +// Validate validates the data against the schema. +// +//nolint:gocognit // refactor in a forthcoming PR func (s *SchemaValidator) Validate(data any) *Result { if s == nil { return emptyResult @@ -176,7 +178,7 @@ func (s *SchemaValidator) Validate(data any) *Result { d = dd } - // TODO: this part should be handed over to type validator + // Proposal for enhancement: this part should be handed over to type validator // Handle special case of json.Number data (number marshalled as string) isnumber := s.Schema != nil && (s.Schema.Type.Contains(numberType) || s.Schema.Type.Contains(integerType)) if num, ok := data.(json.Number); ok && isnumber { diff --git a/vendor/github.com/go-openapi/validate/schema_messages.go b/vendor/github.com/go-openapi/validate/schema_messages.go index e8c7c48ad7f..e0f1801e6e4 100644 --- a/vendor/github.com/go-openapi/validate/schema_messages.go +++ b/vendor/github.com/go-openapi/validate/schema_messages.go @@ -11,57 +11,64 @@ import ( const ( // ArrayDoesNotAllowAdditionalItemsError when an additionalItems construct is not verified by the array values provided. 
// - // TODO: should move to package go-openapi/errors + // Proposal for enhancement: should move to package go-openapi/errors. ArrayDoesNotAllowAdditionalItemsError = "array doesn't allow for additional items" - // HasDependencyError indicates that a dependencies construct was not verified + // HasDependencyError indicates that a dependencies construct was not verified. HasDependencyError = "%q has a dependency on %s" - // InvalidSchemaProvidedError indicates that the schema provided to validate a value cannot be properly compiled + // InvalidSchemaProvidedError indicates that the schema provided to validate a value cannot be properly compiled. InvalidSchemaProvidedError = "Invalid schema provided to SchemaValidator: %v" - // InvalidTypeConversionError indicates that a numerical conversion for the given type could not be carried on + // InvalidTypeConversionError indicates that a numerical conversion for the given type could not be carried on. InvalidTypeConversionError = "invalid type conversion in %s: %v " - // MustValidateAtLeastOneSchemaError indicates that in a AnyOf construct, none of the schema constraints specified were verified + // MustValidateAtLeastOneSchemaError indicates that in a AnyOf construct, none of the schema constraints specified were verified. MustValidateAtLeastOneSchemaError = "%q must validate at least one schema (anyOf)" - // MustValidateOnlyOneSchemaError indicates that in a OneOf construct, either none of the schema constraints specified were verified, or several were + // MustValidateOnlyOneSchemaError indicates that in a OneOf construct, either none of the schema constraints specified were verified, or several were. MustValidateOnlyOneSchemaError = "%q must validate one and only one schema (oneOf). 
%s" - // MustValidateAllSchemasError indicates that in a AllOf construct, at least one of the schema constraints specified were not verified + // MustValidateAllSchemasError indicates that in a AllOf construct, at least one of the schema constraints specified were not verified. // - // TODO: punctuation in message + // NOTE: punctuation in message. MustValidateAllSchemasError = "%q must validate all the schemas (allOf)%s" - // MustNotValidateSchemaError indicates that in a Not construct, the schema constraint specified was verified + // MustNotValidateSchemaError indicates that in a Not construct, the schema constraint specified was verified. MustNotValidateSchemaError = "%q must not validate the schema (not)" ) -// Warning messages related to schema validation and returned as results +// Warning messages related to schema validation and returned as results. const () func invalidSchemaProvidedMsg(err error) errors.Error { return errors.New(InternalErrorCode, InvalidSchemaProvidedError, err) } + func invalidTypeConversionMsg(path string, err error) errors.Error { return errors.New(errors.CompositeErrorCode, InvalidTypeConversionError, path, err) } + func mustValidateOnlyOneSchemaMsg(path, additionalMsg string) errors.Error { return errors.New(errors.CompositeErrorCode, MustValidateOnlyOneSchemaError, path, additionalMsg) } + func mustValidateAtLeastOneSchemaMsg(path string) errors.Error { return errors.New(errors.CompositeErrorCode, MustValidateAtLeastOneSchemaError, path) } + func mustValidateAllSchemasMsg(path, additionalMsg string) errors.Error { return errors.New(errors.CompositeErrorCode, MustValidateAllSchemasError, path, additionalMsg) } + func mustNotValidatechemaMsg(path string) errors.Error { return errors.New(errors.CompositeErrorCode, MustNotValidateSchemaError, path) } + func hasADependencyMsg(path, depkey string) errors.Error { return errors.New(errors.CompositeErrorCode, HasDependencyError, path, depkey) } + func arrayDoesNotAllowAdditionalItemsMsg() 
errors.Error { return errors.New(errors.CompositeErrorCode, ArrayDoesNotAllowAdditionalItemsError) } diff --git a/vendor/github.com/go-openapi/validate/schema_option.go b/vendor/github.com/go-openapi/validate/schema_option.go index d9fd21a75a1..3e1b882ed38 100644 --- a/vendor/github.com/go-openapi/validate/schema_option.go +++ b/vendor/github.com/go-openapi/validate/schema_option.go @@ -3,7 +3,7 @@ package validate -// SchemaValidatorOptions defines optional rules for schema validation +// SchemaValidatorOptions defines optional rules for schema validation. type SchemaValidatorOptions struct { EnableObjectArrayTypeCheck bool EnableArrayMustHaveItemsCheck bool @@ -12,24 +12,24 @@ type SchemaValidatorOptions struct { skipSchemataResult bool } -// Option sets optional rules for schema validation +// Option sets optional rules for schema validation. type Option func(*SchemaValidatorOptions) -// EnableObjectArrayTypeCheck activates the swagger rule: an items must be in type: array +// EnableObjectArrayTypeCheck activates the swagger rule: an items must be in type: array. func EnableObjectArrayTypeCheck(enable bool) Option { return func(svo *SchemaValidatorOptions) { svo.EnableObjectArrayTypeCheck = enable } } -// EnableArrayMustHaveItemsCheck activates the swagger rule: an array must have items defined +// EnableArrayMustHaveItemsCheck activates the swagger rule: an array must have items defined. func EnableArrayMustHaveItemsCheck(enable bool) Option { return func(svo *SchemaValidatorOptions) { svo.EnableArrayMustHaveItemsCheck = enable } } -// SwaggerSchema activates swagger schema validation rules +// SwaggerSchema activates swagger schema validation rules. 
func SwaggerSchema(enable bool) Option { return func(svo *SchemaValidatorOptions) { svo.EnableObjectArrayTypeCheck = enable @@ -53,14 +53,14 @@ func withRecycleResults(enable bool) Option { } } -// WithSkipSchemataResult skips the deep audit payload stored in validation Result +// WithSkipSchemataResult skips the deep audit payload stored in validation Result. func WithSkipSchemataResult(enable bool) Option { return func(svo *SchemaValidatorOptions) { svo.skipSchemataResult = enable } } -// Options returns the current set of options +// Options returns the current set of options. func (svo SchemaValidatorOptions) Options() []Option { return []Option{ EnableObjectArrayTypeCheck(svo.EnableObjectArrayTypeCheck), diff --git a/vendor/github.com/go-openapi/validate/schema_props.go b/vendor/github.com/go-openapi/validate/schema_props.go index 485f536adc3..2c4354d08a6 100644 --- a/vendor/github.com/go-openapi/validate/schema_props.go +++ b/vendor/github.com/go-openapi/validate/schema_props.go @@ -34,7 +34,8 @@ func (s *schemaPropsValidator) SetPath(path string) { func newSchemaPropsValidator( path string, in string, allOf, oneOf, anyOf []spec.Schema, not *spec.Schema, deps spec.Dependencies, root any, formats strfmt.Registry, - opts *SchemaValidatorOptions) *schemaPropsValidator { + opts *SchemaValidatorOptions, +) *schemaPropsValidator { if opts == nil { opts = new(SchemaValidatorOptions) } @@ -281,7 +282,7 @@ func (s *schemaPropsValidator) validateNot(data any, mainResult *Result) { } func (s *schemaPropsValidator) validateDependencies(data any, mainResult *Result) { - val := data.(map[string]any) + val := data.(map[string]any) //nolint:forcetypeassert // caller guarantees map[string]any for key := range val { dep, ok := s.Dependencies[key] if !ok { diff --git a/vendor/github.com/go-openapi/validate/slice_validator.go b/vendor/github.com/go-openapi/validate/slice_validator.go index 4a5a2089687..8f49d137073 100644 --- 
a/vendor/github.com/go-openapi/validate/slice_validator.go +++ b/vendor/github.com/go-openapi/validate/slice_validator.go @@ -27,7 +27,8 @@ type schemaSliceValidator struct { func newSliceValidator(path, in string, maxItems, minItems *int64, uniqueItems bool, additionalItems *spec.SchemaOrBool, items *spec.SchemaOrArray, - root any, formats strfmt.Registry, opts *SchemaValidatorOptions) *schemaSliceValidator { + root any, formats strfmt.Registry, opts *SchemaValidatorOptions, +) *schemaSliceValidator { if opts == nil { opts = new(SchemaValidatorOptions) } diff --git a/vendor/github.com/go-openapi/validate/spec.go b/vendor/github.com/go-openapi/validate/spec.go index 8616a861f28..b85432f92b1 100644 --- a/vendor/github.com/go-openapi/validate/spec.go +++ b/vendor/github.com/go-openapi/validate/spec.go @@ -25,16 +25,16 @@ import ( // // Returns an error flattening in a single standard error, all validation messages. // -// - TODO: $ref should not have siblings -// - TODO: make sure documentation reflects all checks and warnings -// - TODO: check on discriminators -// - TODO: explicit message on unsupported keywords (better than "forbidden property"...) -// - TODO: full list of unresolved refs -// - TODO: validate numeric constraints (issue#581): this should be handled like defaults and examples -// - TODO: option to determine if we validate for go-swagger or in a more general context -// - TODO: check on required properties to support anyOf, allOf, oneOf +// - Proposal for enhancement: $ref should not have siblings +// - Proposal for enhancement: make sure documentation reflects all checks and warnings +// - Proposal for enhancement: check on discriminators +// - Proposal for enhancement: explicit message on unsupported keywords (better than "forbidden property"...) 
+// - Proposal for enhancement: full list of unresolved refs +// - Proposal for enhancement: validate numeric constraints (issue#581): this should be handled like defaults and examples +// - Proposal for enhancement: option to determine if we validate for go-swagger or in a more general context +// - Proposal for enhancement: check on required properties to support anyOf, allOf, oneOf // -// NOTE: SecurityScopes are maps: no need to check uniqueness +// NOTE: SecurityScopes are maps: no need to check uniqueness. func Spec(doc *loads.Document, formats strfmt.Registry) error { errs, _ /*warns*/ := NewSpecValidator(doc.Schema(), formats).Validate(doc) if errs.HasErrors() { @@ -43,7 +43,7 @@ func Spec(doc *loads.Document, formats strfmt.Registry) error { return nil } -// SpecValidator validates a swagger 2.0 spec +// SpecValidator validates a swagger 2.0 spec. type SpecValidator struct { schema *spec.Schema // swagger 2.0 schema spec *loads.Document @@ -54,7 +54,7 @@ type SpecValidator struct { schemaOptions *SchemaValidatorOptions } -// NewSpecValidator creates a new swagger spec validator instance +// NewSpecValidator creates a new swagger spec validator instance. func NewSpecValidator(schema *spec.Schema, formats strfmt.Registry) *SpecValidator { // schema options that apply to all called validators schemaOptions := new(SchemaValidatorOptions) @@ -74,7 +74,7 @@ func NewSpecValidator(schema *spec.Schema, formats strfmt.Registry) *SpecValidat } } -// Validate validates the swagger spec +// Validate validates the swagger spec. 
func (s *SpecValidator) Validate(data any) (*Result, *Result) { s.schemaOptions.skipSchemataResult = s.Options.SkipSchemataResult var sd *loads.Document @@ -347,6 +347,7 @@ func (s *SpecValidator) validateCircularAncestry(nm string, sch spec.Schema, kno return ancs, res } +//nolint:gocognit // refactor in a forthcoming PR func (s *SpecValidator) validateItems() *Result { // validate parameter, items, schema and response objects for presence of item if type is array res := pools.poolOfResults.BorrowResult() @@ -406,7 +407,7 @@ func (s *SpecValidator) validateItems() *Result { return res } -// Verifies constraints on array type +// Verifies constraints on array type. func (s *SpecValidator) validateSchemaItems(schema spec.Schema, prefix, opID string) *Result { res := pools.poolOfResults.BorrowResult() if !schema.Type.Contains(arrayType) { @@ -597,7 +598,7 @@ func (s *SpecValidator) validateRequiredProperties(path, in string, v *spec.Sche } else if v.AdditionalProperties.Schema != nil { // additionalProperties as schema are upported in swagger // recursively validates additionalProperties schema - // TODO : anyOf, allOf, oneOf like in schemaPropsValidator + // Proposal for enhancement: anyOf, allOf, oneOf like in schemaPropsValidator red := s.validateRequiredProperties(path, in, v.AdditionalProperties.Schema) if red.IsValid() { additionalPropertiesMatch = true @@ -620,6 +621,7 @@ func (s *SpecValidator) validateRequiredProperties(path, in string, v *spec.Sche return res } +//nolint:gocognit // refactor in a forthcoming PR func (s *SpecValidator) validateParameters() *Result { // - for each method, path is unique, regardless of path parameters // e.g. 
GET:/petstore/{id}, GET:/petstore/{pet}, GET:/petstore are @@ -645,7 +647,6 @@ func (s *SpecValidator) validateParameters() *Result { // Check uniqueness of stripped paths if _, found := methodPaths[method][pathToAdd]; found { - // Sort names for stable, testable output if strings.Compare(path, methodPaths[method][pathToAdd]) < 0 { res.AddErrors(pathOverlapMsg(path, methodPaths[method][pathToAdd])) @@ -666,7 +667,7 @@ func (s *SpecValidator) validateParameters() *Result { var hasForm, hasBody bool // Check parameters names uniqueness for operation - // TODO: should be done after param expansion + // NOTE: should be done after param expansion res.Merge(s.checkUniqueParams(path, method, op)) // pick the root schema from the swagger specification which describes a parameter diff --git a/vendor/github.com/go-openapi/validate/spec_messages.go b/vendor/github.com/go-openapi/validate/spec_messages.go index 9b079af647a..42ce3602852 100644 --- a/vendor/github.com/go-openapi/validate/spec_messages.go +++ b/vendor/github.com/go-openapi/validate/spec_messages.go @@ -20,25 +20,25 @@ const ( // ArrayInHeaderRequiresItemsError ... ArrayInHeaderRequiresItemsError = "header %q for %q is a collection without an element type (array requires items definition)" - // BothFormDataAndBodyError indicates that an operation specifies both a body and a formData parameter, which is forbidden + // BothFormDataAndBodyError indicates that an operation specifies both a body and a formData parameter, which is forbidden. BothFormDataAndBodyError = "operation %q has both formData and body parameters. Only one such In: type may be used for a given operation" - // CannotResolveReferenceError when a $ref could not be resolved + // CannotResolveReferenceError when a $ref could not be resolved. CannotResolveReferenceError = "could not resolve reference in %s to $ref %s: %v" // CircularAncestryDefinitionError ... 
CircularAncestryDefinitionError = "definition %q has circular ancestry: %v" - // DefaultValueDoesNotValidateError results from an invalid default value provided + // DefaultValueDoesNotValidateError results from an invalid default value provided. DefaultValueDoesNotValidateError = "default value for %s in %s does not validate its schema" - // DefaultValueItemsDoesNotValidateError results from an invalid default value provided for Items + // DefaultValueItemsDoesNotValidateError results from an invalid default value provided for Items. DefaultValueItemsDoesNotValidateError = "default value for %s.items in %s does not validate its schema" - // DefaultValueHeaderDoesNotValidateError results from an invalid default value provided in header + // DefaultValueHeaderDoesNotValidateError results from an invalid default value provided in header. DefaultValueHeaderDoesNotValidateError = "in operation %q, default value in header %s for %s does not validate its schema" - // DefaultValueHeaderItemsDoesNotValidateError results from an invalid default value provided in header.items + // DefaultValueHeaderItemsDoesNotValidateError results from an invalid default value provided in header.items. DefaultValueHeaderItemsDoesNotValidateError = "in operation %q, default value in header.items %s for %s does not validate its schema" // DefaultValueInDoesNotValidateError ... @@ -50,31 +50,31 @@ const ( // DuplicatePropertiesError ... DuplicatePropertiesError = "definition %q contains duplicate properties: %v" - // ExampleValueDoesNotValidateError results from an invalid example value provided + // ExampleValueDoesNotValidateError results from an invalid example value provided. ExampleValueDoesNotValidateError = "example value for %s in %s does not validate its schema" - // ExampleValueItemsDoesNotValidateError results from an invalid example value provided for Items + // ExampleValueItemsDoesNotValidateError results from an invalid example value provided for Items. 
ExampleValueItemsDoesNotValidateError = "example value for %s.items in %s does not validate its schema" - // ExampleValueHeaderDoesNotValidateError results from an invalid example value provided in header + // ExampleValueHeaderDoesNotValidateError results from an invalid example value provided in header. ExampleValueHeaderDoesNotValidateError = "in operation %q, example value in header %s for %s does not validate its schema" - // ExampleValueHeaderItemsDoesNotValidateError results from an invalid example value provided in header.items + // ExampleValueHeaderItemsDoesNotValidateError results from an invalid example value provided in header.items. ExampleValueHeaderItemsDoesNotValidateError = "in operation %q, example value in header.items %s for %s does not validate its schema" // ExampleValueInDoesNotValidateError ... ExampleValueInDoesNotValidateError = "in operation %q, example value in %s does not validate its schema" - // EmptyPathParameterError means that a path parameter was found empty (e.g. "{}") + // EmptyPathParameterError means that a path parameter was found empty (e.g. "{}"). EmptyPathParameterError = "%q contains an empty path parameter" - // InvalidDocumentError states that spec validation only processes spec.Document objects + // InvalidDocumentError states that spec validation only processes spec.Document objects. InvalidDocumentError = "spec validator can only validate spec.Document objects" - // InvalidItemsPatternError indicates an Items definition with invalid pattern + // InvalidItemsPatternError indicates an Items definition with invalid pattern. InvalidItemsPatternError = "%s for %q has invalid items pattern: %q" - // InvalidParameterDefinitionError indicates an error detected on a parameter definition + // InvalidParameterDefinitionError indicates an error detected on a parameter definition. 
InvalidParameterDefinitionError = "invalid definition for parameter %s in %s in operation %q" // InvalidParameterDefinitionAsSchemaError indicates an error detected on a parameter definition, which was mistaken with a schema definition. @@ -84,41 +84,41 @@ const ( // InvalidPatternError ... InvalidPatternError = "pattern %q is invalid in %s" - // InvalidPatternInError indicates an invalid pattern in a schema or items definition + // InvalidPatternInError indicates an invalid pattern in a schema or items definition. InvalidPatternInError = "%s in %s has invalid pattern: %q" - // InvalidPatternInHeaderError indicates a header definition with an invalid pattern + // InvalidPatternInHeaderError indicates a header definition with an invalid pattern. InvalidPatternInHeaderError = "in operation %q, header %s for %s has invalid pattern %q: %v" // InvalidPatternInParamError ... InvalidPatternInParamError = "operation %q has invalid pattern in param %q: %q" - // InvalidReferenceError indicates that a $ref property could not be resolved + // InvalidReferenceError indicates that a $ref property could not be resolved. InvalidReferenceError = "invalid ref %q" // InvalidResponseDefinitionAsSchemaError indicates an error detected on a response definition, which was mistaken with a schema definition. // Most likely, this situation is encountered whenever a $ref has been added as a sibling of the response definition. InvalidResponseDefinitionAsSchemaError = "invalid definition as Schema for response %s in %s" - // MultipleBodyParamError indicates that an operation specifies multiple parameter with in: body + // MultipleBodyParamError indicates that an operation specifies multiple parameter with in: body. MultipleBodyParamError = "operation %q has more than 1 body param: %v" - // NonUniqueOperationIDError indicates that the same operationId has been specified several times + // NonUniqueOperationIDError indicates that the same operationId has been specified several times. 
NonUniqueOperationIDError = "%q is defined %d times" - // NoParameterInPathError indicates that a path was found without any parameter + // NoParameterInPathError indicates that a path was found without any parameter. NoParameterInPathError = "path param %q has no parameter definition" // NoValidPathErrorOrWarning indicates that no single path could be validated. If Paths is empty, this message is only a warning. NoValidPathErrorOrWarning = "spec has no valid path defined" - // NoValidResponseError indicates that no valid response description could be found for an operation + // NoValidResponseError indicates that no valid response description could be found for an operation. NoValidResponseError = "operation %q has no valid response" // PathOverlapError ... PathOverlapError = "path %s overlaps with %s" - // PathParamNotInPathError indicates that a parameter specified with in: path was not found in the path specification + // PathParamNotInPathError indicates that a parameter specified with in: path was not found in the path specification. PathParamNotInPathError = "path param %q is not present in path %q" // PathParamNotUniqueError ... @@ -127,32 +127,32 @@ const ( // PathParamRequiredError ... PathParamRequiredError = "in operation %q,path param %q must be declared as required" - // RefNotAllowedInHeaderError indicates a $ref was found in a header definition, which is not allowed by Swagger + // RefNotAllowedInHeaderError indicates a $ref was found in a header definition, which is not allowed by Swagger. RefNotAllowedInHeaderError = "IMPORTANT!in %q: $ref are not allowed in headers. In context for header %q%s" // RequiredButNotDefinedError ... 
RequiredButNotDefinedError = "%q is present in required but not defined as property in definition %q" - // SomeParametersBrokenError indicates that some parameters could not be resolved, which might result in partial checks to be carried on + // SomeParametersBrokenError indicates that some parameters could not be resolved, which might result in partial checks to be carried on. SomeParametersBrokenError = "some parameters definitions are broken in %q.%s. Cannot carry on full checks on parameters for operation %s" - // UnresolvedReferencesError indicates that at least one $ref could not be resolved + // UnresolvedReferencesError indicates that at least one $ref could not be resolved. UnresolvedReferencesError = "some references could not be resolved in spec. First found: %v" ) -// Warning messages related to spec validation and returned as results +// Warning messages related to spec validation and returned as results. const ( - // ExamplesWithoutSchemaWarning indicates that examples are provided for a response,but not schema to validate the example against + // ExamplesWithoutSchemaWarning indicates that examples are provided for a response,but not schema to validate the example against. ExamplesWithoutSchemaWarning = "Examples provided without schema in operation %q, %s" // ExamplesMimeNotSupportedWarning indicates that examples are provided with a mime type different than application/json, which - // the validator dos not support yetl + // the validator dos not support yetl. ExamplesMimeNotSupportedWarning = "No validation attempt for examples for media types other than application/json, in operation %q, %s" // PathParamGarbledWarning ... PathParamGarbledWarning = "in path %q, param %q contains {,} or white space. 
Albeit not stricly illegal, this is probably no what you want" - // ParamValidationTypeMismatch indicates that parameter has validation which does not match its type + // ParamValidationTypeMismatch indicates that parameter has validation which does not match its type. ParamValidationTypeMismatch = "validation keywords of parameter %q in path %q don't match its type %s" // PathStrippedParamGarbledWarning ... @@ -165,7 +165,7 @@ const ( // which is most likely not wanted. RefShouldNotHaveSiblingsWarning = "$ref property should have no sibling in %q.%s" - // RequiredHasDefaultWarning indicates that a required parameter property should not have a default + // RequiredHasDefaultWarning indicates that a required parameter property should not have a default. RequiredHasDefaultWarning = "%s in %s has a default value and is required as parameter" // UnusedDefinitionWarning ... @@ -180,164 +180,214 @@ const ( InvalidObject = "expected an object in %q.%s" ) -// Additional error codes +// Additional error codes. const ( - // InternalErrorCode reports an internal technical error + // InternalErrorCode reports an internal technical error. InternalErrorCode = http.StatusInternalServerError - // NotFoundErrorCode indicates that a resource (e.g. a $ref) could not be found + // NotFoundErrorCode indicates that a resource (e.g. a $ref) could not be found. 
NotFoundErrorCode = http.StatusNotFound ) func invalidDocumentMsg() errors.Error { return errors.New(InternalErrorCode, InvalidDocumentError) } + func invalidRefMsg(path string) errors.Error { return errors.New(NotFoundErrorCode, InvalidReferenceError, path) } + func unresolvedReferencesMsg(err error) errors.Error { return errors.New(errors.CompositeErrorCode, UnresolvedReferencesError, err) } + func noValidPathMsg() errors.Error { return errors.New(errors.CompositeErrorCode, NoValidPathErrorOrWarning) } + func emptyPathParameterMsg(path string) errors.Error { return errors.New(errors.CompositeErrorCode, EmptyPathParameterError, path) } + func nonUniqueOperationIDMsg(path string, i int) errors.Error { return errors.New(errors.CompositeErrorCode, NonUniqueOperationIDError, path, i) } + func circularAncestryDefinitionMsg(path string, args any) errors.Error { return errors.New(errors.CompositeErrorCode, CircularAncestryDefinitionError, path, args) } + func duplicatePropertiesMsg(path string, args any) errors.Error { return errors.New(errors.CompositeErrorCode, DuplicatePropertiesError, path, args) } + func pathParamNotInPathMsg(path, param string) errors.Error { return errors.New(errors.CompositeErrorCode, PathParamNotInPathError, param, path) } + func arrayRequiresItemsMsg(path, operation string) errors.Error { return errors.New(errors.CompositeErrorCode, ArrayRequiresItemsError, path, operation) } + func arrayInParamRequiresItemsMsg(path, operation string) errors.Error { return errors.New(errors.CompositeErrorCode, ArrayInParamRequiresItemsError, path, operation) } + func arrayInHeaderRequiresItemsMsg(path, operation string) errors.Error { return errors.New(errors.CompositeErrorCode, ArrayInHeaderRequiresItemsError, path, operation) } + func invalidItemsPatternMsg(path, operation, pattern string) errors.Error { return errors.New(errors.CompositeErrorCode, InvalidItemsPatternError, path, operation, pattern) } + func invalidPatternMsg(pattern, path string) 
errors.Error { return errors.New(errors.CompositeErrorCode, InvalidPatternError, pattern, path) } + func requiredButNotDefinedMsg(path, definition string) errors.Error { return errors.New(errors.CompositeErrorCode, RequiredButNotDefinedError, path, definition) } + func pathParamGarbledMsg(path, param string) errors.Error { return errors.New(errors.CompositeErrorCode, PathParamGarbledWarning, path, param) } + func pathStrippedParamGarbledMsg(path string) errors.Error { return errors.New(errors.CompositeErrorCode, PathStrippedParamGarbledWarning, path) } + func pathOverlapMsg(path, arg string) errors.Error { return errors.New(errors.CompositeErrorCode, PathOverlapError, path, arg) } + func invalidPatternInParamMsg(operation, param, pattern string) errors.Error { return errors.New(errors.CompositeErrorCode, InvalidPatternInParamError, operation, param, pattern) } + func pathParamRequiredMsg(operation, param string) errors.Error { return errors.New(errors.CompositeErrorCode, PathParamRequiredError, operation, param) } + func bothFormDataAndBodyMsg(operation string) errors.Error { return errors.New(errors.CompositeErrorCode, BothFormDataAndBodyError, operation) } + func multipleBodyParamMsg(operation string, args any) errors.Error { return errors.New(errors.CompositeErrorCode, MultipleBodyParamError, operation, args) } + func pathParamNotUniqueMsg(path, param, arg string) errors.Error { return errors.New(errors.CompositeErrorCode, PathParamNotUniqueError, path, param, arg) } + func duplicateParamNameMsg(path, param, operation string) errors.Error { return errors.New(errors.CompositeErrorCode, DuplicateParamNameError, param, path, operation) } + func unusedParamMsg(arg string) errors.Error { return errors.New(errors.CompositeErrorCode, UnusedParamWarning, arg) } + func unusedDefinitionMsg(arg string) errors.Error { return errors.New(errors.CompositeErrorCode, UnusedDefinitionWarning, arg) } + func unusedResponseMsg(arg string) errors.Error { return 
errors.New(errors.CompositeErrorCode, UnusedResponseWarning, arg) } + func readOnlyAndRequiredMsg(path, param string) errors.Error { return errors.New(errors.CompositeErrorCode, ReadOnlyAndRequiredWarning, param, path) } + func noParameterInPathMsg(param string) errors.Error { return errors.New(errors.CompositeErrorCode, NoParameterInPathError, param) } + func requiredHasDefaultMsg(param, path string) errors.Error { return errors.New(errors.CompositeErrorCode, RequiredHasDefaultWarning, param, path) } + func defaultValueDoesNotValidateMsg(param, path string) errors.Error { return errors.New(errors.CompositeErrorCode, DefaultValueDoesNotValidateError, param, path) } + func defaultValueItemsDoesNotValidateMsg(param, path string) errors.Error { return errors.New(errors.CompositeErrorCode, DefaultValueItemsDoesNotValidateError, param, path) } + func noValidResponseMsg(operation string) errors.Error { return errors.New(errors.CompositeErrorCode, NoValidResponseError, operation) } + func defaultValueHeaderDoesNotValidateMsg(operation, header, path string) errors.Error { return errors.New(errors.CompositeErrorCode, DefaultValueHeaderDoesNotValidateError, operation, header, path) } + func defaultValueHeaderItemsDoesNotValidateMsg(operation, header, path string) errors.Error { return errors.New(errors.CompositeErrorCode, DefaultValueHeaderItemsDoesNotValidateError, operation, header, path) } + func invalidPatternInHeaderMsg(operation, header, path, pattern string, args any) errors.Error { return errors.New(errors.CompositeErrorCode, InvalidPatternInHeaderError, operation, header, path, pattern, args) } + func invalidPatternInMsg(path, in, pattern string) errors.Error { return errors.New(errors.CompositeErrorCode, InvalidPatternInError, path, in, pattern) } + func defaultValueInDoesNotValidateMsg(operation, path string) errors.Error { return errors.New(errors.CompositeErrorCode, DefaultValueInDoesNotValidateError, operation, path) } + func 
exampleValueDoesNotValidateMsg(param, path string) errors.Error { return errors.New(errors.CompositeErrorCode, ExampleValueDoesNotValidateError, param, path) } + func exampleValueItemsDoesNotValidateMsg(param, path string) errors.Error { return errors.New(errors.CompositeErrorCode, ExampleValueItemsDoesNotValidateError, param, path) } + func exampleValueHeaderDoesNotValidateMsg(operation, header, path string) errors.Error { return errors.New(errors.CompositeErrorCode, ExampleValueHeaderDoesNotValidateError, operation, header, path) } + func exampleValueHeaderItemsDoesNotValidateMsg(operation, header, path string) errors.Error { return errors.New(errors.CompositeErrorCode, ExampleValueHeaderItemsDoesNotValidateError, operation, header, path) } + func exampleValueInDoesNotValidateMsg(operation, path string) errors.Error { return errors.New(errors.CompositeErrorCode, ExampleValueInDoesNotValidateError, operation, path) } + func examplesWithoutSchemaMsg(operation, response string) errors.Error { return errors.New(errors.CompositeErrorCode, ExamplesWithoutSchemaWarning, operation, response) } + func examplesMimeNotSupportedMsg(operation, response string) errors.Error { return errors.New(errors.CompositeErrorCode, ExamplesMimeNotSupportedWarning, operation, response) } + func refNotAllowedInHeaderMsg(path, header, ref string) errors.Error { return errors.New(errors.CompositeErrorCode, RefNotAllowedInHeaderError, path, header, ref) } + func cannotResolveRefMsg(path, ref string, err error) errors.Error { return errors.New(errors.CompositeErrorCode, CannotResolveReferenceError, path, ref, err) } + func invalidParameterDefinitionMsg(path, method, operationID string) errors.Error { return errors.New(errors.CompositeErrorCode, InvalidParameterDefinitionError, path, method, operationID) } + func invalidParameterDefinitionAsSchemaMsg(path, method, operationID string) errors.Error { return errors.New(errors.CompositeErrorCode, InvalidParameterDefinitionAsSchemaError, path, 
method, operationID) } + func parameterValidationTypeMismatchMsg(param, path, typ string) errors.Error { return errors.New(errors.CompositeErrorCode, ParamValidationTypeMismatch, param, path, typ) } + func invalidObjectMsg(path, in string) errors.Error { return errors.New(errors.CompositeErrorCode, InvalidObject, path, in) } @@ -350,6 +400,7 @@ func invalidObjectMsg(path, in string) errors.Error { func someParametersBrokenMsg(path, method, operationID string) errors.Error { return errors.New(errors.CompositeErrorCode, SomeParametersBrokenError, path, method, operationID) } + func refShouldNotHaveSiblingsMsg(path, operationID string) errors.Error { return errors.New(errors.CompositeErrorCode, RefShouldNotHaveSiblingsWarning, operationID, path) } diff --git a/vendor/github.com/go-openapi/validate/type.go b/vendor/github.com/go-openapi/validate/type.go index 9b9ab8d917d..d29574c3498 100644 --- a/vendor/github.com/go-openapi/validate/type.go +++ b/vendor/github.com/go-openapi/validate/type.go @@ -71,7 +71,7 @@ func (t *typeValidator) Validate(data any) *Result { if data == nil { // nil or zero value for the passed structure require Type: null - if len(t.Type) > 0 && !t.Type.Contains(nullType) && !t.Nullable { // TODO: if a property is not required it also passes this + if len(t.Type) > 0 && !t.Type.Contains(nullType) && !t.Nullable { // NOTE: if a property is not required it also passes this return errorHelp.sErr(errors.InvalidType(t.Path, t.In, strings.Join(t.Type, ","), nullType), t.Options.recycleResult) } @@ -86,15 +86,18 @@ func (t *typeValidator) Validate(data any) *Result { schType, format := t.schemaInfoForType(data) // check numerical types - // TODO: check unsigned ints - // TODO: check json.Number (see schema.go) + // Proposal for enhancement: check unsigned ints + // Proposal for enhancement: check json.Number (see schema.go) isLowerInt := t.Format == integerFormatInt64 && format == integerFormatInt32 isLowerFloat := t.Format == numberFormatFloat64 && 
format == numberFormatFloat32 isFloatInt := schType == numberType && conv.IsFloat64AJSONInteger(val.Float()) && t.Type.Contains(integerType) isIntFloat := schType == integerType && t.Type.Contains(numberType) - if kind != reflect.String && kind != reflect.Slice && t.Format != "" && !t.Type.Contains(schType) && format != t.Format && !isFloatInt && !isIntFloat && !isLowerInt && !isLowerFloat { - // TODO: test case + formatMismatch := kind != reflect.String && kind != reflect.Slice && + t.Format != "" && !t.Type.Contains(schType) && format != t.Format && + !isFloatInt && !isIntFloat && !isLowerInt && !isLowerFloat + if formatMismatch { + // NOTE: test case return errorHelp.sErr(errors.InvalidType(t.Path, t.In, t.Format, format), t.Options.recycleResult) } @@ -112,7 +115,7 @@ func (t *typeValidator) Validate(data any) *Result { func (t *typeValidator) schemaInfoForType(data any) (string, string) { // internal type to JSON type with swagger 2.0 format (with go-openapi/strfmt extensions), // see https://github.com/go-openapi/strfmt/blob/master/README.md - // TODO: this switch really is some sort of reverse lookup for formats. It should be provided by strfmt. + // NOTE: this switch really is some sort of reverse lookup for formats. It should be provided by strfmt. 
switch data.(type) { case []byte, strfmt.Base64, *strfmt.Base64: return stringType, stringFormatByte @@ -162,8 +165,8 @@ func (t *typeValidator) schemaInfoForType(data any) (string, string) { return stringType, stringFormatUUID4 case strfmt.UUID5, *strfmt.UUID5: return stringType, stringFormatUUID5 - // TODO: missing binary (io.ReadCloser) - // TODO: missing json.Number + // Proposal for enhancement: missing binary (io.ReadCloser) + // Proposal for enhancement: missing json.Number default: val := reflect.ValueOf(data) tpe := val.Type() diff --git a/vendor/github.com/go-openapi/validate/update-fixtures.sh b/vendor/github.com/go-openapi/validate/update-fixtures.sh index 21b06e2b09a..8ee55ca3b24 100644 --- a/vendor/github.com/go-openapi/validate/update-fixtures.sh +++ b/vendor/github.com/go-openapi/validate/update-fixtures.sh @@ -1,4 +1,6 @@ -#!/bin/bash +#!/bin/bash +# SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +# SPDX-License-Identifier: Apache-2.0 set -eu -o pipefail dir=$(git rev-parse --show-toplevel) diff --git a/vendor/github.com/go-openapi/validate/validator.go b/vendor/github.com/go-openapi/validate/validator.go index 289a847fc7b..e7aebc52561 100644 --- a/vendor/github.com/go-openapi/validate/validator.go +++ b/vendor/github.com/go-openapi/validate/validator.go @@ -12,15 +12,15 @@ import ( "github.com/go-openapi/strfmt" ) -// An EntityValidator is an interface for things that can validate entities +// An EntityValidator is an interface for things that can validate entities. 
type EntityValidator interface { - Validate(any) *Result + Validate(data any) *Result } type valueValidator interface { SetPath(path string) - Applies(any, reflect.Kind) bool - Validate(any) *Result + Applies(source any, kind reflect.Kind) bool + Validate(data any) *Result } type itemsValidator struct { @@ -286,7 +286,7 @@ func (b *basicCommonValidator) redeem() { pools.poolOfBasicCommonValidators.RedeemValidator(b) } -// A HeaderValidator has very limited subset of validations to apply +// A HeaderValidator has very limited subset of validations to apply. type HeaderValidator struct { name string header *spec.Header @@ -295,7 +295,7 @@ type HeaderValidator struct { Options *SchemaValidatorOptions } -// NewHeaderValidator creates a new header validator object +// NewHeaderValidator creates a new header validator object. func NewHeaderValidator(name string, header *spec.Header, formats strfmt.Registry, options ...Option) *HeaderValidator { opts := new(SchemaValidatorOptions) for _, o := range options { @@ -340,7 +340,7 @@ func newHeaderValidator(name string, header *spec.Header, formats strfmt.Registr return p } -// Validate the value of the header against its schema +// Validate the value of the header against its schema. func (p *HeaderValidator) Validate(data any) *Result { if p.Options.recycleValidators { defer func() { @@ -479,7 +479,7 @@ func (p *HeaderValidator) redeemChildren() { } } -// A ParamValidator has very limited subset of validations to apply +// A ParamValidator has very limited subset of validations to apply. type ParamValidator struct { param *spec.Parameter validators [6]valueValidator @@ -487,7 +487,7 @@ type ParamValidator struct { Options *SchemaValidatorOptions } -// NewParamValidator creates a new param validator object +// NewParamValidator creates a new param validator object. 
func NewParamValidator(param *spec.Parameter, formats strfmt.Registry, options ...Option) *ParamValidator { opts := new(SchemaValidatorOptions) for _, o := range options { @@ -531,7 +531,7 @@ func newParamValidator(param *spec.Parameter, formats strfmt.Registry, opts *Sch return p } -// Validate the data against the description of the parameter +// Validate the data against the description of the parameter. func (p *ParamValidator) Validate(data any) *Result { if data == nil { return nil @@ -554,7 +554,7 @@ func (p *ParamValidator) Validate(data any) *Result { }() } - // TODO: validate type + // Proposal for enhancement: validate type for idx, validator := range p.validators { if !validator.Applies(p.param, kind) { if p.Options.recycleValidators { @@ -688,7 +688,8 @@ func newBasicSliceValidator( path, in string, def any, maxItems, minItems *int64, uniqueItems bool, items *spec.Items, source any, formats strfmt.Registry, - opts *SchemaValidatorOptions) *basicSliceValidator { + opts *SchemaValidatorOptions, +) *basicSliceValidator { if opts == nil { opts = new(SchemaValidatorOptions) } @@ -797,7 +798,8 @@ func newNumberValidator( path, in string, def any, multipleOf, maximum *float64, exclusiveMaximum bool, minimum *float64, exclusiveMinimum bool, typ, format string, - opts *SchemaValidatorOptions) *numberValidator { + opts *SchemaValidatorOptions, +) *numberValidator { if opts == nil { opts = new(SchemaValidatorOptions) } @@ -857,9 +859,9 @@ func (n *numberValidator) Applies(source any, kind reflect.Kind) bool { // // If this is the case, replace AddErrors() by AddWarnings() and IsValid() by !HasWarnings(). // -// TODO: consider replacing boundary check errors by simple warnings. +// Proposal for enhancement: consider replacing boundary check errors by simple warnings. // -// TODO: default boundaries with MAX_SAFE_INTEGER are not checked (specific to json.Number?) +// NOTE: default boundaries with MAX_SAFE_INTEGER are not checked (specific to json.Number?) 
func (n *numberValidator) Validate(val any) *Result { if n.Options.recycleValidators { defer func() { @@ -959,7 +961,8 @@ type stringValidator struct { func newStringValidator( path, in string, def any, required, allowEmpty bool, maxLength, minLength *int64, pattern string, - opts *SchemaValidatorOptions) *stringValidator { + opts *SchemaValidatorOptions, +) *stringValidator { if opts == nil { opts = new(SchemaValidatorOptions) } diff --git a/vendor/github.com/go-openapi/validate/values.go b/vendor/github.com/go-openapi/validate/values.go index e7dd5c8d3ab..2b80766bd50 100644 --- a/vendor/github.com/go-openapi/validate/values.go +++ b/vendor/github.com/go-openapi/validate/values.go @@ -21,15 +21,15 @@ func (e valueError) Error() string { return string(e) } -// ErrValue indicates that a value validation occurred +// ErrValue indicates that a value validation occurred. const ErrValue valueError = "value validation error" -// Enum validates if the data is a member of the enum +// Enum validates if the data is a member of the enum. func Enum(path, in string, data any, enum any) *errors.Validation { return EnumCase(path, in, data, enum, true) } -// EnumCase validates if the data is a member of the enum and may respect case-sensitivity for strings +// EnumCase validates if the data is a member of the enum and may respect case-sensitivity for strings. func EnumCase(path, in string, data any, enum any, caseSensitive bool) *errors.Validation { val := reflect.ValueOf(enum) if val.Kind() != reflect.Slice { @@ -66,7 +66,7 @@ func EnumCase(path, in string, data any, enum any, caseSensitive bool) *errors.V return errors.EnumFail(path, in, data, values) } -// convertEnumCaseStringKind converts interface if it is kind of string and case insensitivity is set +// convertEnumCaseStringKind converts interface if it is kind of string and case insensitivity is set. 
func convertEnumCaseStringKind(value any, caseSensitive bool) *string { if caseSensitive { return nil @@ -81,7 +81,7 @@ func convertEnumCaseStringKind(value any, caseSensitive bool) *string { return &str } -// MinItems validates that there are at least n items in a slice +// MinItems validates that there are at least n items in a slice. func MinItems(path, in string, size, minimum int64) *errors.Validation { if size < minimum { return errors.TooFewItems(path, in, minimum, size) @@ -89,7 +89,7 @@ func MinItems(path, in string, size, minimum int64) *errors.Validation { return nil } -// MaxItems validates that there are at most n items in a slice +// MaxItems validates that there are at most n items in a slice. func MaxItems(path, in string, size, maximum int64) *errors.Validation { if size > maximum { return errors.TooManyItems(path, in, maximum, size) @@ -97,7 +97,7 @@ func MaxItems(path, in string, size, maximum int64) *errors.Validation { return nil } -// UniqueItems validates that the provided slice has unique elements +// UniqueItems validates that the provided slice has unique elements. func UniqueItems(path, in string, data any) *errors.Validation { val := reflect.ValueOf(data) if val.Kind() != reflect.Slice { @@ -116,7 +116,7 @@ func UniqueItems(path, in string, data any) *errors.Validation { return nil } -// MinLength validates a string for minimum length +// MinLength validates a string for minimum length. func MinLength(path, in, data string, minLength int64) *errors.Validation { strLen := int64(utf8.RuneCountInString(data)) if strLen < minLength { @@ -125,7 +125,7 @@ func MinLength(path, in, data string, minLength int64) *errors.Validation { return nil } -// MaxLength validates a string for maximum length +// MaxLength validates a string for maximum length. 
func MaxLength(path, in, data string, maxLength int64) *errors.Validation { strLen := int64(utf8.RuneCountInString(data)) if strLen > maxLength { @@ -134,9 +134,8 @@ func MaxLength(path, in, data string, maxLength int64) *errors.Validation { return nil } -// ReadOnly validates an interface for readonly +// ReadOnly validates an interface for readonly. func ReadOnly(ctx context.Context, path, in string, data any) *errors.Validation { - // read only is only validated when operationType is request if op := extractOperationType(ctx); op != request { return nil @@ -155,7 +154,7 @@ func ReadOnly(ctx context.Context, path, in string, data any) *errors.Validation return errors.ReadOnly(path, in, data) } -// Required validates an interface for requiredness +// Required validates an interface for requiredness. func Required(path, in string, data any) *errors.Validation { val := reflect.ValueOf(data) if val.IsValid() { @@ -167,7 +166,7 @@ func Required(path, in string, data any) *errors.Validation { return errors.Required(path, in, data) } -// RequiredString validates a string for requiredness +// RequiredString validates a string for requiredness. func RequiredString(path, in, data string) *errors.Validation { if data == "" { return errors.Required(path, in, data) @@ -175,7 +174,7 @@ func RequiredString(path, in, data string) *errors.Validation { return nil } -// RequiredNumber validates a number for requiredness +// RequiredNumber validates a number for requiredness. func RequiredNumber(path, in string, data float64) *errors.Validation { if data == 0 { return errors.Required(path, in, data) @@ -183,7 +182,7 @@ func RequiredNumber(path, in string, data float64) *errors.Validation { return nil } -// Pattern validates a string against a regular expression +// Pattern validates a string against a regular expression. 
func Pattern(path, in, data, pattern string) *errors.Validation { re, err := compileRegexp(pattern) if err != nil { @@ -195,7 +194,7 @@ func Pattern(path, in, data, pattern string) *errors.Validation { return nil } -// MaximumInt validates if a number is smaller than a given maximum +// MaximumInt validates if a number is smaller than a given maximum. func MaximumInt(path, in string, data, maximum int64, exclusive bool) *errors.Validation { if (!exclusive && data > maximum) || (exclusive && data >= maximum) { return errors.ExceedsMaximumInt(path, in, maximum, exclusive, data) @@ -203,7 +202,7 @@ func MaximumInt(path, in string, data, maximum int64, exclusive bool) *errors.Va return nil } -// MaximumUint validates if a number is smaller than a given maximum +// MaximumUint validates if a number is smaller than a given maximum. func MaximumUint(path, in string, data, maximum uint64, exclusive bool) *errors.Validation { if (!exclusive && data > maximum) || (exclusive && data >= maximum) { return errors.ExceedsMaximumUint(path, in, maximum, exclusive, data) @@ -211,7 +210,7 @@ func MaximumUint(path, in string, data, maximum uint64, exclusive bool) *errors. return nil } -// Maximum validates if a number is smaller than a given maximum +// Maximum validates if a number is smaller than a given maximum. func Maximum(path, in string, data, maximum float64, exclusive bool) *errors.Validation { if (!exclusive && data > maximum) || (exclusive && data >= maximum) { return errors.ExceedsMaximum(path, in, maximum, exclusive, data) @@ -219,7 +218,7 @@ func Maximum(path, in string, data, maximum float64, exclusive bool) *errors.Val return nil } -// Minimum validates if a number is smaller than a given minimum +// Minimum validates if a number is smaller than a given minimum. 
func Minimum(path, in string, data, minimum float64, exclusive bool) *errors.Validation { if (!exclusive && data < minimum) || (exclusive && data <= minimum) { return errors.ExceedsMinimum(path, in, minimum, exclusive, data) @@ -227,7 +226,7 @@ func Minimum(path, in string, data, minimum float64, exclusive bool) *errors.Val return nil } -// MinimumInt validates if a number is smaller than a given minimum +// MinimumInt validates if a number is smaller than a given minimum. func MinimumInt(path, in string, data, minimum int64, exclusive bool) *errors.Validation { if (!exclusive && data < minimum) || (exclusive && data <= minimum) { return errors.ExceedsMinimumInt(path, in, minimum, exclusive, data) @@ -235,7 +234,7 @@ func MinimumInt(path, in string, data, minimum int64, exclusive bool) *errors.Va return nil } -// MinimumUint validates if a number is smaller than a given minimum +// MinimumUint validates if a number is smaller than a given minimum. func MinimumUint(path, in string, data, minimum uint64, exclusive bool) *errors.Validation { if (!exclusive && data < minimum) || (exclusive && data <= minimum) { return errors.ExceedsMinimumUint(path, in, minimum, exclusive, data) @@ -243,7 +242,7 @@ func MinimumUint(path, in string, data, minimum uint64, exclusive bool) *errors. return nil } -// MultipleOf validates if the provided number is a multiple of the factor +// MultipleOf validates if the provided number is a multiple of the factor. func MultipleOf(path, in string, data, factor float64) *errors.Validation { // multipleOf factor must be positive if factor <= 0 { @@ -261,7 +260,7 @@ func MultipleOf(path, in string, data, factor float64) *errors.Validation { return nil } -// MultipleOfInt validates if the provided integer is a multiple of the factor +// MultipleOfInt validates if the provided integer is a multiple of the factor. 
func MultipleOfInt(path, in string, data int64, factor int64) *errors.Validation { // multipleOf factor must be positive if factor <= 0 { @@ -274,7 +273,7 @@ func MultipleOfInt(path, in string, data int64, factor int64) *errors.Validation return nil } -// MultipleOfUint validates if the provided unsigned integer is a multiple of the factor +// MultipleOfUint validates if the provided unsigned integer is a multiple of the factor. func MultipleOfUint(path, in string, data, factor uint64) *errors.Validation { // multipleOf factor must be positive if factor == 0 { @@ -287,7 +286,7 @@ func MultipleOfUint(path, in string, data, factor uint64) *errors.Validation { return nil } -// FormatOf validates if a string matches a format in the format registry +// FormatOf validates if a string matches a format in the format registry. func FormatOf(path, in, format, data string, registry strfmt.Registry) *errors.Validation { if registry == nil { registry = strfmt.Default @@ -310,10 +309,10 @@ func FormatOf(path, in, format, data string, registry strfmt.Registry) *errors.V // NOTE: currently, the max value is marshalled as a float64, no matter what, // which means there may be a loss during conversions (e.g. for very large integers) // -// TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free +// NOTE: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free. 
func MaximumNativeType(path, in string, val any, maximum float64, exclusive bool) *errors.Validation { kind := reflect.ValueOf(val).Type().Kind() - switch kind { //nolint:exhaustive + switch kind { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: value := valueHelp.asInt64(val) return MaximumInt(path, in, value, int64(maximum), exclusive) @@ -340,10 +339,10 @@ func MaximumNativeType(path, in string, val any, maximum float64, exclusive bool // NOTE: currently, the min value is marshalled as a float64, no matter what, // which means there may be a loss during conversions (e.g. for very large integers) // -// TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free +// NOTE: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free. func MinimumNativeType(path, in string, val any, minimum float64, exclusive bool) *errors.Validation { kind := reflect.ValueOf(val).Type().Kind() - switch kind { //nolint:exhaustive + switch kind { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: value := valueHelp.asInt64(val) return MinimumInt(path, in, value, int64(minimum), exclusive) @@ -362,7 +361,7 @@ func MinimumNativeType(path, in string, val any, minimum float64, exclusive bool } // MultipleOfNativeType provides native type constraint validation as a facade -// to various numeric types version of MultipleOf constraint check. +// to various numeric types version of [MultipleOf] constraint check. // // Assumes that any possible loss conversion during conversion has been // checked beforehand. @@ -370,10 +369,10 @@ func MinimumNativeType(path, in string, val any, minimum float64, exclusive bool // NOTE: currently, the multipleOf factor is marshalled as a float64, no matter what, // which means there may be a loss during conversions (e.g. 
for very large integers) // -// TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free +// NOTE: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free. func MultipleOfNativeType(path, in string, val any, multipleOf float64) *errors.Validation { kind := reflect.ValueOf(val).Type().Kind() - switch kind { //nolint:exhaustive + switch kind { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: value := valueHelp.asInt64(val) return MultipleOfInt(path, in, value, int64(multipleOf)) @@ -391,13 +390,13 @@ func MultipleOfNativeType(path, in string, val any, multipleOf float64) *errors. // IsValueValidAgainstRange checks that a numeric value is compatible with // the range defined by Type and Format, that is, may be converted without loss. // -// NOTE: this check is about type capacity and not formal verification such as: 1.0 != 1L +// NOTE: this check is about type capacity and not formal verification such as: 1.0 != 1L. 
func IsValueValidAgainstRange(val any, typeName, format, prefix, path string) error { kind := reflect.ValueOf(val).Type().Kind() // What is the string representation of val var stringRep string - switch kind { //nolint:exhaustive + switch kind { case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: stringRep = conv.FormatUinteger(valueHelp.asUint64(val)) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: diff --git a/vendor/github.com/gophercloud/gophercloud/v2/.golangci.yaml b/vendor/github.com/gophercloud/gophercloud/v2/.golangci.yaml index 828a099a40c..2de1155c037 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/.golangci.yaml +++ b/vendor/github.com/gophercloud/gophercloud/v2/.golangci.yaml @@ -1,19 +1,34 @@ ---- +version: "2" linters: - disable-all: true + default: none enable: - errcheck - - gofmt - - goimports - govet - staticcheck - unparam - unused - -issues: - exclude: - - SA1006 # printf-style function with dynamic format string and no further arguments should use print-style function instead (staticcheck) - exclude-rules: - - linters: - - staticcheck - text: 'SA1019: (x509.EncryptPEMBlock|strings.Title)' + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + rules: + - linters: + - staticcheck + text: "SA1019: (x509.EncryptPEMBlock|strings.Title)" + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/vendor/github.com/gophercloud/gophercloud/v2/CHANGELOG.md b/vendor/github.com/gophercloud/gophercloud/v2/CHANGELOG.md index d53f641b8ad..388e8c50b91 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/CHANGELOG.md +++ b/vendor/github.com/gophercloud/gophercloud/v2/CHANGELOG.md @@ -1,3 +1,15 @@ +## v2.11.0 (2026-03-04) + +* [GH-3602](https://github.com/gophercloud/gophercloud/pull/3602) 
[v2] Add PCIAddress field to baremetal InterfaceType +* [GH-3610](https://github.com/gophercloud/gophercloud/pull/3610) [v2] Networking V2: Added support for ML2 extension port_trusted_vif +* [GH-3611](https://github.com/gophercloud/gophercloud/pull/3611) [v2] networking/v2/layer3/routers: Add external gateways management +* [GH-3625](https://github.com/gophercloud/gophercloud/pull/3625) [v2] Use jimmy amphora in octavia job +* [GH-3629](https://github.com/gophercloud/gophercloud/pull/3629) [v2] Add a new Ironic field representing node health to Gophercloud +* [GH-3630](https://github.com/gophercloud/gophercloud/pull/3630) [v2] Bump go +* [GH-3632](https://github.com/gophercloud/gophercloud/pull/3632) [v2] CI: Fix fwaas jobs +* [GH-3633](https://github.com/gophercloud/gophercloud/pull/3633) [v2] Add TSIG key support for OpenStack DNS v2 API +* [GH-3640](https://github.com/gophercloud/gophercloud/pull/3640) [v2] fix: networkipavailabilities: handle scientific notation in IP counts + ## v2.10.0 (2026-01-05) * [GH-3569](https://github.com/gophercloud/gophercloud/pull/3569) identity/role: restore backward compatibility for description diff --git a/vendor/github.com/gophercloud/gophercloud/v2/Makefile b/vendor/github.com/gophercloud/gophercloud/v2/Makefile index c63adb8d031..79411c0026f 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/Makefile +++ b/vendor/github.com/gophercloud/gophercloud/v2/Makefile @@ -1,7 +1,7 @@ undefine GOFLAGS -GOLANGCI_LINT_VERSION?=v1.62.2 -GOTESTSUM_VERSION?=v1.12.2 +GOLANGCI_LINT_VERSION?=v2.5.0 +GOTESTSUM_VERSION?=v1.13.0 GO_TEST?=go run gotest.tools/gotestsum@$(GOTESTSUM_VERSION) --format testname -- TIMEOUT := "60m" @@ -78,6 +78,10 @@ acceptance-dns: $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/dns/... .PHONY: acceptance-dns +acceptance-fwaas: + $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/networking/v2/extensions/fwaas_v2/... 
+.PHONY: acceptance-fwaas + acceptance-identity: $(GO_TEST) -timeout $(TIMEOUT) -tags "fixtures acceptance" ./internal/acceptance/openstack/identity/... .PHONY: acceptance-identity diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/client.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/client.go index 73ca5c56d50..a0e84a1e808 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/client.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/client.go @@ -105,7 +105,7 @@ func Authenticate(ctx context.Context, client *gophercloud.ProviderClient, optio return v3auth(ctx, client, endpoint, &options, gophercloud.EndpointOpts{}) default: // The switch statement must be out of date from the versions list. - return fmt.Errorf("Unrecognized identity version: %s", chosen.ID) + return fmt.Errorf("unrecognized identity version: %s", chosen.ID) } } @@ -351,7 +351,7 @@ func initClientOpts(client *gophercloud.ProviderClient, eo gophercloud.EndpointO eo.ApplyDefaults(clientType) if eo.Version != 0 && eo.Version != version { - return sc, errors.New("Conflict between requested service major version and manually set version") + return sc, errors.New("conflict between requested service major version and manually set version") } eo.Version = version @@ -458,7 +458,7 @@ func NewLoadBalancerV2(client *gophercloud.ProviderClient, eo gophercloud.Endpoi sc, err := initClientOpts(client, eo, "load-balancer", 2) // Fixes edge case having an OpenStack lb endpoint with trailing version number. 
- endpoint := strings.Replace(sc.Endpoint, "v2.0/", "", -1) + endpoint := strings.ReplaceAll(sc.Endpoint, "v2.0/", "") sc.ResourceBase = endpoint + "v2.0/" return sc, err diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/hypervisors/results.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/hypervisors/results.go index c73f239cd66..9b337eb5182 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/hypervisors/results.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/hypervisors/results.go @@ -195,7 +195,7 @@ func (r *Hypervisor) UnmarshalJSON(b []byte) error { case float64: r.HypervisorVersion = int(t) default: - return fmt.Errorf("Hypervisor version has unexpected type: %T", t) + return fmt.Errorf("HypervisorVersion has unexpected type: %T", t) } // free_disk_gb doesn't exist after api version 2.87 @@ -206,7 +206,7 @@ func (r *Hypervisor) UnmarshalJSON(b []byte) error { case float64: r.FreeDiskGB = int(t) default: - return fmt.Errorf("Free disk GB has unexpected type: %T", t) + return fmt.Errorf("FreeDiskGB has unexpected type: %T", t) } } @@ -218,7 +218,7 @@ func (r *Hypervisor) UnmarshalJSON(b []byte) error { case float64: r.LocalGB = int(t) default: - return fmt.Errorf("Local GB has unexpected type: %T", t) + return fmt.Errorf("LocalGB has unexpected type: %T", t) } } diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/results.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/results.go index edc2740f685..b11b9b236b3 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/results.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers/results.go @@ -27,11 +27,11 @@ func (r serverResult) Extract() (*Server, error) { } func (r serverResult) ExtractInto(v any) error { - return r.Result.ExtractIntoStructPtr(v, "server") + return 
r.ExtractIntoStructPtr(v, "server") } func ExtractServersInto(r pagination.Page, v any) error { - return r.(ServerPage).Result.ExtractIntoSlicePtr(v, "servers") + return r.(ServerPage).ExtractIntoSlicePtr(v, "servers") } // CreateResult is the response from a Create operation. Call its Extract @@ -119,11 +119,11 @@ func decryptPassword(encryptedPassword string, privateKey *rsa.PrivateKey) (stri n, err := base64.StdEncoding.Decode(b64EncryptedPassword, []byte(encryptedPassword)) if err != nil { - return "", fmt.Errorf("Failed to base64 decode encrypted password: %s", err) + return "", fmt.Errorf("failed to base64 decode encrypted password: %s", err) } password, err := rsa.DecryptPKCS1v15(nil, privateKey, b64EncryptedPassword[0:n]) if err != nil { - return "", fmt.Errorf("Failed to decrypt password: %s", err) + return "", fmt.Errorf("failed to decrypt password: %s", err) } return string(password), nil @@ -159,7 +159,7 @@ func (r CreateImageResult) extractImageIDFromLocationHeader() (string, error) { imageID := path.Base(u.Path) if imageID == "." 
|| imageID == "/" { - return "", fmt.Errorf("Failed to parse the ID of newly created image: %s", u) + return "", fmt.Errorf("failed to parse the ID of newly created image: %s", u) } return imageID, nil diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips/results.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips/results.go index 7ea6160032c..89fd5cf9cdc 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips/results.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips/results.go @@ -111,7 +111,7 @@ func (r commonResult) Extract() (*FloatingIP, error) { } func (r commonResult) ExtractInto(v any) error { - return r.Result.ExtractIntoStructPtr(v, "floatingip") + return r.ExtractIntoStructPtr(v, "floatingip") } // CreateResult represents the result of a create operation. Call its Extract @@ -180,5 +180,5 @@ func ExtractFloatingIPs(r pagination.Page) ([]FloatingIP, error) { } func ExtractFloatingIPsInto(r pagination.Page, v any) error { - return r.(FloatingIPPage).Result.ExtractIntoSlicePtr(v, "floatingips") + return r.(FloatingIPPage).ExtractIntoSlicePtr(v, "floatingips") } diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/results.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/results.go index db223d48c1d..83cc941389d 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/results.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports/results.go @@ -20,7 +20,7 @@ func (r commonResult) Extract() (*Port, error) { } func (r commonResult) ExtractInto(v any) error { - return r.Result.ExtractIntoStructPtr(v, "port") + return r.ExtractIntoStructPtr(v, "port") } // CreateResult represents the result of a create operation. 
Call its Extract @@ -202,5 +202,5 @@ func ExtractPorts(r pagination.Page) ([]Port, error) { } func ExtractPortsInto(r pagination.Page, v any) error { - return r.(PortPage).Result.ExtractIntoSlicePtr(v, "ports") + return r.(PortPage).ExtractIntoSlicePtr(v, "ports") } diff --git a/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/choose_version.go b/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/choose_version.go index ccc56345a67..b9298ad844b 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/choose_version.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/openstack/utils/choose_version.go @@ -89,7 +89,7 @@ func ChooseVersion(ctx context.Context, client *gophercloud.ProviderClient, reco // Prefer a version that exactly matches the provided endpoint. if href == identityEndpoint { if href == "" { - return nil, "", fmt.Errorf("Endpoint missing in version %s response from %s", value.ID, client.IdentityBase) + return nil, "", fmt.Errorf("endpoint missing in version %s response from %s", value.ID, client.IdentityBase) } return version, href, nil } @@ -106,10 +106,10 @@ func ChooseVersion(ctx context.Context, client *gophercloud.ProviderClient, reco } if highest == nil { - return nil, "", fmt.Errorf("No supported version available from endpoint %s", client.IdentityBase) + return nil, "", fmt.Errorf("no supported version available from endpoint %s", client.IdentityBase) } if endpoint == "" { - return nil, "", fmt.Errorf("Endpoint missing in version %s response from %s", highest.ID, client.IdentityBase) + return nil, "", fmt.Errorf("endpoint missing in version %s response from %s", highest.ID, client.IdentityBase) } return highest, endpoint, nil diff --git a/vendor/github.com/gophercloud/gophercloud/v2/pagination/pager.go b/vendor/github.com/gophercloud/gophercloud/v2/pagination/pager.go index 3581012566b..2eed649f784 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/pagination/pager.go +++ 
b/vendor/github.com/gophercloud/gophercloud/v2/pagination/pager.go @@ -13,7 +13,7 @@ import ( var ( // ErrPageNotAvailable is returned from a Pager when a next or previous page is requested, but does not exist. - ErrPageNotAvailable = errors.New("The requested page does not exist.") + ErrPageNotAvailable = errors.New("the requested page does not exist") ) // Page must be satisfied by the result type of any resource collection. diff --git a/vendor/github.com/gophercloud/gophercloud/v2/params.go b/vendor/github.com/gophercloud/gophercloud/v2/params.go index 4a2ed6c9425..e2820336839 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/params.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/params.go @@ -109,7 +109,7 @@ func BuildRequestBody(opts any, parent string) (map[string]any, error) { } xorFieldIsZero = isZero(xorField) } - if !(zero != xorFieldIsZero) { + if zero == xorFieldIsZero { err := ErrMissingInput{} err.Argument = fmt.Sprintf("%s/%s", f.Name, xorTag) err.Info = fmt.Sprintf("Exactly one of %s and %s must be provided", f.Name, xorTag) @@ -219,12 +219,12 @@ func BuildRequestBody(opts any, parent string) (map[string]any, error) { optsMaps[i] = b } if parent == "" { - return nil, fmt.Errorf("Parent is required when passing an array or a slice.") + return nil, fmt.Errorf("parent is required when passing an array or a slice") } return map[string]any{parent: optsMaps}, nil } // Return an error if we can't work with the underlying type of 'opts' - return nil, fmt.Errorf("Options type is not a struct, a slice, or an array.") + return nil, fmt.Errorf("options type is not a struct, a slice, or an array") } // EnabledState is a convenience type, mostly used in Create and Update @@ -429,7 +429,7 @@ func BuildQueryString(opts any) (*url.URL, error) { } else { // if the field has a 'required' tag, it can't have a zero-value if requiredTag := f.Tag.Get("required"); requiredTag == "true" { - return &url.URL{}, fmt.Errorf("Required query parameter [%s] not 
set.", f.Name) + return &url.URL{}, fmt.Errorf("required query parameter [%s] not set", f.Name) } } } @@ -438,7 +438,7 @@ func BuildQueryString(opts any) (*url.URL, error) { return &url.URL{RawQuery: params.Encode()}, nil } // Return an error if the underlying type of 'opts' isn't a struct. - return nil, fmt.Errorf("Options type is not a struct.") + return nil, fmt.Errorf("options type is not a struct") } /* @@ -509,7 +509,7 @@ func BuildHeaders(opts any) (map[string]string, error) { } else { // if the field has a 'required' tag, it can't have a zero-value if requiredTag := f.Tag.Get("required"); requiredTag == "true" { - return optsMap, fmt.Errorf("Required header [%s] not set.", f.Name) + return optsMap, fmt.Errorf("required header [%s] not set", f.Name) } } } @@ -518,7 +518,7 @@ func BuildHeaders(opts any) (map[string]string, error) { return optsMap, nil } // Return an error if the underlying type of 'opts' isn't a struct. - return optsMap, fmt.Errorf("Options type is not a struct.") + return optsMap, fmt.Errorf("options type is not a struct") } // IDSliceToQueryString takes a slice of elements and converts them into a query diff --git a/vendor/github.com/gophercloud/gophercloud/v2/provider_client.go b/vendor/github.com/gophercloud/gophercloud/v2/provider_client.go index a9bde9457c0..26e2d8874c3 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/provider_client.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/provider_client.go @@ -14,7 +14,7 @@ import ( // DefaultUserAgent is the default User-Agent string set in the request header. 
const ( - DefaultUserAgent = "gophercloud/v2.10.0" + DefaultUserAgent = "gophercloud/v2.11.0" DefaultMaxBackoffRetries = 60 ) diff --git a/vendor/github.com/gophercloud/gophercloud/v2/results.go b/vendor/github.com/gophercloud/gophercloud/v2/results.go index b12c15a0266..d5b947b7f0f 100644 --- a/vendor/github.com/gophercloud/gophercloud/v2/results.go +++ b/vendor/github.com/gophercloud/gophercloud/v2/results.go @@ -185,23 +185,23 @@ func (r Result) ExtractIntoStructPtr(to any, label string) error { } if to == nil { - return fmt.Errorf("Expected pointer, got %T", to) + return fmt.Errorf("expected pointer, got %T", to) } t := reflect.TypeOf(to) if k := t.Kind(); k != reflect.Ptr { - return fmt.Errorf("Expected pointer, got %v", k) + return fmt.Errorf("expected pointer, got %v", k) } if reflect.ValueOf(to).IsNil() { - return fmt.Errorf("Expected pointer, got %T", to) + return fmt.Errorf("expected pointer, got %T", to) } switch t.Elem().Kind() { case reflect.Struct: return r.extractIntoPtr(to, label) default: - return fmt.Errorf("Expected pointer to struct, got: %v", t) + return fmt.Errorf("expected pointer to struct, got: %v", t) } } @@ -220,23 +220,23 @@ func (r Result) ExtractIntoSlicePtr(to any, label string) error { } if to == nil { - return fmt.Errorf("Expected pointer, got %T", to) + return fmt.Errorf("expected pointer, got %T", to) } t := reflect.TypeOf(to) if k := t.Kind(); k != reflect.Ptr { - return fmt.Errorf("Expected pointer, got %v", k) + return fmt.Errorf("expected pointer, got %v", k) } if reflect.ValueOf(to).IsNil() { - return fmt.Errorf("Expected pointer, got %T", to) + return fmt.Errorf("expected pointer, got %T", to) } switch t.Elem().Kind() { case reflect.Slice: return r.extractIntoPtr(to, label) default: - return fmt.Errorf("Expected pointer to slice, got: %v", t) + return fmt.Errorf("expected pointer to slice, got: %v", t) } } diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml 
index 4528059ca68..804a2018167 100644 --- a/vendor/github.com/klauspost/compress/.goreleaser.yml +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -31,6 +31,9 @@ builds: - mips64le goarm: - 7 + ignore: + - goos: windows + goarch: arm - id: "s2d" binary: s2d @@ -57,6 +60,9 @@ builds: - mips64le goarm: - 7 + ignore: + - goos: windows + goarch: arm - id: "s2sx" binary: s2sx @@ -84,6 +90,9 @@ builds: - mips64le goarm: - 7 + ignore: + - goos: windows + goarch: arm archives: - @@ -91,7 +100,7 @@ archives: name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" format_overrides: - goos: windows - format: zip + formats: ['zip'] files: - unpack/* - s2/LICENSE diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 5125c1f267e..e839fe9c60c 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -26,6 +26,12 @@ This package will support the current Go version and 2 versions back. Use the links above for more information on each. # changelog + +* Feb 9th, 2026 [1.18.4](https://github.com/klauspost/compress/releases/tag/v1.18.4) + * gzhttp: Add zstandard to server handler wrapper https://github.com/klauspost/compress/pull/1121 + * zstd: Add ResetWithOptions to encoder/decoder https://github.com/klauspost/compress/pull/1122 + * gzhttp: preserve qvalue when extra parameters follow in Accept-Encoding by @analytically in https://github.com/klauspost/compress/pull/1116 + * Jan 16th, 2026 [1.18.3](https://github.com/klauspost/compress/releases/tag/v1.18.3) * Downstream CVE-2025-61728. See [golang/go#77102](https://github.com/golang/go/issues/77102). @@ -691,3 +697,4 @@ This code is licensed under the same conditions as the original Go code. 
See LIC + diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go index 5f901bd0fe8..4b312dea3e5 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_code.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go @@ -407,8 +407,8 @@ func histogramSplit(b []byte, h []uint16) { for i, t := range x { v0 := &h[t] v1 := &h[y[i]] - v3 := &h[w[i]] v2 := &h[z[i]] + v3 := &h[w[i]] *v0++ *v1++ *v2++ diff --git a/vendor/github.com/klauspost/compress/flate/regmask_other.go b/vendor/github.com/klauspost/compress/flate/regmask_other.go index 1b7a2cbd793..e62caf711e4 100644 --- a/vendor/github.com/klauspost/compress/flate/regmask_other.go +++ b/vendor/github.com/klauspost/compress/flate/regmask_other.go @@ -1,5 +1,4 @@ //go:build !amd64 -// +build !amd64 package flate diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go index 99ddd4af97c..2d6ef64be15 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go @@ -1,5 +1,4 @@ //go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc // This file contains the specialisation of Decoder.Decompress4X // and Decoder.Decompress1X that use an asm implementation of thir main loops. diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go index 908c17de63f..61039232224 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go @@ -1,5 +1,4 @@ //go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm // This file contains a generic implementation of Decoder.Decompress4X. 
package huff0 diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go index e802579c4f9..b97f9056f4c 100644 --- a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go @@ -1,5 +1,4 @@ //go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc package cpuinfo diff --git a/vendor/github.com/klauspost/compress/s2/decode_other.go b/vendor/github.com/klauspost/compress/s2/decode_other.go index c99d40b69d0..2905ba2774d 100644 --- a/vendor/github.com/klauspost/compress/s2/decode_other.go +++ b/vendor/github.com/klauspost/compress/s2/decode_other.go @@ -4,7 +4,6 @@ // license that can be found in the LICENSE file. //go:build (!amd64 && !arm64) || appengine || !gc || noasm -// +build !amd64,!arm64 appengine !gc noasm package s2 diff --git a/vendor/github.com/klauspost/compress/s2/encode_amd64.go b/vendor/github.com/klauspost/compress/s2/encode_amd64.go index 7aadd255fe3..68d72a41d3a 100644 --- a/vendor/github.com/klauspost/compress/s2/encode_amd64.go +++ b/vendor/github.com/klauspost/compress/s2/encode_amd64.go @@ -1,5 +1,4 @@ //go:build !appengine && !noasm && gc -// +build !appengine,!noasm,gc package s2 diff --git a/vendor/github.com/klauspost/compress/s2/encode_go.go b/vendor/github.com/klauspost/compress/s2/encode_go.go index e25b78445d7..5597f3ef2ec 100644 --- a/vendor/github.com/klauspost/compress/s2/encode_go.go +++ b/vendor/github.com/klauspost/compress/s2/encode_go.go @@ -1,5 +1,4 @@ //go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm package s2 diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index fd35ea1480a..0e33aea4422 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ 
b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -78,6 +78,7 @@ func (b *blockEnc) initNewEncode() { b.recentOffsets = [3]uint32{1, 4, 8} b.litEnc.Reuse = huff0.ReusePolicyNone b.coders.setPrev(nil, nil, nil) + b.dictLitEnc = nil } // reset will reset the block for a new encode, but in the same stream, diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go index c1192ec38f4..c4de134a7a4 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -21,7 +21,7 @@ type fastBase struct { crc *xxhash.Digest tmp [8]byte blk *blockEnc - lastDictID uint32 + lastDict *dict lowMem bool } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index c1581cfcb8b..851799322bd 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -479,10 +479,13 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { if d == nil { return } + dictChanged := d != e.lastDict // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) || dictChanged { if len(e.dictTable) != len(e.table) { e.dictTable = make([]prevEntry, len(e.table)) + } else { + clear(e.dictTable) } end := int32(len(d.content)) - 8 + e.maxMatchOff for i := e.maxMatchOff; i < end; i += 4 { @@ -510,13 +513,14 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { offset: i + 3, } } - e.lastDictID = d.id } - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + // Init or copy dict long table + if len(e.dictLongTable) != len(e.longTable) || dictChanged { if len(e.dictLongTable) != len(e.longTable) { e.dictLongTable = make([]prevEntry, len(e.longTable)) + } else { + clear(e.dictLongTable) } if len(d.content) >= 8 { cv := 
load6432(d.content, 0) @@ -538,8 +542,8 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { off++ } } - e.lastDictID = d.id } + e.lastDict = d // Reset table to initial state copy(e.longTable[:], e.dictLongTable) diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 85dcd28c32e..3305f09248c 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -1102,10 +1102,13 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { if d == nil { return } + dictChanged := d != e.lastDict // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) || dictChanged { if len(e.dictTable) != len(e.table) { e.dictTable = make([]tableEntry, len(e.table)) + } else { + clear(e.dictTable) } end := int32(len(d.content)) - 8 + e.maxMatchOff for i := e.maxMatchOff; i < end; i += 4 { @@ -1133,14 +1136,15 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { offset: i + 3, } } - e.lastDictID = d.id e.allDirty = true } - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + // Init or copy dict long table + if len(e.dictLongTable) != len(e.longTable) || dictChanged { if len(e.dictLongTable) != len(e.longTable) { e.dictLongTable = make([]prevEntry, len(e.longTable)) + } else { + clear(e.dictLongTable) } if len(d.content) >= 8 { cv := load6432(d.content, 0) @@ -1162,9 +1166,9 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { off++ } } - e.lastDictID = d.id e.allDirty = true } + e.lastDict = d // Reset table to initial state { diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index cf8cad00dcf..2fb6da112bc 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ 
b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -1040,15 +1040,18 @@ func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { // ResetDict will reset and set a dictionary if not nil func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { allDirty := e.allDirty + dictChanged := d != e.lastDict e.fastEncoderDict.Reset(d, singleBlock) if d == nil { return } // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) || dictChanged { if len(e.dictLongTable) != len(e.longTable) { e.dictLongTable = make([]tableEntry, len(e.longTable)) + } else { + clear(e.dictLongTable) } if len(d.content) >= 8 { cv := load6432(d.content, 0) @@ -1065,7 +1068,6 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { } } } - e.lastDictID = d.id allDirty = true } // Reset table to initial state diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index 9180a3a5820..5e104f1a482 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -805,9 +805,11 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { } // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) || d != e.lastDict { if len(e.dictTable) != len(e.table) { e.dictTable = make([]tableEntry, len(e.table)) + } else { + clear(e.dictTable) } if true { end := e.maxMatchOff + int32(len(d.content)) - 8 @@ -827,7 +829,7 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { } } } - e.lastDictID = d.id + e.lastDict = d e.allDirty = true } diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index 19e730acc26..0f2a00a0033 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ 
b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -138,11 +138,18 @@ func (e *Encoder) Reset(w io.Writer) { func (e *Encoder) ResetWithOptions(w io.Writer, opts ...EOption) error { e.o.resetOpt = true defer func() { e.o.resetOpt = false }() + hadDict := e.o.dict != nil for _, o := range opts { if err := o(&e.o); err != nil { return err } } + hasDict := e.o.dict != nil + if hadDict != hasDict { + // Dict presence changed — encoder type must be recreated. + e.state.encoder = nil + e.init = sync.Once{} + } e.Reset(w) return nil } @@ -448,6 +455,12 @@ func (e *Encoder) Close() error { if s.encoder == nil { return nil } + if s.w == nil { + if len(s.filling) == 0 && !s.headerWritten && !s.eofWritten && s.nInput == 0 { + return nil + } + return errors.New("zstd: encoder has no writer") + } err := e.nextBlock(true) if err != nil { if errors.Is(s.err, ErrEncoderClosed) { diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index 8e0f5cac71b..e217be0a17a 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -42,6 +42,7 @@ func (o *encoderOptions) setDefault() { level: SpeedDefault, allLitEntropy: false, lowMem: false, + fullZero: true, } } diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go index d04a829b0a0..b8c8607b5df 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go @@ -1,5 +1,4 @@ //go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc package zstd diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go index 8adfebb0297..2138f8091a9 100644 --- 
a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go @@ -1,5 +1,4 @@ //go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm package zstd diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go index 0be16cefc7f..9576426e686 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -1,5 +1,4 @@ //go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm -// +build !amd64,!arm64 appengine !gc purego noasm package xxhash diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go index f41932b7a4f..1ed18927f95 100644 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go @@ -1,5 +1,4 @@ //go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc // Copyright 2019+ Klaus Post. All rights reserved. // License information can be found in the LICENSE file. diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go index bea1779e973..379746c96ca 100644 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go @@ -1,5 +1,4 @@ //go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm // Copyright 2019+ Klaus Post. All rights reserved. // License information can be found in the LICENSE file. 
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go index 1f8c3cec28c..18c3703ddc9 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -1,5 +1,4 @@ //go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc package zstd diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go index 7cec2197cd9..516cd9b0701 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -1,5 +1,4 @@ //go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm package zstd diff --git a/vendor/github.com/knadh/koanf/v2/koanf.go b/vendor/github.com/knadh/koanf/v2/koanf.go index f7475d75171..1b871057305 100644 --- a/vendor/github.com/knadh/koanf/v2/koanf.go +++ b/vendor/github.com/knadh/koanf/v2/koanf.go @@ -434,15 +434,25 @@ func (ko *Koanf) Delim() string { func (ko *Koanf) merge(c map[string]any, opts *options) error { ko.mu.Lock() - defer ko.mu.Unlock() maps.IntfaceKeysToStrings(c) if opts.merge != nil { - if err := opts.merge(c, ko.confMap); err != nil { + // Deep-copy confMap so the custom merge function can safely call + // ko.Get*() methods (which acquire a read lock) without deadlocking. 
+ dest := maps.Copy(ko.confMap) + + ko.mu.Unlock() + err := opts.merge(c, dest) + ko.mu.Lock() + + if err != nil { + ko.mu.Unlock() return err } + ko.confMap = dest } else if ko.conf.StrictMerge { if err := maps.MergeStrict(c, ko.confMap); err != nil { + ko.mu.Unlock() return err } } else { @@ -453,6 +463,7 @@ func (ko *Koanf) merge(c map[string]any, opts *options) error { ko.confMapFlat, ko.keyMap = maps.Flatten(ko.confMap, nil, ko.conf.Delim) ko.keyMap = populateKeyParts(ko.keyMap, ko.conf.Delim) + ko.mu.Unlock() return nil } diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/LICENSE b/vendor/github.com/moby/moby/api/LICENSE similarity index 99% rename from vendor/github.com/puzpuzpuz/xsync/v3/LICENSE rename to vendor/github.com/moby/moby/api/LICENSE index 261eeb9e9f8..d6456956733 100644 --- a/vendor/github.com/puzpuzpuz/xsync/v3/LICENSE +++ b/vendor/github.com/moby/moby/api/LICENSE @@ -1,3 +1,4 @@ + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ diff --git a/vendor/github.com/moby/moby/api/types/blkiodev/blkio.go b/vendor/github.com/moby/moby/api/types/blkiodev/blkio.go new file mode 100644 index 00000000000..931ae10ab1e --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/blkiodev/blkio.go @@ -0,0 +1,23 @@ +package blkiodev + +import "fmt" + +// WeightDevice is a structure that holds device:weight pair +type WeightDevice struct { + Path string + Weight uint16 +} + +func (w *WeightDevice) String() string { + return fmt.Sprintf("%s:%d", w.Path, w.Weight) +} + +// ThrottleDevice is a structure that holds device:rate_per_second pair +type ThrottleDevice struct { + Path string + Rate uint64 +} + +func (t *ThrottleDevice) String() string { + return fmt.Sprintf("%s:%d", t.Path, t.Rate) +} diff --git a/vendor/github.com/moby/moby/api/types/build/build.go b/vendor/github.com/moby/moby/api/types/build/build.go new file mode 100644 index 00000000000..db983977340 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/build/build.go @@ -0,0 
+1,16 @@ +package build + +// BuilderVersion sets the version of underlying builder to use +type BuilderVersion string + +const ( + // BuilderV1 is the first generation builder in docker daemon + BuilderV1 BuilderVersion = "1" + // BuilderBuildKit is builder based on moby/buildkit project + BuilderBuildKit BuilderVersion = "2" +) + +// Result contains the image id of a successful build. +type Result struct { + ID string +} diff --git a/vendor/github.com/moby/moby/api/types/build/cache.go b/vendor/github.com/moby/moby/api/types/build/cache.go new file mode 100644 index 00000000000..39dd23a5f3a --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/build/cache.go @@ -0,0 +1,35 @@ +package build + +import ( + "time" +) + +// CacheRecord contains information about a build cache record. +type CacheRecord struct { + // ID is the unique ID of the build cache record. + ID string + // Parents is the list of parent build cache record IDs. + Parents []string `json:" Parents,omitempty"` + // Type is the cache record type. + Type string + // Description is a description of the build-step that produced the build cache. + Description string + // InUse indicates if the build cache is in use. + InUse bool + // Shared indicates if the build cache is shared. + Shared bool + // Size is the amount of disk space used by the build cache (in bytes). + Size int64 + // CreatedAt is the date and time at which the build cache was created. + CreatedAt time.Time + // LastUsedAt is the date and time at which the build cache was last used. 
+ LastUsedAt *time.Time + UsageCount int +} + +// CachePruneReport contains the response for Engine API: +// POST "/build/prune" +type CachePruneReport struct { + CachesDeleted []string + SpaceReclaimed uint64 +} diff --git a/vendor/github.com/moby/moby/api/types/build/disk_usage.go b/vendor/github.com/moby/moby/api/types/build/disk_usage.go new file mode 100644 index 00000000000..3613797dba7 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/build/disk_usage.go @@ -0,0 +1,36 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package build + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// DiskUsage represents system data usage for build cache resources. +// +// swagger:model DiskUsage +type DiskUsage struct { + + // Count of active build cache records. + // + // Example: 1 + ActiveCount int64 `json:"ActiveCount,omitempty"` + + // List of build cache records. + // + Items []CacheRecord `json:"Items,omitempty"` + + // Disk space that can be reclaimed by removing inactive build cache records. + // + // Example: 12345678 + Reclaimable int64 `json:"Reclaimable,omitempty"` + + // Count of all build cache records. + // + // Example: 4 + TotalCount int64 `json:"TotalCount,omitempty"` + + // Disk space in use by build cache records. + // + // Example: 98765432 + TotalSize int64 `json:"TotalSize,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/checkpoint/create_request.go b/vendor/github.com/moby/moby/api/types/checkpoint/create_request.go new file mode 100644 index 00000000000..c363783f2a6 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/checkpoint/create_request.go @@ -0,0 +1,8 @@ +package checkpoint + +// CreateRequest holds parameters to create a checkpoint from a container. 
+type CreateRequest struct { + CheckpointID string + CheckpointDir string + Exit bool +} diff --git a/vendor/github.com/moby/moby/api/types/checkpoint/list.go b/vendor/github.com/moby/moby/api/types/checkpoint/list.go new file mode 100644 index 00000000000..94a9c0a47d3 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/checkpoint/list.go @@ -0,0 +1,7 @@ +package checkpoint + +// Summary represents the details of a checkpoint when listing endpoints. +type Summary struct { + // Name is the name of the checkpoint. + Name string +} diff --git a/vendor/github.com/moby/moby/api/types/common/error_response.go b/vendor/github.com/moby/moby/api/types/common/error_response.go new file mode 100644 index 00000000000..b49d3eea0a2 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/common/error_response.go @@ -0,0 +1,17 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package common + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ErrorResponse Represents an error. +// Example: {"message":"Something went wrong."} +// +// swagger:model ErrorResponse +type ErrorResponse struct { + + // The error message. 
+ // Required: true + Message string `json:"message"` +} diff --git a/vendor/github.com/moby/moby/api/types/common/error_response_ext.go b/vendor/github.com/moby/moby/api/types/common/error_response_ext.go new file mode 100644 index 00000000000..c92dfe4b12e --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/common/error_response_ext.go @@ -0,0 +1,6 @@ +package common + +// Error returns the error message +func (e ErrorResponse) Error() string { + return e.Message +} diff --git a/vendor/github.com/moby/moby/api/types/common/id_response.go b/vendor/github.com/moby/moby/api/types/common/id_response.go new file mode 100644 index 00000000000..7dfe4bf12b5 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/common/id_response.go @@ -0,0 +1,16 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package common + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// IDResponse Response to an API call that returns just an Id +// +// swagger:model IDResponse +type IDResponse struct { + + // The id of the newly created object. + // Required: true + ID string `json:"Id"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/change_type.go b/vendor/github.com/moby/moby/api/types/container/change_type.go new file mode 100644 index 00000000000..52fc99235b1 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/change_type.go @@ -0,0 +1,17 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package container + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// ChangeType Kind of change +// +// Can be one of: +// +// - `0`: Modified ("C") +// - `1`: Added ("A") +// - `2`: Deleted ("D") +// +// swagger:model ChangeType +type ChangeType uint8 diff --git a/vendor/github.com/moby/moby/api/types/container/change_types.go b/vendor/github.com/moby/moby/api/types/container/change_types.go new file mode 100644 index 00000000000..3a3a83866ec --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/change_types.go @@ -0,0 +1,23 @@ +package container + +const ( + // ChangeModify represents the modify operation. + ChangeModify ChangeType = 0 + // ChangeAdd represents the add operation. + ChangeAdd ChangeType = 1 + // ChangeDelete represents the delete operation. + ChangeDelete ChangeType = 2 +) + +func (ct ChangeType) String() string { + switch ct { + case ChangeModify: + return "C" + case ChangeAdd: + return "A" + case ChangeDelete: + return "D" + default: + return "" + } +} diff --git a/vendor/github.com/moby/moby/api/types/container/commit.go b/vendor/github.com/moby/moby/api/types/container/commit.go new file mode 100644 index 00000000000..c5aab26ff4f --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/commit.go @@ -0,0 +1,7 @@ +package container + +import "github.com/moby/moby/api/types/common" + +// CommitResponse response for the commit API call, containing the ID of the +// image that was produced. 
+type CommitResponse = common.IDResponse diff --git a/vendor/github.com/moby/moby/api/types/container/config.go b/vendor/github.com/moby/moby/api/types/container/config.go new file mode 100644 index 00000000000..78fa9f9105b --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/config.go @@ -0,0 +1,50 @@ +package container + +import ( + "time" + + dockerspec "github.com/moby/docker-image-spec/specs-go/v1" + "github.com/moby/moby/api/types/network" +) + +// MinimumDuration puts a minimum on user configured duration. +// This is to prevent API error on time unit. For example, API may +// set 3 as healthcheck interval with intention of 3 seconds, but +// Docker interprets it as 3 nanoseconds. +const MinimumDuration = 1 * time.Millisecond + +// HealthConfig holds configuration settings for the HEALTHCHECK feature. +type HealthConfig = dockerspec.HealthcheckConfig + +// Config contains the configuration data about a container. +// It should hold only portable information about the container. +// Here, "portable" means "independent from the host we are running on". +// Non-portable information *should* appear in HostConfig. +// All fields added to this struct must be marked `omitempty` to keep getting +// predictable hashes from the old `v1Compatibility` configuration. +type Config struct { + Hostname string // Hostname + Domainname string // Domainname + User string // User that will run the command(s) inside the container, also support user:group + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStdout bool // Attach the standard output + AttachStderr bool // Attach the standard error + ExposedPorts network.PortSet `json:",omitempty"` // List of exposed ports + Tty bool // Attach standard streams to a tty, including stdin if it is not closed. + OpenStdin bool // Open stdin + StdinOnce bool // If true, close stdin after the 1 attached client disconnects. 
+ Env []string // List of environment variable to set in the container + Cmd []string // Command to run when starting the container + Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy + ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (meaning treat as a command line) (Windows specific). + Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) + Volumes map[string]struct{} // List of volumes (mounts) used for the container + WorkingDir string // Current directory (PWD) in the command will be launched + Entrypoint []string // Entrypoint to run when starting the container + NetworkDisabled bool `json:",omitempty"` // Is network disabled + OnBuild []string `json:",omitempty"` // ONBUILD metadata that were defined on the image Dockerfile + Labels map[string]string // List of labels set to this container + StopSignal string `json:",omitempty"` // Signal to stop a container + StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container + Shell []string `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT +} diff --git a/vendor/github.com/moby/moby/api/types/container/container.go b/vendor/github.com/moby/moby/api/types/container/container.go new file mode 100644 index 00000000000..bffb3de8727 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/container.go @@ -0,0 +1,151 @@ +package container + +import ( + "os" + "time" + + "github.com/moby/moby/api/types/mount" + "github.com/moby/moby/api/types/storage" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// PruneReport contains the response for Engine API: +// POST "/containers/prune" +type PruneReport struct { + ContainersDeleted []string + SpaceReclaimed uint64 +} + +// PathStat is used to encode the header from +// GET "/containers/{name:.*}/archive" +// "Name" is the file or directory name. 
+type PathStat struct { + Name string `json:"name"` + Size int64 `json:"size"` + Mode os.FileMode `json:"mode"` + Mtime time.Time `json:"mtime"` + LinkTarget string `json:"linkTarget"` +} + +// MountPoint represents a mount point configuration inside the container. +// This is used for reporting the mountpoints in use by a container. +type MountPoint struct { + // Type is the type of mount, see [mount.Type] definitions for details. + Type mount.Type `json:",omitempty"` + + // Name is the name reference to the underlying data defined by `Source` + // e.g., the volume name. + Name string `json:",omitempty"` + + // Source is the source location of the mount. + // + // For volumes, this contains the storage location of the volume (within + // `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains + // the source (host) part of the bind-mount. For `tmpfs` mount points, this + // field is empty. + Source string + + // Destination is the path relative to the container root (`/`) where the + // Source is mounted inside the container. + Destination string + + // Driver is the volume driver used to create the volume (if it is a volume). + Driver string `json:",omitempty"` + + // Mode is a comma separated list of options supplied by the user when + // creating the bind/volume mount. + // + // The default is platform-specific (`"z"` on Linux, empty on Windows). + Mode string + + // RW indicates whether the mount is mounted writable (read-write). + RW bool + + // Propagation describes how mounts are propagated from the host into the + // mount point, and vice-versa. Refer to the Linux kernel documentation + // for details: + // https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt + // + // This field is not used on Windows. 
+ Propagation mount.Propagation +} + +// State stores container's running state +// it's part of ContainerJSONBase and returned by "inspect" command +type State struct { + Status ContainerState // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead" + Running bool + Paused bool + Restarting bool + OOMKilled bool + Dead bool + Pid int + ExitCode int + Error string + StartedAt string + FinishedAt string + Health *Health `json:",omitempty"` +} + +// Summary contains response of Engine API: +// GET "/containers/json" +type Summary struct { + ID string `json:"Id"` + Names []string + Image string + ImageID string + ImageManifestDescriptor *ocispec.Descriptor `json:"ImageManifestDescriptor,omitempty"` + Command string + Created int64 + Ports []PortSummary + SizeRw int64 `json:",omitempty"` + SizeRootFs int64 `json:",omitempty"` + Labels map[string]string + State ContainerState + Status string + HostConfig struct { + NetworkMode string `json:",omitempty"` + Annotations map[string]string `json:",omitempty"` + } + Health *HealthSummary `json:",omitempty"` + NetworkSettings *NetworkSettingsSummary + Mounts []MountPoint +} + +// InspectResponse is the response for the GET "/containers/{name:.*}/json" +// endpoint. +type InspectResponse struct { + ID string `json:"Id"` + Created string + Path string + Args []string + State *State + Image string + ResolvConfPath string + HostnamePath string + HostsPath string + LogPath string + Name string + RestartCount int + Driver string + Platform string + MountLabel string + ProcessLabel string + AppArmorProfile string + ExecIDs []string + HostConfig *HostConfig + + // GraphDriver contains information about the container's graph driver. + GraphDriver *storage.DriverData `json:"GraphDriver,omitempty"` + + // Storage contains information about the storage used for the container's filesystem. 
+ Storage *storage.Storage `json:"Storage,omitempty"` + + SizeRw *int64 `json:",omitempty"` + SizeRootFs *int64 `json:",omitempty"` + Mounts []MountPoint + Config *Config + NetworkSettings *NetworkSettings + // ImageManifestDescriptor is the descriptor of a platform-specific manifest of the image used to create the container. + ImageManifestDescriptor *ocispec.Descriptor `json:"ImageManifestDescriptor,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/create_request.go b/vendor/github.com/moby/moby/api/types/container/create_request.go new file mode 100644 index 00000000000..decb208af0e --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/create_request.go @@ -0,0 +1,13 @@ +package container + +import "github.com/moby/moby/api/types/network" + +// CreateRequest is the request message sent to the server for container +// create calls. It is a config wrapper that holds the container [Config] +// (portable) and the corresponding [HostConfig] (non-portable) and +// [network.NetworkingConfig]. +type CreateRequest struct { + *Config + HostConfig *HostConfig `json:"HostConfig,omitempty"` + NetworkingConfig *network.NetworkingConfig `json:"NetworkingConfig,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/create_response.go b/vendor/github.com/moby/moby/api/types/container/create_response.go new file mode 100644 index 00000000000..39d761aa968 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/create_response.go @@ -0,0 +1,24 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package container + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// CreateResponse ContainerCreateResponse +// +// # OK response to ContainerCreate operation +// +// swagger:model CreateResponse +type CreateResponse struct { + + // The ID of the created container + // Example: ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743 + // Required: true + ID string `json:"Id"` + + // Warnings encountered when creating the container + // Example: [] + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/disk_usage.go b/vendor/github.com/moby/moby/api/types/container/disk_usage.go new file mode 100644 index 00000000000..c36721d3b00 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/disk_usage.go @@ -0,0 +1,36 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package container + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// DiskUsage represents system data usage information for container resources. +// +// swagger:model DiskUsage +type DiskUsage struct { + + // Count of active containers. + // + // Example: 1 + ActiveCount int64 `json:"ActiveCount,omitempty"` + + // List of container summaries. + // + Items []Summary `json:"Items,omitempty"` + + // Disk space that can be reclaimed by removing inactive containers. + // + // Example: 12345678 + Reclaimable int64 `json:"Reclaimable,omitempty"` + + // Count of all containers. + // + // Example: 4 + TotalCount int64 `json:"TotalCount,omitempty"` + + // Disk space in use by containers. 
+ // + // Example: 98765432 + TotalSize int64 `json:"TotalSize,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/errors.go b/vendor/github.com/moby/moby/api/types/container/errors.go new file mode 100644 index 00000000000..32c978037ea --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/errors.go @@ -0,0 +1,9 @@ +package container + +type errInvalidParameter struct{ error } + +func (e *errInvalidParameter) InvalidParameter() {} + +func (e *errInvalidParameter) Unwrap() error { + return e.error +} diff --git a/vendor/github.com/moby/moby/api/types/container/exec.go b/vendor/github.com/moby/moby/api/types/container/exec.go new file mode 100644 index 00000000000..6895926aefe --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/exec.go @@ -0,0 +1,35 @@ +package container + +import "github.com/moby/moby/api/types/common" + +// ExecCreateResponse is the response for a successful exec-create request. +// It holds the ID of the exec that was created. +// +// TODO(thaJeztah): make this a distinct type. +type ExecCreateResponse = common.IDResponse + +// ExecInspectResponse is the API response for the "GET /exec/{id}/json" +// endpoint and holds information about and exec. +type ExecInspectResponse struct { + ID string `json:"ID"` + Running bool `json:"Running"` + ExitCode *int `json:"ExitCode"` + ProcessConfig *ExecProcessConfig + OpenStdin bool `json:"OpenStdin"` + OpenStderr bool `json:"OpenStderr"` + OpenStdout bool `json:"OpenStdout"` + CanRemove bool `json:"CanRemove"` + ContainerID string `json:"ContainerID"` + DetachKeys []byte `json:"DetachKeys"` + Pid int `json:"Pid"` +} + +// ExecProcessConfig holds information about the exec process +// running on the host. 
+type ExecProcessConfig struct { + Tty bool `json:"tty"` + Entrypoint string `json:"entrypoint"` + Arguments []string `json:"arguments"` + Privileged *bool `json:"privileged,omitempty"` + User string `json:"user,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/exec_create_request.go b/vendor/github.com/moby/moby/api/types/container/exec_create_request.go new file mode 100644 index 00000000000..dd7437cd2f3 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/exec_create_request.go @@ -0,0 +1,17 @@ +package container + +// ExecCreateRequest is a small subset of the Config struct that holds the configuration +// for the exec feature of docker. +type ExecCreateRequest struct { + User string // User that will run the command + Privileged bool // Is the container in privileged mode + Tty bool // Attach standard streams to a tty. + ConsoleSize *[2]uint `json:",omitempty"` // Initial console size [height, width] + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStderr bool // Attach the standard error + AttachStdout bool // Attach the standard output + DetachKeys string // Escape keys for detach + Env []string // Environment variables + WorkingDir string // Working directory + Cmd []string // Execution commands and args +} diff --git a/vendor/github.com/moby/moby/api/types/container/exec_start_request.go b/vendor/github.com/moby/moby/api/types/container/exec_start_request.go new file mode 100644 index 00000000000..4c2ba0a77c1 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/exec_start_request.go @@ -0,0 +1,12 @@ +package container + +// ExecStartRequest is a temp struct used by execStart +// Config fields is part of ExecConfig in runconfig package +type ExecStartRequest struct { + // ExecStart will first check if it's detached + Detach bool + // Check if there's a tty + Tty bool + // Terminal size [height, width], unused if Tty == false + ConsoleSize *[2]uint 
`json:",omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/filesystem_change.go b/vendor/github.com/moby/moby/api/types/container/filesystem_change.go new file mode 100644 index 00000000000..b9ec83e5219 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/filesystem_change.go @@ -0,0 +1,21 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package container + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// FilesystemChange Change in the container's filesystem. +// +// swagger:model FilesystemChange +type FilesystemChange struct { + + // kind + // Required: true + Kind ChangeType `json:"Kind"` + + // Path to file or directory that has changed. + // + // Required: true + Path string `json:"Path"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/health.go b/vendor/github.com/moby/moby/api/types/container/health.go new file mode 100644 index 00000000000..1a1ba84b40d --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/health.go @@ -0,0 +1,57 @@ +package container + +import ( + "fmt" + "strings" + "time" +) + +// HealthStatus is a string representation of the container's health. +type HealthStatus string + +// Health states +const ( + NoHealthcheck HealthStatus = "none" // Indicates there is no healthcheck + Starting HealthStatus = "starting" // Starting indicates that the container is not yet ready + Healthy HealthStatus = "healthy" // Healthy indicates that the container is running correctly + Unhealthy HealthStatus = "unhealthy" // Unhealthy indicates that the container has a problem +) + +// Health stores information about the container's healthcheck results +type Health struct { + Status HealthStatus // Status is one of [Starting], [Healthy] or [Unhealthy]. 
+ FailingStreak int // FailingStreak is the number of consecutive failures + Log []*HealthcheckResult // Log contains the last few results (oldest first) +} + +// HealthSummary stores a summary of the container's healthcheck results. +type HealthSummary struct { + Status HealthStatus // Status is one of [NoHealthcheck], [Starting], [Healthy] or [Unhealthy]. + FailingStreak int // FailingStreak is the number of consecutive failures +} + +// HealthcheckResult stores information about a single run of a healthcheck probe +type HealthcheckResult struct { + Start time.Time // Start is the time this check started + End time.Time // End is the time this check ended + ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe + Output string // Output from last check +} + +var validHealths = []string{ + string(NoHealthcheck), + string(Starting), + string(Healthy), + string(Unhealthy), +} + +// ValidateHealthStatus checks if the provided string is a valid +// container [HealthStatus]. 
+func ValidateHealthStatus(s HealthStatus) error { + switch s { + case NoHealthcheck, Starting, Healthy, Unhealthy: + return nil + default: + return errInvalidParameter{error: fmt.Errorf("invalid value for health (%s): must be one of %s", s, strings.Join(validHealths, ", "))} + } +} diff --git a/vendor/github.com/moby/moby/api/types/container/hostconfig.go b/vendor/github.com/moby/moby/api/types/container/hostconfig.go new file mode 100644 index 00000000000..0f889c65124 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/hostconfig.go @@ -0,0 +1,495 @@ +package container + +import ( + "errors" + "fmt" + "net/netip" + "strings" + + "github.com/docker/go-units" + "github.com/moby/moby/api/types/blkiodev" + "github.com/moby/moby/api/types/mount" + "github.com/moby/moby/api/types/network" +) + +// CgroupnsMode represents the cgroup namespace mode of the container +type CgroupnsMode string + +// cgroup namespace modes for containers +const ( + CgroupnsModeEmpty CgroupnsMode = "" + CgroupnsModePrivate CgroupnsMode = "private" + CgroupnsModeHost CgroupnsMode = "host" +) + +// IsPrivate indicates whether the container uses its own private cgroup namespace +func (c CgroupnsMode) IsPrivate() bool { + return c == CgroupnsModePrivate +} + +// IsHost indicates whether the container shares the host's cgroup namespace +func (c CgroupnsMode) IsHost() bool { + return c == CgroupnsModeHost +} + +// IsEmpty indicates whether the container cgroup namespace mode is unset +func (c CgroupnsMode) IsEmpty() bool { + return c == CgroupnsModeEmpty +} + +// Valid indicates whether the cgroup namespace mode is valid +func (c CgroupnsMode) Valid() bool { + return c.IsEmpty() || c.IsPrivate() || c.IsHost() +} + +// Isolation represents the isolation technology of a container. 
The supported +// values are platform specific +type Isolation string + +// Isolation modes for containers +const ( + IsolationEmpty Isolation = "" // IsolationEmpty is unspecified (same behavior as default) + IsolationDefault Isolation = "default" // IsolationDefault is the default isolation mode on current daemon + IsolationProcess Isolation = "process" // IsolationProcess is process isolation mode + IsolationHyperV Isolation = "hyperv" // IsolationHyperV is HyperV isolation mode +) + +// IsDefault indicates the default isolation technology of a container. On Linux this +// is the native driver. On Windows, this is a Windows Server Container. +func (i Isolation) IsDefault() bool { + // TODO consider making isolation-mode strict (case-sensitive) + v := Isolation(strings.ToLower(string(i))) + return v == IsolationDefault || v == IsolationEmpty +} + +// IsHyperV indicates the use of a Hyper-V partition for isolation +func (i Isolation) IsHyperV() bool { + // TODO consider making isolation-mode strict (case-sensitive) + return Isolation(strings.ToLower(string(i))) == IsolationHyperV +} + +// IsProcess indicates the use of process isolation +func (i Isolation) IsProcess() bool { + // TODO consider making isolation-mode strict (case-sensitive) + return Isolation(strings.ToLower(string(i))) == IsolationProcess +} + +// IpcMode represents the container ipc stack. +type IpcMode string + +// IpcMode constants +const ( + IPCModeNone IpcMode = "none" + IPCModeHost IpcMode = "host" + IPCModeContainer IpcMode = "container" + IPCModePrivate IpcMode = "private" + IPCModeShareable IpcMode = "shareable" +) + +// IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared. +func (n IpcMode) IsPrivate() bool { + return n == IPCModePrivate +} + +// IsHost indicates whether the container shares the host's ipc namespace. 
+func (n IpcMode) IsHost() bool { + return n == IPCModeHost +} + +// IsShareable indicates whether the container's ipc namespace can be shared with another container. +func (n IpcMode) IsShareable() bool { + return n == IPCModeShareable +} + +// IsContainer indicates whether the container uses another container's ipc namespace. +func (n IpcMode) IsContainer() bool { + _, ok := containerID(string(n)) + return ok +} + +// IsNone indicates whether container IpcMode is set to "none". +func (n IpcMode) IsNone() bool { + return n == IPCModeNone +} + +// IsEmpty indicates whether container IpcMode is empty +func (n IpcMode) IsEmpty() bool { + return n == "" +} + +// Valid indicates whether the ipc mode is valid. +func (n IpcMode) Valid() bool { + // TODO(thaJeztah): align with PidMode, and consider container-mode without a container name/ID to be invalid. + return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer() +} + +// Container returns the name of the container ipc stack is going to be used. +func (n IpcMode) Container() (idOrName string) { + idOrName, _ = containerID(string(n)) + return idOrName +} + +// NetworkMode represents the container network stack. +type NetworkMode string + +// IsNone indicates whether container isn't using a network stack. +func (n NetworkMode) IsNone() bool { + return n == network.NetworkNone +} + +// IsDefault indicates whether container uses the default network stack. +func (n NetworkMode) IsDefault() bool { + return n == network.NetworkDefault +} + +// IsPrivate indicates whether container uses its private network stack. +func (n NetworkMode) IsPrivate() bool { + return !n.IsHost() && !n.IsContainer() +} + +// IsContainer indicates whether container uses a container network stack. +func (n NetworkMode) IsContainer() bool { + _, ok := containerID(string(n)) + return ok +} + +// ConnectedContainer is the id of the container which network this container is connected to. 
+func (n NetworkMode) ConnectedContainer() (idOrName string) { + idOrName, _ = containerID(string(n)) + return idOrName +} + +// UserDefined indicates user-created network +func (n NetworkMode) UserDefined() string { + if n.IsUserDefined() { + return string(n) + } + return "" +} + +// UsernsMode represents userns mode in the container. +type UsernsMode string + +// IsHost indicates whether the container uses the host's userns. +func (n UsernsMode) IsHost() bool { + return n == "host" +} + +// IsPrivate indicates whether the container uses the a private userns. +func (n UsernsMode) IsPrivate() bool { + return !n.IsHost() +} + +// Valid indicates whether the userns is valid. +func (n UsernsMode) Valid() bool { + return n == "" || n.IsHost() +} + +// CgroupSpec represents the cgroup to use for the container. +type CgroupSpec string + +// IsContainer indicates whether the container is using another container cgroup +func (c CgroupSpec) IsContainer() bool { + _, ok := containerID(string(c)) + return ok +} + +// Valid indicates whether the cgroup spec is valid. +func (c CgroupSpec) Valid() bool { + // TODO(thaJeztah): align with PidMode, and consider container-mode without a container name/ID to be invalid. + return c == "" || c.IsContainer() +} + +// Container returns the ID or name of the container whose cgroup will be used. +func (c CgroupSpec) Container() (idOrName string) { + idOrName, _ = containerID(string(c)) + return idOrName +} + +// UTSMode represents the UTS namespace of the container. +type UTSMode string + +// IsPrivate indicates whether the container uses its private UTS namespace. +func (n UTSMode) IsPrivate() bool { + return !n.IsHost() +} + +// IsHost indicates whether the container uses the host's UTS namespace. +func (n UTSMode) IsHost() bool { + return n == "host" +} + +// Valid indicates whether the UTS namespace is valid. +func (n UTSMode) Valid() bool { + return n == "" || n.IsHost() +} + +// PidMode represents the pid namespace of the container. 
+type PidMode string + +// IsPrivate indicates whether the container uses its own new pid namespace. +func (n PidMode) IsPrivate() bool { + return !n.IsHost() && !n.IsContainer() +} + +// IsHost indicates whether the container uses the host's pid namespace. +func (n PidMode) IsHost() bool { + return n == "host" +} + +// IsContainer indicates whether the container uses a container's pid namespace. +func (n PidMode) IsContainer() bool { + _, ok := containerID(string(n)) + return ok +} + +// Valid indicates whether the pid namespace is valid. +func (n PidMode) Valid() bool { + return n == "" || n.IsHost() || validContainer(string(n)) +} + +// Container returns the name of the container whose pid namespace is going to be used. +func (n PidMode) Container() (idOrName string) { + idOrName, _ = containerID(string(n)) + return idOrName +} + +// DeviceRequest represents a request for devices from a device driver. +// Used by GPU device drivers. +type DeviceRequest struct { + Driver string // Name of device driver + Count int // Number of devices to request (-1 = All) + DeviceIDs []string // List of device IDs as recognizable by the device driver + Capabilities [][]string // An OR list of AND lists of device capabilities (e.g. "gpu") + Options map[string]string // Options to pass onto the device driver +} + +// DeviceMapping represents the device mapping between the host and the container. +type DeviceMapping struct { + PathOnHost string + PathInContainer string + CgroupPermissions string +} + +// RestartPolicy represents the restart policies of the container. 
+type RestartPolicy struct { + Name RestartPolicyMode + MaximumRetryCount int +} + +type RestartPolicyMode string + +const ( + RestartPolicyDisabled RestartPolicyMode = "no" + RestartPolicyAlways RestartPolicyMode = "always" + RestartPolicyOnFailure RestartPolicyMode = "on-failure" + RestartPolicyUnlessStopped RestartPolicyMode = "unless-stopped" +) + +// IsNone indicates whether the container has the "no" restart policy. +// This means the container will not automatically restart when exiting. +func (rp *RestartPolicy) IsNone() bool { + return rp.Name == RestartPolicyDisabled || rp.Name == "" +} + +// IsAlways indicates whether the container has the "always" restart policy. +// This means the container will automatically restart regardless of the exit status. +func (rp *RestartPolicy) IsAlways() bool { + return rp.Name == RestartPolicyAlways +} + +// IsOnFailure indicates whether the container has the "on-failure" restart policy. +// This means the container will automatically restart of exiting with a non-zero exit status. +func (rp *RestartPolicy) IsOnFailure() bool { + return rp.Name == RestartPolicyOnFailure +} + +// IsUnlessStopped indicates whether the container has the +// "unless-stopped" restart policy. This means the container will +// automatically restart unless user has put it to stopped state. +func (rp *RestartPolicy) IsUnlessStopped() bool { + return rp.Name == RestartPolicyUnlessStopped +} + +// IsSame compares two RestartPolicy to see if they are the same +func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { + return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount +} + +// ValidateRestartPolicy validates the given RestartPolicy. 
+func ValidateRestartPolicy(policy RestartPolicy) error { + switch policy.Name { + case RestartPolicyAlways, RestartPolicyUnlessStopped, RestartPolicyDisabled: + if policy.MaximumRetryCount != 0 { + msg := "invalid restart policy: maximum retry count can only be used with 'on-failure'" + if policy.MaximumRetryCount < 0 { + msg += " and cannot be negative" + } + return &errInvalidParameter{errors.New(msg)} + } + return nil + case RestartPolicyOnFailure: + if policy.MaximumRetryCount < 0 { + return &errInvalidParameter{errors.New("invalid restart policy: maximum retry count cannot be negative")} + } + return nil + case "": + // Versions before v25.0.0 created an empty restart-policy "name" as + // default. Allow an empty name with "any" MaximumRetryCount for + // backward-compatibility. + return nil + default: + return &errInvalidParameter{fmt.Errorf("invalid restart policy: unknown policy '%s'; use one of '%s', '%s', '%s', or '%s'", policy.Name, RestartPolicyDisabled, RestartPolicyAlways, RestartPolicyOnFailure, RestartPolicyUnlessStopped)} + } +} + +// LogMode is a type to define the available modes for logging +// These modes affect how logs are handled when log messages start piling up. +type LogMode string + +// Available logging modes +const ( + LogModeUnset LogMode = "" + LogModeBlocking LogMode = "blocking" + LogModeNonBlock LogMode = "non-blocking" +) + +// LogConfig represents the logging configuration of the container. +type LogConfig struct { + Type string + Config map[string]string +} + +// Ulimit is an alias for [units.Ulimit], which may be moving to a different +// location or become a local type. This alias is to help transitioning. +// +// Users are recommended to use this alias instead of using [units.Ulimit] directly. +type Ulimit = units.Ulimit + +// Resources contains container's resources (cgroups config, ulimits...) +type Resources struct { + // Applicable to all platforms + CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. 
other containers) + Memory int64 // Memory limit (in bytes) + NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10-9 CPUs. + + // Applicable to UNIX platforms + CgroupParent string // Parent cgroup. + BlkioWeight uint16 // Block IO weight (relative weight vs. other containers) + BlkioWeightDevice []*blkiodev.WeightDevice + BlkioDeviceReadBps []*blkiodev.ThrottleDevice + BlkioDeviceWriteBps []*blkiodev.ThrottleDevice + BlkioDeviceReadIOps []*blkiodev.ThrottleDevice + BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice + CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period + CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota + CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period + CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime + CpusetCpus string // CpusetCpus 0-2, 0,1 + CpusetMems string // CpusetMems 0-2, 0,1 + Devices []DeviceMapping // List of devices to map inside the container + DeviceCgroupRules []string // List of rule to be added to the device cgroup + DeviceRequests []DeviceRequest // List of device requests for device drivers + MemoryReservation int64 // Memory soft limit (in bytes) + MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap + MemorySwappiness *int64 // Tuning container memory swappiness behaviour + OomKillDisable *bool // Whether to disable OOM Killer or not + PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change. + Ulimits []*Ulimit // List of ulimits to be set in the container + + // Applicable to Windows + CPUCount int64 `json:"CpuCount"` // CPU count + CPUPercent int64 `json:"CpuPercent"` // CPU percent + IOMaximumIOps uint64 // Maximum IOps for the container system drive + IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive +} + +// UpdateConfig holds the mutable attributes of a Container. 
+// Those attributes can be updated at runtime. +type UpdateConfig struct { + // Contains container's resources (cgroups, ulimits) + Resources + RestartPolicy RestartPolicy +} + +// HostConfig the non-portable Config structure of a container. +// Here, "non-portable" means "dependent of the host we are running on". +// Portable information *should* appear in Config. +type HostConfig struct { + // Applicable to all platforms + Binds []string // List of volume bindings for this container + ContainerIDFile string // File (path) where the containerId is written + LogConfig LogConfig // Configuration of the logs for this container + NetworkMode NetworkMode // Network mode to use for the container + PortBindings network.PortMap // Port mapping between the exposed port (container) and the host + RestartPolicy RestartPolicy // Restart policy to be used for the container + AutoRemove bool // Automatically remove container when it exits + VolumeDriver string // Name of the volume driver used to mount volumes + VolumesFrom []string // List of volumes to take from other container + ConsoleSize [2]uint // Initial console size (height,width) + Annotations map[string]string `json:",omitempty"` // Arbitrary non-identifying metadata attached to container and provided to the runtime + + // Applicable to UNIX platforms + CapAdd []string // List of kernel capabilities to add to the container + CapDrop []string // List of kernel capabilities to remove from the container + CgroupnsMode CgroupnsMode // Cgroup namespace mode to use for the container + DNS []netip.Addr `json:"Dns"` // List of DNS server to lookup + DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for + DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for + ExtraHosts []string // List of extra hosts + GroupAdd []string // List of additional groups that the container process will run as + IpcMode IpcMode // IPC namespace to use for the container + Cgroup CgroupSpec // Cgroup to use for 
the container + Links []string // List of links (in the name:alias form) + OomScoreAdj int // Container preference for OOM-killing + PidMode PidMode // PID namespace to use for the container + Privileged bool // Is the container in privileged mode + PublishAllPorts bool // Should docker publish all exposed port for the container + ReadonlyRootfs bool // Is the container root filesystem in read-only + SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. + StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container. + Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container + UTSMode UTSMode // UTS namespace to use for the container + UsernsMode UsernsMode // The user namespace to use for the container + ShmSize int64 // Total shm memory usage + Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container + Runtime string `json:",omitempty"` // Runtime to use with this container + + // Applicable to Windows + Isolation Isolation // Isolation technology of the container (e.g. default, hyperv) + + // Contains container's resources (cgroups, ulimits) + Resources + + // Mounts specs used by the container + Mounts []mount.Mount `json:",omitempty"` + + // MaskedPaths is the list of paths to be masked inside the container (this overrides the default set of paths) + MaskedPaths []string + + // ReadonlyPaths is the list of paths to be set as read-only inside the container (this overrides the default set of paths) + ReadonlyPaths []string + + // Run a custom init inside the container, if null, use the daemon's configured settings + Init *bool `json:",omitempty"` +} + +// containerID splits "container:" values. It returns the container +// ID or name, and whether an ID/name was found. It returns an empty string and +// a "false" if the value does not have a "container:" prefix. 
Further validation +// of the returned, including checking if the value is empty, should be handled +// by the caller. +func containerID(val string) (idOrName string, ok bool) { + k, v, hasSep := strings.Cut(val, ":") + if !hasSep || k != "container" { + return "", false + } + return v, true +} + +// validContainer checks if the given value is a "container:" mode with +// a non-empty name/ID. +func validContainer(val string) bool { + id, ok := containerID(val) + return ok && id != "" +} diff --git a/vendor/github.com/moby/moby/api/types/container/hostconfig_unix.go b/vendor/github.com/moby/moby/api/types/container/hostconfig_unix.go new file mode 100644 index 00000000000..326a5da7ebe --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/hostconfig_unix.go @@ -0,0 +1,45 @@ +//go:build !windows + +package container + +import "github.com/moby/moby/api/types/network" + +// IsValid indicates if an isolation technology is valid +func (i Isolation) IsValid() bool { + return i.IsDefault() +} + +// IsBridge indicates whether container uses the bridge network stack +func (n NetworkMode) IsBridge() bool { + return n == network.NetworkBridge +} + +// IsHost indicates whether container uses the host network stack. +func (n NetworkMode) IsHost() bool { + return n == network.NetworkHost +} + +// IsUserDefined indicates user-created network +func (n NetworkMode) IsUserDefined() bool { + return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() +} + +// NetworkName returns the name of the network stack. 
+func (n NetworkMode) NetworkName() string { + switch { + case n.IsDefault(): + return network.NetworkDefault + case n.IsBridge(): + return network.NetworkBridge + case n.IsHost(): + return network.NetworkHost + case n.IsNone(): + return network.NetworkNone + case n.IsContainer(): + return "container" + case n.IsUserDefined(): + return n.UserDefined() + default: + return "" + } +} diff --git a/vendor/github.com/moby/moby/api/types/container/hostconfig_windows.go b/vendor/github.com/moby/moby/api/types/container/hostconfig_windows.go new file mode 100644 index 00000000000..977a3760237 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/hostconfig_windows.go @@ -0,0 +1,47 @@ +package container + +import "github.com/moby/moby/api/types/network" + +// IsValid indicates if an isolation technology is valid +func (i Isolation) IsValid() bool { + return i.IsDefault() || i.IsHyperV() || i.IsProcess() +} + +// IsBridge indicates whether container uses the bridge network stack +// in windows it is given the name NAT +func (n NetworkMode) IsBridge() bool { + return n == network.NetworkNat +} + +// IsHost indicates whether container uses the host network stack. +// returns false as this is not supported by windows +func (n NetworkMode) IsHost() bool { + return false +} + +// IsUserDefined indicates user-created network +func (n NetworkMode) IsUserDefined() bool { + return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer() +} + +// NetworkName returns the name of the network stack. +func (n NetworkMode) NetworkName() string { + switch { + case n.IsDefault(): + return network.NetworkDefault + case n.IsBridge(): + return network.NetworkNat + case n.IsHost(): + // Windows currently doesn't support host network-mode, so + // this would currently never happen.. 
+ return network.NetworkHost + case n.IsNone(): + return network.NetworkNone + case n.IsContainer(): + return "container" + case n.IsUserDefined(): + return n.UserDefined() + default: + return "" + } +} diff --git a/vendor/github.com/moby/moby/api/types/container/network_settings.go b/vendor/github.com/moby/moby/api/types/container/network_settings.go new file mode 100644 index 00000000000..c51c0839d23 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/network_settings.go @@ -0,0 +1,22 @@ +package container + +import ( + "github.com/moby/moby/api/types/network" +) + +// NetworkSettings exposes the network settings in the api +type NetworkSettings struct { + SandboxID string // SandboxID uniquely represents a container's network stack + SandboxKey string // SandboxKey identifies the sandbox + + // Ports is a collection of [network.PortBinding] indexed by [network.Port] + Ports network.PortMap + + Networks map[string]*network.EndpointSettings +} + +// NetworkSettingsSummary provides a summary of container's networks +// in /containers/json +type NetworkSettingsSummary struct { + Networks map[string]*network.EndpointSettings +} diff --git a/vendor/github.com/moby/moby/api/types/container/port_summary.go b/vendor/github.com/moby/moby/api/types/container/port_summary.go new file mode 100644 index 00000000000..68148eece46 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/port_summary.go @@ -0,0 +1,33 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package container + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/netip" +) + +// PortSummary Describes a port-mapping between the container and the host. 
+// +// Example: {"PrivatePort":8080,"PublicPort":80,"Type":"tcp"} +// +// swagger:model PortSummary +type PortSummary struct { + + // Host IP address that the container's port is mapped to + IP netip.Addr `json:"IP,omitempty"` + + // Port on the container + // Required: true + PrivatePort uint16 `json:"PrivatePort"` + + // Port exposed on the host + PublicPort uint16 `json:"PublicPort,omitempty"` + + // type + // Required: true + // Enum: ["tcp","udp","sctp"] + Type string `json:"Type"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/state.go b/vendor/github.com/moby/moby/api/types/container/state.go new file mode 100644 index 00000000000..47c6d124902 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/state.go @@ -0,0 +1,40 @@ +package container + +import ( + "fmt" + "strings" +) + +// ContainerState is a string representation of the container's current state. +type ContainerState string + +const ( + StateCreated ContainerState = "created" // StateCreated indicates the container is created, but not (yet) started. + StateRunning ContainerState = "running" // StateRunning indicates that the container is running. + StatePaused ContainerState = "paused" // StatePaused indicates that the container's current state is paused. + StateRestarting ContainerState = "restarting" // StateRestarting indicates that the container is currently restarting. + StateRemoving ContainerState = "removing" // StateRemoving indicates that the container is being removed. + StateExited ContainerState = "exited" // StateExited indicates that the container exited. + StateDead ContainerState = "dead" // StateDead indicates that the container failed to be deleted. Containers in this state are attempted to be cleaned up when the daemon restarts. 
+) + +var validStates = []string{ + string(StateCreated), + string(StateRunning), + string(StatePaused), + string(StateRestarting), + string(StateRemoving), + string(StateExited), + string(StateDead), +} + +// ValidateContainerState checks if the provided string is a valid +// container [ContainerState]. +func ValidateContainerState(s ContainerState) error { + switch s { + case StateCreated, StateRunning, StatePaused, StateRestarting, StateRemoving, StateExited, StateDead: + return nil + default: + return errInvalidParameter{error: fmt.Errorf("invalid value for state (%s): must be one of %s", s, strings.Join(validStates, ", "))} + } +} diff --git a/vendor/github.com/moby/moby/api/types/container/stats.go b/vendor/github.com/moby/moby/api/types/container/stats.go new file mode 100644 index 00000000000..6a34f6ab76c --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/stats.go @@ -0,0 +1,224 @@ +package container + +import "time" + +// ThrottlingData stores CPU throttling stats of one running container. +// Not used on Windows. +type ThrottlingData struct { + // Number of periods with throttling active + Periods uint64 `json:"periods"` + // Number of periods when the container hits its throttling limit. + ThrottledPeriods uint64 `json:"throttled_periods"` + // Aggregate time the container was throttled for in nanoseconds. + ThrottledTime uint64 `json:"throttled_time"` +} + +// CPUUsage stores All CPU stats aggregated since container inception. +type CPUUsage struct { + // Total CPU time consumed. + // Units: nanoseconds (Linux) + // Units: 100's of nanoseconds (Windows) + TotalUsage uint64 `json:"total_usage"` + + // Total CPU time consumed per core (Linux). Not used on Windows. + // Units: nanoseconds. + PercpuUsage []uint64 `json:"percpu_usage,omitempty"` + + // Time spent by tasks of the cgroup in kernel mode (Linux). + // Time spent by all container processes in kernel mode (Windows). + // Units: nanoseconds (Linux). 
+ // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers. + UsageInKernelmode uint64 `json:"usage_in_kernelmode"` + + // Time spent by tasks of the cgroup in user mode (Linux). + // Time spent by all container processes in user mode (Windows). + // Units: nanoseconds (Linux). + // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers + UsageInUsermode uint64 `json:"usage_in_usermode"` +} + +// CPUStats aggregates and wraps all CPU related info of container +type CPUStats struct { + // CPU Usage. Linux and Windows. + CPUUsage CPUUsage `json:"cpu_usage"` + + // System Usage. Linux only. + SystemUsage uint64 `json:"system_cpu_usage,omitempty"` + + // Online CPUs. Linux only. + OnlineCPUs uint32 `json:"online_cpus,omitempty"` + + // Throttling Data. Linux only. + ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` +} + +// MemoryStats aggregates all memory stats since container inception on Linux. +// Windows returns stats for commit and private working set only. +type MemoryStats struct { + // Linux Memory Stats + + // current res_counter usage for memory + Usage uint64 `json:"usage,omitempty"` + // maximum usage ever recorded. + MaxUsage uint64 `json:"max_usage,omitempty"` + // TODO(vishh): Export these as stronger types. + // all the stats exported via memory.stat. + Stats map[string]uint64 `json:"stats,omitempty"` + // number of times memory usage hits limits. + Failcnt uint64 `json:"failcnt,omitempty"` + Limit uint64 `json:"limit,omitempty"` + + // Windows Memory Stats + // See https://technet.microsoft.com/en-us/magazine/ff382715.aspx + + // committed bytes + Commit uint64 `json:"commitbytes,omitempty"` + // peak committed bytes + CommitPeak uint64 `json:"commitpeakbytes,omitempty"` + // private working set + PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"` +} + +// BlkioStatEntry is one small entity to store a piece of Blkio stats +// Not used on Windows. 
+type BlkioStatEntry struct { + Major uint64 `json:"major"` + Minor uint64 `json:"minor"` + Op string `json:"op"` + Value uint64 `json:"value"` +} + +// BlkioStats stores All IO service stats for data read and write. +// This is a Linux specific structure as the differences between expressing +// block I/O on Windows and Linux are sufficiently significant to make +// little sense attempting to morph into a combined structure. +type BlkioStats struct { + // number of bytes transferred to and from the block device + IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` + IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` + IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` + IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` + IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` + IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` + IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` + SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` +} + +// StorageStats is the disk I/O stats for read/write on Windows. +type StorageStats struct { + ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"` + ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"` + WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"` + WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"` +} + +// NetworkStats aggregates the network stats of one container +type NetworkStats struct { + // Bytes received. Windows and Linux. + RxBytes uint64 `json:"rx_bytes"` + // Packets received. Windows and Linux. + RxPackets uint64 `json:"rx_packets"` + // Received errors. Not used on Windows. Note that we don't `omitempty` this + // field as it is expected in the >=v1.21 API stats structure. + RxErrors uint64 `json:"rx_errors"` + // Incoming packets dropped. Windows and Linux. + RxDropped uint64 `json:"rx_dropped"` + // Bytes sent. Windows and Linux. 
+ TxBytes uint64 `json:"tx_bytes"` + // Packets sent. Windows and Linux. + TxPackets uint64 `json:"tx_packets"` + // Sent errors. Not used on Windows. Note that we don't `omitempty` this + // field as it is expected in the >=v1.21 API stats structure. + TxErrors uint64 `json:"tx_errors"` + // Outgoing packets dropped. Windows and Linux. + TxDropped uint64 `json:"tx_dropped"` + // Endpoint ID. Not used on Linux. + EndpointID string `json:"endpoint_id,omitempty"` + // Instance ID. Not used on Linux. + InstanceID string `json:"instance_id,omitempty"` +} + +// PidsStats contains the stats of a container's pids +type PidsStats struct { + // Current is the number of pids in the cgroup + Current uint64 `json:"current,omitempty"` + // Limit is the hard limit on the number of pids in the cgroup. + // A "Limit" of 0 means that there is no limit. + Limit uint64 `json:"limit,omitempty"` +} + +// StatsResponse aggregates all types of stats of one container. +type StatsResponse struct { + // ID is the ID of the container for which the stats were collected. + ID string `json:"id,omitempty"` + + // Name is the name of the container for which the stats were collected. + Name string `json:"name,omitempty"` + + // OSType is the OS of the container ("linux" or "windows") to allow + // platform-specific handling of stats. + OSType string `json:"os_type,omitempty"` + + // Read is the date and time at which this sample was collected. + Read time.Time `json:"read"` + + // CPUStats contains CPU related info of the container. + CPUStats CPUStats `json:"cpu_stats,omitempty"` + + // MemoryStats aggregates all memory stats since container inception on Linux. + // Windows returns stats for commit and private working set only. + MemoryStats MemoryStats `json:"memory_stats,omitempty"` + + // Networks contains Nntwork statistics for the container per interface. + // + // This field is omitted if the container has no networking enabled. 
+ Networks map[string]NetworkStats `json:"networks,omitempty"` + + // ------------------------------------------------------------------------- + // Linux-specific stats, not populated on Windows. + // ------------------------------------------------------------------------- + + // PidsStats contains Linux-specific stats of a container's process-IDs (PIDs). + // + // This field is Linux-specific and omitted for Windows containers. + PidsStats PidsStats `json:"pids_stats,omitempty"` + + // BlkioStats stores all IO service stats for data read and write. + // + // This type is Linux-specific and holds many fields that are specific + // to cgroups v1. + // + // On a cgroup v2 host, all fields other than "io_service_bytes_recursive" + // are omitted or "null". + // + // This type is only populated on Linux and omitted for Windows containers. + BlkioStats BlkioStats `json:"blkio_stats,omitempty"` + + // ------------------------------------------------------------------------- + // Windows-specific stats, not populated on Linux. + // ------------------------------------------------------------------------- + + // NumProcs is the number of processors on the system. + // + // This field is Windows-specific and always zero for Linux containers. + NumProcs uint32 `json:"num_procs"` + + // StorageStats is the disk I/O stats for read/write on Windows. + // + // This type is Windows-specific and omitted for Linux containers. + StorageStats StorageStats `json:"storage_stats,omitempty"` + + // ------------------------------------------------------------------------- + // PreRead and PreCPUStats contain the previous sample of stats for + // the container, and can be used to perform delta-calculation. + // ------------------------------------------------------------------------- + + // PreRead is the date and time at which this first sample was collected. + // This field is not propagated if the "one-shot" option is set. 
If the + // "one-shot" option is set, this field may be omitted, empty, or set + // to a default date (`0001-01-01T00:00:00Z`). + PreRead time.Time `json:"preread"` + + // PreCPUStats contains the CPUStats of the previous sample. + PreCPUStats CPUStats `json:"precpu_stats,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/top_response.go b/vendor/github.com/moby/moby/api/types/container/top_response.go new file mode 100644 index 00000000000..966603617f1 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/top_response.go @@ -0,0 +1,23 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package container + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// TopResponse ContainerTopResponse +// +// Container "top" response. +// +// swagger:model TopResponse +type TopResponse struct { + + // Each process running in the container, where each process + // is an array of values corresponding to the titles. + // Example: {"Processes":[["root","13642","882","0","17:03","pts/0","00:00:00","/bin/bash"],["root","13735","13642","0","17:06","pts/0","00:00:00","sleep 10"]]} + Processes [][]string `json:"Processes"` + + // The ps column titles + // Example: {"Titles":["UID","PID","PPID","C","STIME","TTY","TIME","CMD"]} + Titles []string `json:"Titles"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/update_response.go b/vendor/github.com/moby/moby/api/types/container/update_response.go new file mode 100644 index 00000000000..2f7263b1411 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/update_response.go @@ -0,0 +1,18 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package container + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// UpdateResponse ContainerUpdateResponse +// +// Response for a successful container-update. 
+// +// swagger:model UpdateResponse +type UpdateResponse struct { + + // Warnings encountered when updating the container. + // Example: ["Published ports are discarded when using host network mode"] + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/wait_exit_error.go b/vendor/github.com/moby/moby/api/types/container/wait_exit_error.go new file mode 100644 index 00000000000..96a7770c34a --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/wait_exit_error.go @@ -0,0 +1,15 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package container + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// WaitExitError container waiting error, if any +// +// swagger:model WaitExitError +type WaitExitError struct { + + // Details of an error + Message string `json:"Message,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/wait_response.go b/vendor/github.com/moby/moby/api/types/container/wait_response.go new file mode 100644 index 00000000000..68d3c387242 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/wait_response.go @@ -0,0 +1,21 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package container + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// WaitResponse ContainerWaitResponse +// +// # OK response to ContainerWait operation +// +// swagger:model WaitResponse +type WaitResponse struct { + + // error + Error *WaitExitError `json:"Error,omitempty"` + + // Exit code of the container + // Required: true + StatusCode int64 `json:"StatusCode"` +} diff --git a/vendor/github.com/moby/moby/api/types/container/waitcondition.go b/vendor/github.com/moby/moby/api/types/container/waitcondition.go new file mode 100644 index 00000000000..64820fe3583 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/container/waitcondition.go @@ -0,0 +1,22 @@ +package container + +// WaitCondition is a type used to specify a container state for which +// to wait. +type WaitCondition string + +// Possible WaitCondition Values. +// +// WaitConditionNotRunning (default) is used to wait for any of the non-running +// states: "created", "exited", "dead", "removing", or "removed". +// +// WaitConditionNextExit is used to wait for the next time the state changes +// to a non-running state. If the state is currently "created" or "exited", +// this would cause Wait() to block until either the container runs and exits +// or is removed. +// +// WaitConditionRemoved is used to wait for the container to be removed. +const ( + WaitConditionNotRunning WaitCondition = "not-running" + WaitConditionNextExit WaitCondition = "next-exit" + WaitConditionRemoved WaitCondition = "removed" +) diff --git a/vendor/github.com/moby/moby/api/types/events/events.go b/vendor/github.com/moby/moby/api/types/events/events.go new file mode 100644 index 00000000000..b8393addd86 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/events/events.go @@ -0,0 +1,121 @@ +package events + +// Type is used for event-types. +type Type string + +// List of known event types. 
+const ( + BuilderEventType Type = "builder" // BuilderEventType is the event type that the builder generates. + ConfigEventType Type = "config" // ConfigEventType is the event type that configs generate. + ContainerEventType Type = "container" // ContainerEventType is the event type that containers generate. + DaemonEventType Type = "daemon" // DaemonEventType is the event type that daemon generate. + ImageEventType Type = "image" // ImageEventType is the event type that images generate. + NetworkEventType Type = "network" // NetworkEventType is the event type that networks generate. + NodeEventType Type = "node" // NodeEventType is the event type that nodes generate. + PluginEventType Type = "plugin" // PluginEventType is the event type that plugins generate. + SecretEventType Type = "secret" // SecretEventType is the event type that secrets generate. + ServiceEventType Type = "service" // ServiceEventType is the event type that services generate. + VolumeEventType Type = "volume" // VolumeEventType is the event type that volumes generate. +) + +// Action is used for event-actions. 
+type Action string + +const ( + ActionCreate Action = "create" + ActionStart Action = "start" + ActionRestart Action = "restart" + ActionStop Action = "stop" + ActionCheckpoint Action = "checkpoint" + ActionPause Action = "pause" + ActionUnPause Action = "unpause" + ActionAttach Action = "attach" + ActionDetach Action = "detach" + ActionResize Action = "resize" + ActionUpdate Action = "update" + ActionRename Action = "rename" + ActionKill Action = "kill" + ActionDie Action = "die" + ActionOOM Action = "oom" + ActionDestroy Action = "destroy" + ActionRemove Action = "remove" + ActionCommit Action = "commit" + ActionTop Action = "top" + ActionCopy Action = "copy" + ActionArchivePath Action = "archive-path" + ActionExtractToDir Action = "extract-to-dir" + ActionExport Action = "export" + ActionImport Action = "import" + ActionSave Action = "save" + ActionLoad Action = "load" + ActionTag Action = "tag" + ActionUnTag Action = "untag" + ActionPush Action = "push" + ActionPull Action = "pull" + ActionPrune Action = "prune" + ActionDelete Action = "delete" + ActionEnable Action = "enable" + ActionDisable Action = "disable" + ActionConnect Action = "connect" + ActionDisconnect Action = "disconnect" + ActionReload Action = "reload" + ActionMount Action = "mount" + ActionUnmount Action = "unmount" + + // ActionExecCreate is the prefix used for exec_create events. These + // event-actions are commonly followed by a colon and space (": "), + // and the command that's defined for the exec, for example: + // + // exec_create: /bin/sh -c 'echo hello' + // + // This is far from ideal; it's a compromise to allow filtering and + // to preserve backward-compatibility. + ActionExecCreate Action = "exec_create" + // ActionExecStart is the prefix used for exec_create events. 
These + // event-actions are commonly followed by a colon and space (": "), + // and the command that's defined for the exec, for example: + // + // exec_start: /bin/sh -c 'echo hello' + // + // This is far from ideal; it's a compromise to allow filtering and + // to preserve backward-compatibility. + ActionExecStart Action = "exec_start" + ActionExecDie Action = "exec_die" + ActionExecDetach Action = "exec_detach" + + // ActionHealthStatus is the prefix to use for health_status events. + // + // Health-status events can either have a pre-defined status, in which + // case the "health_status" action is followed by a colon, or can be + // "free-form", in which case they're followed by the output of the + // health-check output. + // + // This is far form ideal, and a compromise to allow filtering, and + // to preserve backward-compatibility. + ActionHealthStatus Action = "health_status" + ActionHealthStatusRunning Action = "health_status: running" + ActionHealthStatusHealthy Action = "health_status: healthy" + ActionHealthStatusUnhealthy Action = "health_status: unhealthy" +) + +// Actor describes something that generates events, +// like a container, or a network, or a volume. +// It has a defined name and a set of attributes. +// The container attributes are its labels, other actors +// can generate these attributes from other properties. +type Actor struct { + ID string + Attributes map[string]string +} + +// Message represents the information an event contains +type Message struct { + Type Type + Action Action + Actor Actor + // Engine events are local scope. Cluster events are swarm scope. 
+ Scope string `json:"scope,omitempty"` + + Time int64 `json:"time,omitempty"` + TimeNano int64 `json:"timeNano,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/image/build_identity.go b/vendor/github.com/moby/moby/api/types/image/build_identity.go new file mode 100644 index 00000000000..1e827dc430d --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/image/build_identity.go @@ -0,0 +1,15 @@ +package image + +import ( + "time" +) + +// BuildIdentity contains build reference information if image was created via build. +type BuildIdentity struct { + // Ref is the identifier for the build request. This reference can be used to + // look up the build details in BuildKit history API. + Ref string `json:"Ref,omitempty"` + + // CreatedAt is the time when the build ran. + CreatedAt time.Time `json:"CreatedAt,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/image/delete_response.go b/vendor/github.com/moby/moby/api/types/image/delete_response.go new file mode 100644 index 00000000000..b19119a381e --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/image/delete_response.go @@ -0,0 +1,18 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package image + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// DeleteResponse delete response +// +// swagger:model DeleteResponse +type DeleteResponse struct { + + // The image ID of an image that was deleted + Deleted string `json:"Deleted,omitempty"` + + // The image ID of an image that was untagged + Untagged string `json:"Untagged,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/image/disk_usage.go b/vendor/github.com/moby/moby/api/types/image/disk_usage.go new file mode 100644 index 00000000000..7297813c1d5 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/image/disk_usage.go @@ -0,0 +1,36 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package image + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// DiskUsage represents system data usage for image resources. +// +// swagger:model DiskUsage +type DiskUsage struct { + + // Count of active images. + // + // Example: 1 + ActiveCount int64 `json:"ActiveCount,omitempty"` + + // List of image summaries. + // + Items []Summary `json:"Items,omitempty"` + + // Disk space that can be reclaimed by removing unused images. + // + // Example: 12345678 + Reclaimable int64 `json:"Reclaimable,omitempty"` + + // Count of all images. + // + // Example: 4 + TotalCount int64 `json:"TotalCount,omitempty"` + + // Disk space in use by images. + // + // Example: 98765432 + TotalSize int64 `json:"TotalSize,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/image/history_response_item.go b/vendor/github.com/moby/moby/api/types/image/history_response_item.go new file mode 100644 index 00000000000..3de3181ab95 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/image/history_response_item.go @@ -0,0 +1,38 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package image + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// HistoryResponseItem HistoryResponseItem +// +// individual image layer information in response to ImageHistory operation +// +// swagger:model HistoryResponseItem +type HistoryResponseItem struct { + + // comment + // Required: true + Comment string `json:"Comment"` + + // created + // Required: true + Created int64 `json:"Created"` + + // created by + // Required: true + CreatedBy string `json:"CreatedBy"` + + // Id + // Required: true + ID string `json:"Id"` + + // size + // Required: true + Size int64 `json:"Size"` + + // tags + // Required: true + Tags []string `json:"Tags"` +} diff --git a/vendor/github.com/moby/moby/api/types/image/identity.go b/vendor/github.com/moby/moby/api/types/image/identity.go new file mode 100644 index 00000000000..3e030456348 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/image/identity.go @@ -0,0 +1,15 @@ +package image + +// Identity holds information about the identity and origin of the image. +// This is trusted information verified by the daemon and cannot be modified +// by tagging an image to a different name. +type Identity struct { + // Signature contains the properties of verified signatures for the image. + Signature []SignatureIdentity `json:"Signature,omitzero"` + // Pull contains remote location information if image was created via pull. + // If image was pulled via mirror, this contains the original repository location. + // After successful push this images also contains the pushed repository location. + Pull []PullIdentity `json:"Pull,omitzero"` + // Build contains build reference information if image was created via build. 
+ Build []BuildIdentity `json:"Build,omitzero"` +} diff --git a/vendor/github.com/moby/moby/api/types/image/image.go b/vendor/github.com/moby/moby/api/types/image/image.go new file mode 100644 index 00000000000..1c8990ae900 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/image/image.go @@ -0,0 +1,18 @@ +package image + +import ( + "time" +) + +// Metadata contains engine-local data about the image. +type Metadata struct { + // LastTagTime is the date and time at which the image was last tagged. + LastTagTime time.Time `json:",omitempty"` +} + +// PruneReport contains the response for Engine API: +// POST "/images/prune" +type PruneReport struct { + ImagesDeleted []DeleteResponse + SpaceReclaimed uint64 +} diff --git a/vendor/github.com/moby/moby/api/types/image/image_inspect.go b/vendor/github.com/moby/moby/api/types/image/image_inspect.go new file mode 100644 index 00000000000..df09c9511bd --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/image/image_inspect.go @@ -0,0 +1,137 @@ +package image + +import ( + dockerspec "github.com/moby/docker-image-spec/specs-go/v1" + "github.com/moby/moby/api/types/storage" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// RootFS returns Image's RootFS description including the layer IDs. +type RootFS struct { + Type string `json:",omitempty"` + Layers []string `json:",omitempty"` +} + +// InspectResponse contains response of Engine API: +// GET "/images/{name:.*}/json" +type InspectResponse struct { + // ID is the content-addressable ID of an image. + // + // This identifier is a content-addressable digest calculated from the + // image's configuration (which includes the digests of layers used by + // the image). + // + // Note that this digest differs from the `RepoDigests` below, which + // holds digests of image manifests that reference the image. + ID string `json:"Id"` + + // RepoTags is a list of image names/tags in the local image cache that + // reference this image. 
+ // + // Multiple image tags can refer to the same image, and this list may be + // empty if no tags reference the image, in which case the image is + // "untagged", in which case it can still be referenced by its ID. + RepoTags []string + + // RepoDigests is a list of content-addressable digests of locally available + // image manifests that the image is referenced from. Multiple manifests can + // refer to the same image. + // + // These digests are usually only available if the image was either pulled + // from a registry, or if the image was pushed to a registry, which is when + // the manifest is generated and its digest calculated. + RepoDigests []string + + // Comment is an optional message that can be set when committing or + // importing the image. This field is omitted if not set. + Comment string `json:",omitempty"` + + // Created is the date and time at which the image was created, formatted in + // RFC 3339 nano-seconds (time.RFC3339Nano). + // + // This information is only available if present in the image, + // and omitted otherwise. + Created string `json:",omitempty"` + + // Author is the name of the author that was specified when committing the + // image, or as specified through MAINTAINER (deprecated) in the Dockerfile. + // This field is omitted if not set. + Author string `json:",omitempty"` + Config *dockerspec.DockerOCIImageConfig + + // Architecture is the hardware CPU architecture that the image runs on. + Architecture string + + // Variant is the CPU architecture variant (presently ARM-only). + Variant string `json:",omitempty"` + + // OS is the Operating System the image is built to run on. + Os string + + // OsVersion is the version of the Operating System the image is built to + // run on (especially for Windows). + OsVersion string `json:",omitempty"` + + // Size is the total size of the image including all layers it is composed of. 
+ Size int64 + + // GraphDriver holds information about the storage driver used to store the + // container's and image's filesystem. + GraphDriver *storage.DriverData `json:"GraphDriver,omitempty"` + + // RootFS contains information about the image's RootFS, including the + // layer IDs. + RootFS RootFS + + // Metadata of the image in the local cache. + // + // This information is local to the daemon, and not part of the image itself. + Metadata Metadata + + // Descriptor is the OCI descriptor of the image target. + // It's only set if the daemon provides a multi-platform image store. + // + // WARNING: This is experimental and may change at any time without any backward + // compatibility. + Descriptor *ocispec.Descriptor `json:"Descriptor,omitempty"` + + // Manifests is a list of image manifests available in this image. It + // provides a more detailed view of the platform-specific image manifests or + // other image-attached data like build attestations. + // + // Only available if the daemon provides a multi-platform image store, the client + // requests manifests AND does not request a specific platform. + // + // WARNING: This is experimental and may change at any time without any backward + // compatibility. + Manifests []ManifestSummary `json:"Manifests,omitempty"` + + // Identity holds information about the identity and origin of the image. + // This is trusted information verified by the daemon and cannot be modified + // by tagging an image to a different name. + Identity *Identity `json:"Identity,omitempty"` +} + +// SignatureTimestampType is the type of timestamp used in the signature. +type SignatureTimestampType string + +const ( + SignatureTimestampTlog SignatureTimestampType = "Tlog" + SignatureTimestampAuthority SignatureTimestampType = "TimestampAuthority" +) + +// SignatureType is the type of signature format. 
+type SignatureType string + +const ( + SignatureTypeBundleV03 SignatureType = "bundle-v0.3" + SignatureTypeSimpleSigningV1 SignatureType = "simplesigning-v1" +) + +// KnownSignerIdentity is an identifier for a special signer identity that is known to the implementation. +type KnownSignerIdentity string + +const ( + // KnownSignerDHI is the known identity for Docker Hardened Images. + KnownSignerDHI KnownSignerIdentity = "DHI" +) diff --git a/vendor/github.com/moby/moby/api/types/image/manifest.go b/vendor/github.com/moby/moby/api/types/image/manifest.go new file mode 100644 index 00000000000..bcd00a07970 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/image/manifest.go @@ -0,0 +1,104 @@ +package image + +import ( + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +type ManifestKind string + +const ( + ManifestKindImage ManifestKind = "image" + ManifestKindAttestation ManifestKind = "attestation" + ManifestKindUnknown ManifestKind = "unknown" +) + +type ManifestSummary struct { + // ID is the content-addressable ID of an image and is the same as the + // digest of the image manifest. + // + // Required: true + ID string `json:"ID"` + + // Descriptor is the OCI descriptor of the image. + // + // Required: true + Descriptor ocispec.Descriptor `json:"Descriptor"` + + // Indicates whether all the child content (image config, layers) is + // fully available locally + // + // Required: true + Available bool `json:"Available"` + + // Size is the size information of the content related to this manifest. + // Note: These sizes only take the locally available content into account. + // + // Required: true + Size struct { + // Content is the size (in bytes) of all the locally present + // content in the content store (e.g. image config, layers) + // referenced by this manifest and its children. + // This only includes blobs in the content store. 
+ Content int64 `json:"Content"` + + // Total is the total size (in bytes) of all the locally present + // data (both distributable and non-distributable) that's related to + // this manifest and its children. + // This equal to the sum of [Content] size AND all the sizes in the + // [Size] struct present in the Kind-specific data struct. + // For example, for an image kind (Kind == ManifestKindImage), + // this would include the size of the image content and unpacked + // image snapshots ([Size.Content] + [ImageData.Size.Unpacked]). + Total int64 `json:"Total"` + } `json:"Size"` + + // Kind is the kind of the image manifest. + // + // Required: true + Kind ManifestKind `json:"Kind"` + + // Fields below are specific to the kind of the image manifest. + + // Present only if Kind == ManifestKindImage. + ImageData *ImageProperties `json:"ImageData,omitempty"` + + // Present only if Kind == ManifestKindAttestation. + AttestationData *AttestationProperties `json:"AttestationData,omitempty"` +} + +type ImageProperties struct { + // Platform is the OCI platform object describing the platform of the image. + // + // Required: true + Platform ocispec.Platform `json:"Platform"` + + // Identity holds information about the identity and origin of the image. + // For image list responses, this can duplicate Build/Pull fields across + // image manifests, because those parts of identity are image-level metadata. + Identity *Identity `json:"Identity,omitempty"` + + Size struct { + // Unpacked is the size (in bytes) of the locally unpacked + // (uncompressed) image content that's directly usable by the containers + // running this image. + // It's independent of the distributable content - e.g. + // the image might still have an unpacked data that's still used by + // some container even when the distributable/compressed content is + // already gone. 
+ // + // Required: true + Unpacked int64 `json:"Unpacked"` + } + + // Containers is an array containing the IDs of the containers that are + // using this image. + // + // Required: true + Containers []string `json:"Containers"` +} + +type AttestationProperties struct { + // For is the digest of the image manifest that this attestation is for. + For digest.Digest `json:"For"` +} diff --git a/vendor/github.com/moby/moby/api/types/image/pull_identity.go b/vendor/github.com/moby/moby/api/types/image/pull_identity.go new file mode 100644 index 00000000000..711492b5c56 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/image/pull_identity.go @@ -0,0 +1,8 @@ +package image + +// PullIdentity contains remote location information if image was created via pull. +// If image was pulled via mirror, this contains the original repository location. +type PullIdentity struct { + // Repository is the remote repository location the image was pulled from. + Repository string `json:"Repository,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/image/signature_identity.go b/vendor/github.com/moby/moby/api/types/image/signature_identity.go new file mode 100644 index 00000000000..243c2997c40 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/image/signature_identity.go @@ -0,0 +1,26 @@ +package image + +// SignatureIdentity contains the properties of verified signatures for the image. +type SignatureIdentity struct { + // Name is a textual description summarizing the type of signature. + Name string `json:"Name,omitempty"` + // Timestamps contains a list of verified signed timestamps for the signature. + Timestamps []SignatureTimestamp `json:"Timestamps,omitzero"` + // KnownSigner is an identifier for a special signer identity that is known to the implementation. + KnownSigner KnownSignerIdentity `json:"KnownSigner,omitempty"` + // DockerReference is the Docker image reference associated with the signature. 
+ // This is an optional field only present in older hashedrecord signatures. + DockerReference string `json:"DockerReference,omitempty"` + // Signer contains information about the signer certificate used to sign the image. + Signer *SignerIdentity `json:"Signer,omitempty"` + // SignatureType is the type of signature format. E.g. "bundle-v0.3" or "hashedrecord". + SignatureType SignatureType `json:"SignatureType,omitempty"` + + // Error contains error information if signature verification failed. + // Other fields will be empty in this case. + Error string `json:"Error,omitempty"` + // Warnings contains any warnings that occurred during signature verification. + // For example, if there was no internet connectivity and cached trust roots were used. + // Warning does not indicate a failed verification but may point to configuration issues. + Warnings []string `json:"Warnings,omitzero"` +} diff --git a/vendor/github.com/moby/moby/api/types/image/signature_timestamp.go b/vendor/github.com/moby/moby/api/types/image/signature_timestamp.go new file mode 100644 index 00000000000..a975ef0eeaa --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/image/signature_timestamp.go @@ -0,0 +1,12 @@ +package image + +import ( + "time" +) + +// SignatureTimestamp contains information about a verified signed timestamp for an image signature. +type SignatureTimestamp struct { + Type SignatureTimestampType `json:"Type"` + URI string `json:"URI"` + Timestamp time.Time `json:"Timestamp"` +} diff --git a/vendor/github.com/moby/moby/api/types/image/signer_identity.go b/vendor/github.com/moby/moby/api/types/image/signer_identity.go new file mode 100644 index 00000000000..87419e1484d --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/image/signer_identity.go @@ -0,0 +1,57 @@ +package image + +// SignerIdentity contains information about the signer certificate used to sign the image. +// This is [certificate.Summary] with deprecated fields removed and keys in Moby uppercase style. 
+// +// [certificate.Summary]: https://pkg.go.dev/github.com/sigstore/sigstore-go/pkg/fulcio/certificate#Summary +type SignerIdentity struct { + CertificateIssuer string `json:"CertificateIssuer"` + SubjectAlternativeName string `json:"SubjectAlternativeName"` + // The OIDC issuer. Should match `iss` claim of ID token or, in the case of + // a federated login like Dex it should match the issuer URL of the + // upstream issuer. The issuer is not set the extensions are invalid and + // will fail to render. + Issuer string `json:"Issuer,omitempty"` // OID 1.3.6.1.4.1.57264.1.8 and 1.3.6.1.4.1.57264.1.1 (Deprecated) + + // Reference to specific build instructions that are responsible for signing. + BuildSignerURI string `json:"BuildSignerURI,omitempty"` // 1.3.6.1.4.1.57264.1.9 + + // Immutable reference to the specific version of the build instructions that is responsible for signing. + BuildSignerDigest string `json:"BuildSignerDigest,omitempty"` // 1.3.6.1.4.1.57264.1.10 + + // Specifies whether the build took place in platform-hosted cloud infrastructure or customer/self-hosted infrastructure. + RunnerEnvironment string `json:"RunnerEnvironment,omitempty"` // 1.3.6.1.4.1.57264.1.11 + + // Source repository URL that the build was based on. + SourceRepositoryURI string `json:"SourceRepositoryURI,omitempty"` // 1.3.6.1.4.1.57264.1.12 + + // Immutable reference to a specific version of the source code that the build was based upon. + SourceRepositoryDigest string `json:"SourceRepositoryDigest,omitempty"` // 1.3.6.1.4.1.57264.1.13 + + // Source Repository Ref that the build run was based upon. + SourceRepositoryRef string `json:"SourceRepositoryRef,omitempty"` // 1.3.6.1.4.1.57264.1.14 + + // Immutable identifier for the source repository the workflow was based upon. 
+ SourceRepositoryIdentifier string `json:"SourceRepositoryIdentifier,omitempty"` // 1.3.6.1.4.1.57264.1.15 + + // Source repository owner URL of the owner of the source repository that the build was based on. + SourceRepositoryOwnerURI string `json:"SourceRepositoryOwnerURI,omitempty"` // 1.3.6.1.4.1.57264.1.16 + + // Immutable identifier for the owner of the source repository that the workflow was based upon. + SourceRepositoryOwnerIdentifier string `json:"SourceRepositoryOwnerIdentifier,omitempty"` // 1.3.6.1.4.1.57264.1.17 + + // Build Config URL to the top-level/initiating build instructions. + BuildConfigURI string `json:"BuildConfigURI,omitempty"` // 1.3.6.1.4.1.57264.1.18 + + // Immutable reference to the specific version of the top-level/initiating build instructions. + BuildConfigDigest string `json:"BuildConfigDigest,omitempty"` // 1.3.6.1.4.1.57264.1.19 + + // Event or action that initiated the build. + BuildTrigger string `json:"BuildTrigger,omitempty"` // 1.3.6.1.4.1.57264.1.20 + + // Run Invocation URL to uniquely identify the build execution. + RunInvocationURI string `json:"RunInvocationURI,omitempty"` // 1.3.6.1.4.1.57264.1.21 + + // Source repository visibility at the time of signing the certificate. + SourceRepositoryVisibilityAtSigning string `json:"SourceRepositoryVisibilityAtSigning,omitempty"` // 1.3.6.1.4.1.57264.1.22 +} diff --git a/vendor/github.com/moby/moby/api/types/image/summary.go b/vendor/github.com/moby/moby/api/types/image/summary.go new file mode 100644 index 00000000000..3d4dd165a32 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/image/summary.go @@ -0,0 +1,95 @@ +package image + +import ocispec "github.com/opencontainers/image-spec/specs-go/v1" + +type Summary struct { + // Number of containers using this image. Includes both stopped and running + // containers. + // + // This size is not calculated by default, and depends on which API endpoint + // is used. `-1` indicates that the value has not been set / calculated. 
+ // + // Required: true + Containers int64 `json:"Containers"` + + // Date and time at which the image was created as a Unix timestamp + // (number of seconds since EPOCH). + // + // Required: true + Created int64 `json:"Created"` + + // ID is the content-addressable ID of an image. + // + // This identifier is a content-addressable digest calculated from the + // image's configuration (which includes the digests of layers used by + // the image). + // + // Note that this digest differs from the `RepoDigests` below, which + // holds digests of image manifests that reference the image. + // + // Required: true + ID string `json:"Id"` + + // User-defined key/value metadata. + // Required: true + Labels map[string]string `json:"Labels"` + + // ID of the parent image. + // + // Depending on how the image was created, this field may be empty and + // is only set for images that were built/created locally. This field + // is empty if the image was pulled from an image registry. + // + // Required: true + ParentID string `json:"ParentId"` + + // Descriptor is the OCI descriptor of the image target. + // It's only set if the daemon provides a multi-platform image store. + // + // WARNING: This is experimental and may change at any time without any backward + // compatibility. + Descriptor *ocispec.Descriptor `json:"Descriptor,omitempty"` + + // Manifests is a list of image manifests available in this image. It + // provides a more detailed view of the platform-specific image manifests or + // other image-attached data like build attestations. + // + // WARNING: This is experimental and may change at any time without any backward + // compatibility. + Manifests []ManifestSummary `json:"Manifests,omitempty"` + + // List of content-addressable digests of locally available image manifests + // that the image is referenced from. Multiple manifests can refer to the + // same image. 
+ // + // These digests are usually only available if the image was either pulled + // from a registry, or if the image was pushed to a registry, which is when + // the manifest is generated and its digest calculated. + // + // Required: true + RepoDigests []string `json:"RepoDigests"` + + // List of image names/tags in the local image cache that reference this + // image. + // + // Multiple image tags can refer to the same image, and this list may be + // empty if no tags reference the image, in which case the image is + // "untagged", in which case it can still be referenced by its ID. + // + // Required: true + RepoTags []string `json:"RepoTags"` + + // Total size of image layers that are shared between this image and other + // images. + // + // This size is not calculated by default. `-1` indicates that the value + // has not been set / calculated. + // + // Required: true + SharedSize int64 `json:"SharedSize"` + + // Total size of the image including all layers it is composed of. + // + // Required: true + Size int64 `json:"Size"` +} diff --git a/vendor/github.com/moby/moby/api/types/jsonstream/json_error.go b/vendor/github.com/moby/moby/api/types/jsonstream/json_error.go new file mode 100644 index 00000000000..0dcc9337dba --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/jsonstream/json_error.go @@ -0,0 +1,15 @@ +package jsonstream + +// Error wraps a concrete Code and Message, Code is +// an integer error code, Message is the error message. 
+type Error struct { + Code int `json:"code,omitempty"` + Message string `json:"message,omitempty"` +} + +func (e *Error) Error() string { + if e == nil { + return "" + } + return e.Message +} diff --git a/vendor/github.com/moby/moby/api/types/jsonstream/message.go b/vendor/github.com/moby/moby/api/types/jsonstream/message.go new file mode 100644 index 00000000000..6b74bd93270 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/jsonstream/message.go @@ -0,0 +1,15 @@ +package jsonstream + +import "encoding/json" + +// Message defines a message struct. It describes +// the created time, where it from, status, ID of the +// message. +type Message struct { + Stream string `json:"stream,omitempty"` + Status string `json:"status,omitempty"` + Progress *Progress `json:"progressDetail,omitempty"` + ID string `json:"id,omitempty"` + Error *Error `json:"errorDetail,omitempty"` + Aux *json.RawMessage `json:"aux,omitempty"` // Aux contains out-of-band data, such as digests for push signing and image id after building. +} diff --git a/vendor/github.com/moby/moby/api/types/jsonstream/progress.go b/vendor/github.com/moby/moby/api/types/jsonstream/progress.go new file mode 100644 index 00000000000..5c38b3b5efc --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/jsonstream/progress.go @@ -0,0 +1,10 @@ +package jsonstream + +// Progress describes a progress message in a JSON stream. +type Progress struct { + Current int64 `json:"current,omitempty"` // Current is the current status and value of the progress made towards Total. + Total int64 `json:"total,omitempty"` // Total is the end value describing when we made 100% progress for an operation. + Start int64 `json:"start,omitempty"` // Start is the initial value for the operation. + HideCounts bool `json:"hidecounts,omitempty"` // HideCounts. if true, hides the progress count indicator (xB/yB). + Units string `json:"units,omitempty"` // Units is the unit to print for progress. It defaults to "bytes" if empty. 
+} diff --git a/vendor/github.com/moby/moby/api/types/mount/mount.go b/vendor/github.com/moby/moby/api/types/mount/mount.go new file mode 100644 index 00000000000..090d436c626 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/mount/mount.go @@ -0,0 +1,157 @@ +package mount + +import ( + "os" +) + +// Type represents the type of a mount. +type Type string + +// Type constants +const ( + // TypeBind is the type for mounting host dir + TypeBind Type = "bind" + // TypeVolume is the type for remote storage volumes + TypeVolume Type = "volume" + // TypeTmpfs is the type for mounting tmpfs + TypeTmpfs Type = "tmpfs" + // TypeNamedPipe is the type for mounting Windows named pipes + TypeNamedPipe Type = "npipe" + // TypeCluster is the type for Swarm Cluster Volumes. + TypeCluster Type = "cluster" + // TypeImage is the type for mounting another image's filesystem + TypeImage Type = "image" +) + +// Mount represents a mount (volume). +type Mount struct { + Type Type `json:",omitempty"` + // Source specifies the name of the mount. Depending on mount type, this + // may be a volume name or a host path, or even ignored. + // Source is not supported for tmpfs (must be an empty value) + Source string `json:",omitempty"` + Target string `json:",omitempty"` + ReadOnly bool `json:",omitempty"` // attempts recursive read-only if possible + Consistency Consistency `json:",omitempty"` + + BindOptions *BindOptions `json:",omitempty"` + VolumeOptions *VolumeOptions `json:",omitempty"` + ImageOptions *ImageOptions `json:",omitempty"` + TmpfsOptions *TmpfsOptions `json:",omitempty"` + ClusterOptions *ClusterOptions `json:",omitempty"` +} + +// Propagation represents the propagation of a mount. 
+type Propagation string + +const ( + // PropagationRPrivate RPRIVATE + PropagationRPrivate Propagation = "rprivate" + // PropagationPrivate PRIVATE + PropagationPrivate Propagation = "private" + // PropagationRShared RSHARED + PropagationRShared Propagation = "rshared" + // PropagationShared SHARED + PropagationShared Propagation = "shared" + // PropagationRSlave RSLAVE + PropagationRSlave Propagation = "rslave" + // PropagationSlave SLAVE + PropagationSlave Propagation = "slave" +) + +// Propagations is the list of all valid mount propagations +var Propagations = []Propagation{ + PropagationRPrivate, + PropagationPrivate, + PropagationRShared, + PropagationShared, + PropagationRSlave, + PropagationSlave, +} + +// Consistency represents the consistency requirements of a mount. +type Consistency string + +const ( + // ConsistencyFull guarantees bind mount-like consistency + ConsistencyFull Consistency = "consistent" + // ConsistencyCached mounts can cache read data and FS structure + ConsistencyCached Consistency = "cached" + // ConsistencyDelegated mounts can cache read and written data and structure + ConsistencyDelegated Consistency = "delegated" + // ConsistencyDefault provides "consistent" behavior unless overridden + ConsistencyDefault Consistency = "default" +) + +// BindOptions defines options specific to mounts of type "bind". +type BindOptions struct { + Propagation Propagation `json:",omitempty"` + NonRecursive bool `json:",omitempty"` + CreateMountpoint bool `json:",omitempty"` + // ReadOnlyNonRecursive makes the mount non-recursively read-only, but still leaves the mount recursive + // (unless NonRecursive is set to true in conjunction). + ReadOnlyNonRecursive bool `json:",omitempty"` + // ReadOnlyForceRecursive raises an error if the mount cannot be made recursively read-only. + ReadOnlyForceRecursive bool `json:",omitempty"` +} + +// VolumeOptions represents the options for a mount of type volume. 
+type VolumeOptions struct { + NoCopy bool `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Subpath string `json:",omitempty"` + DriverConfig *Driver `json:",omitempty"` +} + +type ImageOptions struct { + Subpath string `json:",omitempty"` +} + +// Driver represents a volume driver. +type Driver struct { + Name string `json:",omitempty"` + Options map[string]string `json:",omitempty"` +} + +// TmpfsOptions defines options specific to mounts of type "tmpfs". +type TmpfsOptions struct { + // Size sets the size of the tmpfs, in bytes. + // + // This will be converted to an operating system specific value + // depending on the host. For example, on linux, it will be converted to + // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with + // docker, uses a straight byte value. + // + // Percentages are not supported. + SizeBytes int64 `json:",omitempty"` + // Mode of the tmpfs upon creation + Mode os.FileMode `json:",omitempty"` + // Options to be passed to the tmpfs mount. An array of arrays. Flag + // options should be provided as 1-length arrays. Other types should be + // provided as 2-length arrays, where the first item is the key and the + // second the value. + Options [][]string `json:",omitempty"` + // TODO(stevvooe): There are several more tmpfs flags, specified in the + // daemon, that are accepted. Only the most basic are added for now. + // + // From https://github.com/moby/sys/blob/mount/v0.1.1/mount/flags.go#L47-L56 + // + // var validFlags = map[string]bool{ + // "": true, + // "size": true, X + // "mode": true, X + // "uid": true, + // "gid": true, + // "nr_inodes": true, + // "nr_blocks": true, + // "mpol": true, + // } + // + // Some of these may be straightforward to add, but others, such as + // uid/gid have implications in a clustered system. +} + +// ClusterOptions specifies options for a Cluster volume. 
+type ClusterOptions struct { + // intentionally empty +} diff --git a/vendor/github.com/moby/moby/api/types/network/config_reference.go b/vendor/github.com/moby/moby/api/types/network/config_reference.go new file mode 100644 index 00000000000..1158afe655e --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/config_reference.go @@ -0,0 +1,20 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package network + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ConfigReference The config-only network source to provide the configuration for +// this network. +// +// swagger:model ConfigReference +type ConfigReference struct { + + // The name of the config-only network that provides the network's + // configuration. The specified network must be an existing config-only + // network. Only network names are allowed, not network IDs. + // + // Example: config_only_network_01 + Network string `json:"Network"` +} diff --git a/vendor/github.com/moby/moby/api/types/network/connect_request.go b/vendor/github.com/moby/moby/api/types/network/connect_request.go new file mode 100644 index 00000000000..2ff14d36030 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/connect_request.go @@ -0,0 +1,20 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package network + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ConnectRequest NetworkConnectRequest represents the data to be used to connect a container to a network. +// +// swagger:model ConnectRequest +type ConnectRequest struct { + + // The ID or name of the container to connect to the network. 
+ // Example: 3613f73ba0e4 + // Required: true + Container string `json:"Container"` + + // endpoint config + EndpointConfig *EndpointSettings `json:"EndpointConfig,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/network/create_response.go b/vendor/github.com/moby/moby/api/types/network/create_response.go new file mode 100644 index 00000000000..19970599129 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/create_response.go @@ -0,0 +1,23 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package network + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// CreateResponse NetworkCreateResponse +// +// # OK response to NetworkCreate operation +// +// swagger:model CreateResponse +type CreateResponse struct { + + // The ID of the created network. + // Example: b5c4fc71e8022147cd25de22b22173de4e3b170134117172eb595cb91b4e7e5d + // Required: true + ID string `json:"Id"` + + // Warnings encountered when creating the container + // Required: true + Warning string `json:"Warning"` +} diff --git a/vendor/github.com/moby/moby/api/types/network/disconnect_request.go b/vendor/github.com/moby/moby/api/types/network/disconnect_request.go new file mode 100644 index 00000000000..7b1f521e775 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/disconnect_request.go @@ -0,0 +1,21 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package network + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// DisconnectRequest NetworkDisconnectRequest represents the data to be used to disconnect a container from a network. +// +// swagger:model DisconnectRequest +type DisconnectRequest struct { + + // The ID or name of the container to disconnect from the network. 
+ // Example: 3613f73ba0e4 + // Required: true + Container string `json:"Container"` + + // Force the container to disconnect from the network. + // Example: false + Force bool `json:"Force"` +} diff --git a/vendor/github.com/moby/moby/api/types/network/endpoint.go b/vendor/github.com/moby/moby/api/types/network/endpoint.go new file mode 100644 index 00000000000..c4c1766cf29 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/endpoint.go @@ -0,0 +1,74 @@ +package network + +import ( + "maps" + "net/netip" + "slices" +) + +// EndpointSettings stores the network endpoint details +type EndpointSettings struct { + // Configuration data + IPAMConfig *EndpointIPAMConfig + Links []string + Aliases []string // Aliases holds the list of extra, user-specified DNS names for this endpoint. + DriverOpts map[string]string + + // GwPriority determines which endpoint will provide the default gateway + // for the container. The endpoint with the highest priority will be used. + // If multiple endpoints have the same priority, they are lexicographically + // sorted based on their network name, and the one that sorts first is picked. + GwPriority int + + // Operational data + + NetworkID string + EndpointID string + Gateway netip.Addr + IPAddress netip.Addr + + // MacAddress may be used to specify a MAC address when the container is created. + // Once the container is running, it becomes operational data (it may contain a + // generated address). + MacAddress HardwareAddr + IPPrefixLen int + IPv6Gateway netip.Addr + GlobalIPv6Address netip.Addr + GlobalIPv6PrefixLen int + // DNSNames holds all the (non fully qualified) DNS names associated to this + // endpoint. The first entry is used to generate PTR records. 
+ DNSNames []string +} + +// Copy makes a deep copy of `EndpointSettings` +func (es *EndpointSettings) Copy() *EndpointSettings { + if es == nil { + return nil + } + + epCopy := *es + epCopy.IPAMConfig = es.IPAMConfig.Copy() + epCopy.Links = slices.Clone(es.Links) + epCopy.Aliases = slices.Clone(es.Aliases) + epCopy.DNSNames = slices.Clone(es.DNSNames) + epCopy.DriverOpts = maps.Clone(es.DriverOpts) + + return &epCopy +} + +// EndpointIPAMConfig represents IPAM configurations for the endpoint +type EndpointIPAMConfig struct { + IPv4Address netip.Addr `json:"IPv4Address,omitzero"` + IPv6Address netip.Addr `json:"IPv6Address,omitzero"` + LinkLocalIPs []netip.Addr `json:"LinkLocalIPs,omitempty"` +} + +// Copy makes a copy of the endpoint ipam config +func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig { + if cfg == nil { + return nil + } + cfgCopy := *cfg + cfgCopy.LinkLocalIPs = slices.Clone(cfg.LinkLocalIPs) + return &cfgCopy +} diff --git a/vendor/github.com/moby/moby/api/types/network/endpoint_resource.go b/vendor/github.com/moby/moby/api/types/network/endpoint_resource.go new file mode 100644 index 00000000000..bf493ad5ddc --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/endpoint_resource.go @@ -0,0 +1,35 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package network + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/netip" +) + +// EndpointResource contains network resources allocated and used for a container in a network. 
+// +// swagger:model EndpointResource +type EndpointResource struct { + + // name + // Example: container_1 + Name string `json:"Name"` + + // endpoint ID + // Example: 628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a + EndpointID string `json:"EndpointID"` + + // mac address + // Example: 02:42:ac:13:00:02 + MacAddress HardwareAddr `json:"MacAddress"` + + // IPv4 address + // Example: 172.19.0.2/16 + IPv4Address netip.Prefix `json:"IPv4Address"` + + // IPv6 address + IPv6Address netip.Prefix `json:"IPv6Address"` +} diff --git a/vendor/github.com/moby/moby/api/types/network/hwaddr.go b/vendor/github.com/moby/moby/api/types/network/hwaddr.go new file mode 100644 index 00000000000..b2a4dfb1a12 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/hwaddr.go @@ -0,0 +1,39 @@ +package network + +import ( + "encoding" + "fmt" + "net" +) + +// A HardwareAddr represents a physical hardware address. +// It implements [encoding.TextMarshaler] and [encoding.TextUnmarshaler] +// in the absence of go.dev/issue/29678. +type HardwareAddr net.HardwareAddr + +var ( + _ encoding.TextMarshaler = (HardwareAddr)(nil) + _ encoding.TextUnmarshaler = (*HardwareAddr)(nil) + _ fmt.Stringer = (HardwareAddr)(nil) +) + +func (m *HardwareAddr) UnmarshalText(text []byte) error { + if len(text) == 0 { + *m = nil + return nil + } + hw, err := net.ParseMAC(string(text)) + if err != nil { + return err + } + *m = HardwareAddr(hw) + return nil +} + +func (m HardwareAddr) MarshalText() ([]byte, error) { + return []byte(net.HardwareAddr(m).String()), nil +} + +func (m HardwareAddr) String() string { + return net.HardwareAddr(m).String() +} diff --git a/vendor/github.com/moby/moby/api/types/network/inspect.go b/vendor/github.com/moby/moby/api/types/network/inspect.go new file mode 100644 index 00000000000..cded5e6081d --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/inspect.go @@ -0,0 +1,27 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package network + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Inspect The body of the "get network" http response message. +// +// swagger:model Inspect +type Inspect struct { + Network + + // Contains endpoints attached to the network. + // + // Example: {"19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c":{"EndpointID":"628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a","IPv4Address":"172.19.0.2/16","IPv6Address":"","MacAddress":"02:42:ac:13:00:02","Name":"test"}} + Containers map[string]EndpointResource `json:"Containers"` + + // List of services using the network. This field is only present for + // swarm scope networks, and omitted for local scope networks. + // + Services map[string]ServiceInfo `json:"Services,omitempty"` + + // provides runtime information about the network such as the number of allocated IPs. + // + Status *Status `json:"Status,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/network/ipam.go b/vendor/github.com/moby/moby/api/types/network/ipam.go new file mode 100644 index 00000000000..3fb357fc6c4 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/ipam.go @@ -0,0 +1,22 @@ +package network + +import ( + "net/netip" +) + +// IPAM represents IP Address Management +type IPAM struct { + Driver string + Options map[string]string // Per network IPAM driver options + Config []IPAMConfig +} + +// IPAMConfig represents IPAM configurations +type IPAMConfig struct { + Subnet netip.Prefix `json:"Subnet,omitzero"` + IPRange netip.Prefix `json:"IPRange,omitzero"` + Gateway netip.Addr `json:"Gateway,omitzero"` + AuxAddress map[string]netip.Addr `json:"AuxiliaryAddresses,omitempty"` +} + +type SubnetStatuses = map[netip.Prefix]SubnetStatus diff --git a/vendor/github.com/moby/moby/api/types/network/ipam_status.go b/vendor/github.com/moby/moby/api/types/network/ipam_status.go new file mode 100644 
index 00000000000..7eb4e8487e9 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/ipam_status.go @@ -0,0 +1,16 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package network + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// IPAMStatus IPAM status +// +// swagger:model IPAMStatus +type IPAMStatus struct { + + // subnets + // Example: {"172.16.0.0/16":{"DynamicIPsAvailable":65533,"IPsInUse":3},"2001:db8:abcd:0012::0/96":{"DynamicIPsAvailable":4294967291,"IPsInUse":5}} + Subnets SubnetStatuses `json:"Subnets,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/network/network.go b/vendor/github.com/moby/moby/api/types/network/network.go new file mode 100644 index 00000000000..a7d9c0f6ad5 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/network.go @@ -0,0 +1,100 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package network + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + timeext "time" +) + +// Network network +// +// swagger:model Network +type Network struct { + + // Name of the network. + // + // Example: my_network + Name string `json:"Name"` + + // ID that uniquely identifies a network on a single machine. + // + // Example: 7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99 + ID string `json:"Id"` + + // Date and time at which the network was created in + // [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + // + // Example: 2016-10-19T04:33:30.360899459Z + Created timeext.Time `json:"Created"` + + // The level at which the network exists (e.g. `swarm` for cluster-wide + // or `local` for machine level) + // + // Example: local + Scope string `json:"Scope"` + + // The name of the driver used to create the network (e.g. `bridge`, + // `overlay`). 
+ // + // Example: overlay + Driver string `json:"Driver"` + + // Whether the network was created with IPv4 enabled. + // + // Example: true + EnableIPv4 bool `json:"EnableIPv4"` + + // Whether the network was created with IPv6 enabled. + // + // Example: false + EnableIPv6 bool `json:"EnableIPv6"` + + // The network's IP Address Management. + // + IPAM IPAM `json:"IPAM"` + + // Whether the network is created to only allow internal networking + // connectivity. + // + // Example: false + Internal bool `json:"Internal"` + + // Whether a global / swarm scope network is manually attachable by regular + // containers from workers in swarm mode. + // + // Example: false + Attachable bool `json:"Attachable"` + + // Whether the network is providing the routing-mesh for the swarm cluster. + // + // Example: false + Ingress bool `json:"Ingress"` + + // config from + ConfigFrom ConfigReference `json:"ConfigFrom"` + + // Whether the network is a config-only network. Config-only networks are + // placeholder networks for network configurations to be used by other + // networks. Config-only networks cannot be used directly to run containers + // or services. + // + ConfigOnly bool `json:"ConfigOnly"` + + // Network-specific options uses when creating the network. + // + // Example: {"com.docker.network.bridge.default_bridge":"true","com.docker.network.bridge.enable_icc":"true","com.docker.network.bridge.enable_ip_masquerade":"true","com.docker.network.bridge.host_binding_ipv4":"0.0.0.0","com.docker.network.bridge.name":"docker0","com.docker.network.driver.mtu":"1500"} + Options map[string]string `json:"Options"` + + // Metadata specific to the network being created. + // + // Example: {"com.example.some-label":"some-value","com.example.some-other-label":"some-other-value"} + Labels map[string]string `json:"Labels"` + + // List of peer nodes for an overlay network. This field is only present + // for overlay networks, and omitted for other network types. 
+ // + Peers []PeerInfo `json:"Peers,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/network/network_types.go b/vendor/github.com/moby/moby/api/types/network/network_types.go new file mode 100644 index 00000000000..5401f55f826 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/network_types.go @@ -0,0 +1,43 @@ +package network + +const ( + // NetworkDefault is a platform-independent alias to choose the platform-specific default network stack. + NetworkDefault = "default" + // NetworkHost is the name of the predefined network used when the NetworkMode host is selected (only available on Linux) + NetworkHost = "host" + // NetworkNone is the name of the predefined network used when the NetworkMode none is selected (available on both Linux and Windows) + NetworkNone = "none" + // NetworkBridge is the name of the default network on Linux + NetworkBridge = "bridge" + // NetworkNat is the name of the default network on Windows + NetworkNat = "nat" +) + +// CreateRequest is the request message sent to the server for network create call. +type CreateRequest struct { + Name string // Name is the requested name of the network. + Driver string // Driver is the driver-name used to create the network (e.g. `bridge`, `overlay`) + Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level). + EnableIPv4 *bool `json:",omitempty"` // EnableIPv4 represents whether to enable IPv4. + EnableIPv6 *bool `json:",omitempty"` // EnableIPv6 represents whether to enable IPv6. + IPAM *IPAM // IPAM is the network's IP Address Management. + Internal bool // Internal represents if the network is used internal only. + Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. + Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster. 
+ ConfigOnly bool // ConfigOnly creates a config-only network. Config-only networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services. + ConfigFrom *ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. The specified network must be a config-only network; see [CreateOptions.ConfigOnly]. + Options map[string]string // Options specifies the network-specific options to use for when creating the network. + Labels map[string]string // Labels holds metadata specific to the network being created. +} + +// NetworkingConfig represents the container's networking configuration for each of its interfaces +// Carries the networking configs specified in the `docker run` and `docker network connect` commands +type NetworkingConfig struct { + EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network +} + +// PruneReport contains the response for Engine API: +// POST "/networks/prune" +type PruneReport struct { + NetworksDeleted []string +} diff --git a/vendor/github.com/moby/moby/api/types/network/peer_info.go b/vendor/github.com/moby/moby/api/types/network/peer_info.go new file mode 100644 index 00000000000..dc88ec16fa9 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/peer_info.go @@ -0,0 +1,24 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package network + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/netip" +) + +// PeerInfo represents one peer of an overlay network. +// +// swagger:model PeerInfo +type PeerInfo struct { + + // ID of the peer-node in the Swarm cluster. + // Example: 6869d7c1732b + Name string `json:"Name"` + + // IP-address of the peer-node in the Swarm cluster. 
+ // Example: 10.133.77.91 + IP netip.Addr `json:"IP"` +} diff --git a/vendor/github.com/moby/moby/api/types/network/port.go b/vendor/github.com/moby/moby/api/types/network/port.go new file mode 100644 index 00000000000..171d9f51d35 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/port.go @@ -0,0 +1,346 @@ +package network + +import ( + "errors" + "fmt" + "iter" + "net/netip" + "strconv" + "strings" + "unique" +) + +// IPProtocol represents a network protocol for a port. +type IPProtocol string + +const ( + TCP IPProtocol = "tcp" + UDP IPProtocol = "udp" + SCTP IPProtocol = "sctp" +) + +// Sentinel port proto value for zero Port and PortRange values. +var protoZero unique.Handle[IPProtocol] + +// Port is a type representing a single port number and protocol in the format "/[]". +// +// The zero port value, i.e. Port{}, is invalid; use [ParsePort] to create a valid Port value. +type Port struct { + num uint16 + proto unique.Handle[IPProtocol] +} + +// ParsePort parses s as a [Port]. +// +// It normalizes the provided protocol such that "80/tcp", "80/TCP", and "80/tCp" are equivalent. +// If a port number is provided, but no protocol, the default ("tcp") protocol is returned. +func ParsePort(s string) (Port, error) { + if s == "" { + return Port{}, errors.New("invalid port: value is empty") + } + + port, proto, _ := strings.Cut(s, "/") + + portNum, err := parsePortNumber(port) + if err != nil { + return Port{}, fmt.Errorf("invalid port '%s': %w", port, err) + } + + normalizedPortProto := normalizePortProto(proto) + return Port{num: portNum, proto: normalizedPortProto}, nil +} + +// MustParsePort calls [ParsePort](s) and panics on error. +// +// It is intended for use in tests with hard-coded strings. +func MustParsePort(s string) Port { + p, err := ParsePort(s) + if err != nil { + panic(err) + } + return p +} + +// PortFrom returns a [Port] with the given number and protocol. +// +// If no protocol is specified (i.e. 
proto == ""), then PortFrom returns Port{}, false. +func PortFrom(num uint16, proto IPProtocol) (p Port, ok bool) { + if proto == "" { + return Port{}, false + } + normalized := normalizePortProto(string(proto)) + return Port{num: num, proto: normalized}, true +} + +// Num returns p's port number. +func (p Port) Num() uint16 { + return p.num +} + +// Proto returns p's network protocol. +func (p Port) Proto() IPProtocol { + return p.proto.Value() +} + +// IsZero reports whether p is the zero value. +func (p Port) IsZero() bool { + return p.proto == protoZero +} + +// IsValid reports whether p is an initialized valid port (not the zero value). +func (p Port) IsValid() bool { + return p.proto != protoZero +} + +// String returns a string representation of the port in the format "/". +// If the port is the zero value, it returns "invalid port". +func (p Port) String() string { + switch p.proto { + case protoZero: + return "invalid port" + default: + return string(p.AppendTo(nil)) + } +} + +// AppendText implements [encoding.TextAppender] interface. +// It is the same as [Port.AppendTo] but returns an error to satisfy the interface. +func (p Port) AppendText(b []byte) ([]byte, error) { + return p.AppendTo(b), nil +} + +// AppendTo appends a text encoding of p to b and returns the extended buffer. +func (p Port) AppendTo(b []byte) []byte { + if p.IsZero() { + return b + } + return fmt.Appendf(b, "%d/%s", p.num, p.proto.Value()) +} + +// MarshalText implements [encoding.TextMarshaler] interface. +func (p Port) MarshalText() ([]byte, error) { + return p.AppendText(nil) +} + +// UnmarshalText implements [encoding.TextUnmarshaler] interface. +func (p *Port) UnmarshalText(text []byte) error { + if len(text) == 0 { + *p = Port{} + return nil + } + + port, err := ParsePort(string(text)) + if err != nil { + return err + } + + *p = port + return nil +} + +// Range returns a [PortRange] representing the single port. 
+func (p Port) Range() PortRange { + return PortRange{start: p.num, end: p.num, proto: p.proto} +} + +// PortSet is a collection of structs indexed by [Port]. +type PortSet = map[Port]struct{} + +// PortBinding represents a binding between a Host IP address and a Host Port. +type PortBinding struct { + // HostIP is the host IP Address + HostIP netip.Addr `json:"HostIp"` + // HostPort is the host port number + HostPort string `json:"HostPort"` +} + +// PortMap is a collection of [PortBinding] indexed by [Port]. +type PortMap = map[Port][]PortBinding + +// PortRange represents a range of port numbers and a protocol in the format "8000-9000/tcp". +// +// The zero port range value, i.e. PortRange{}, is invalid; use [ParsePortRange] to create a valid PortRange value. +type PortRange struct { + start uint16 + end uint16 + proto unique.Handle[IPProtocol] +} + +// ParsePortRange parses s as a [PortRange]. +// +// It normalizes the provided protocol such that "80-90/tcp", "80-90/TCP", and "80-90/tCp" are equivalent. +// If a port number range is provided, but no protocol, the default ("tcp") protocol is returned. 
+func ParsePortRange(s string) (PortRange, error) { + if s == "" { + return PortRange{}, errors.New("invalid port range: value is empty") + } + + portRange, proto, _ := strings.Cut(s, "/") + + start, end, ok := strings.Cut(portRange, "-") + startVal, err := parsePortNumber(start) + if err != nil { + return PortRange{}, fmt.Errorf("invalid start port '%s': %w", start, err) + } + + portProto := normalizePortProto(proto) + + if !ok || start == end { + return PortRange{start: startVal, end: startVal, proto: portProto}, nil + } + + endVal, err := parsePortNumber(end) + if err != nil { + return PortRange{}, fmt.Errorf("invalid end port '%s': %w", end, err) + } + if endVal < startVal { + return PortRange{}, errors.New("invalid port range: " + s) + } + return PortRange{start: startVal, end: endVal, proto: portProto}, nil +} + +// MustParsePortRange calls [ParsePortRange](s) and panics on error. +// It is intended for use in tests with hard-coded strings. +func MustParsePortRange(s string) PortRange { + pr, err := ParsePortRange(s) + if err != nil { + panic(err) + } + return pr +} + +// PortRangeFrom returns a [PortRange] with the given start and end port numbers and protocol. +// +// If end < start or no protocol is specified (i.e. proto == ""), then PortRangeFrom returns PortRange{}, false. +func PortRangeFrom(start, end uint16, proto IPProtocol) (pr PortRange, ok bool) { + if end < start || proto == "" { + return PortRange{}, false + } + normalized := normalizePortProto(string(proto)) + return PortRange{start: start, end: end, proto: normalized}, true +} + +// Start returns pr's start port number. +func (pr PortRange) Start() uint16 { + return pr.start +} + +// End returns pr's end port number. +func (pr PortRange) End() uint16 { + return pr.end +} + +// Proto returns pr's network protocol. +func (pr PortRange) Proto() IPProtocol { + return pr.proto.Value() +} + +// IsZero reports whether pr is the zero value. 
+func (pr PortRange) IsZero() bool { + return pr.proto == protoZero +} + +// IsValid reports whether pr is an initialized valid port range (not the zero value). +func (pr PortRange) IsValid() bool { + return pr.proto != protoZero +} + +// String returns a string representation of the port range in the format "-/" or "/" if start == end. +// If the port range is the zero value, it returns "invalid port range". +func (pr PortRange) String() string { + switch pr.proto { + case protoZero: + return "invalid port range" + default: + return string(pr.AppendTo(nil)) + } +} + +// AppendText implements [encoding.TextAppender] interface. +// It is the same as [PortRange.AppendTo] but returns an error to satisfy the interface. +func (pr PortRange) AppendText(b []byte) ([]byte, error) { + return pr.AppendTo(b), nil +} + +// AppendTo appends a text encoding of pr to b and returns the extended buffer. +func (pr PortRange) AppendTo(b []byte) []byte { + if pr.IsZero() { + return b + } + if pr.start == pr.end { + return fmt.Appendf(b, "%d/%s", pr.start, pr.proto.Value()) + } + return fmt.Appendf(b, "%d-%d/%s", pr.start, pr.end, pr.proto.Value()) +} + +// MarshalText implements [encoding.TextMarshaler] interface. +func (pr PortRange) MarshalText() ([]byte, error) { + return pr.AppendText(nil) +} + +// UnmarshalText implements [encoding.TextUnmarshaler] interface. +func (pr *PortRange) UnmarshalText(text []byte) error { + if len(text) == 0 { + *pr = PortRange{} + return nil + } + + portRange, err := ParsePortRange(string(text)) + if err != nil { + return err + } + *pr = portRange + return nil +} + +// Range returns pr. +func (pr PortRange) Range() PortRange { + return pr +} + +// All returns an iterator over all the individual ports in the range. +// +// For example: +// +// for port := range pr.All() { +// // ... 
+// } +func (pr PortRange) All() iter.Seq[Port] { + return func(yield func(Port) bool) { + for i := uint32(pr.Start()); i <= uint32(pr.End()); i++ { + if !yield(Port{num: uint16(i), proto: pr.proto}) { + return + } + } + } +} + +// parsePortNumber parses rawPort into an int, unwrapping strconv errors +// and returning a single "out of range" error for any value outside 0–65535. +func parsePortNumber(rawPort string) (uint16, error) { + if rawPort == "" { + return 0, errors.New("value is empty") + } + port, err := strconv.ParseUint(rawPort, 10, 16) + if err != nil { + var numErr *strconv.NumError + if errors.As(err, &numErr) { + err = numErr.Err + } + return 0, err + } + + return uint16(port), nil +} + +// normalizePortProto normalizes the protocol string such that "tcp", "TCP", and "tCp" are equivalent. +// If proto is not specified, it defaults to "tcp". +func normalizePortProto(proto string) unique.Handle[IPProtocol] { + if proto == "" { + return unique.Make(TCP) + } + + proto = strings.ToLower(proto) + + return unique.Make(IPProtocol(proto)) +} diff --git a/vendor/github.com/moby/moby/api/types/network/service_info.go b/vendor/github.com/moby/moby/api/types/network/service_info.go new file mode 100644 index 00000000000..fdd92f16115 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/service_info.go @@ -0,0 +1,28 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package network + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/netip" +) + +// ServiceInfo represents service parameters with the list of service's tasks +// +// swagger:model ServiceInfo +type ServiceInfo struct { + + // v IP + VIP netip.Addr `json:"VIP"` + + // ports + Ports []string `json:"Ports"` + + // local l b index + LocalLBIndex int `json:"LocalLBIndex"` + + // tasks + Tasks []Task `json:"Tasks"` +} diff --git a/vendor/github.com/moby/moby/api/types/network/status.go b/vendor/github.com/moby/moby/api/types/network/status.go new file mode 100644 index 00000000000..94f4b4b2ec8 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/status.go @@ -0,0 +1,15 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package network + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Status provides runtime information about the network such as the number of allocated IPs. +// +// swagger:model Status +type Status struct { + + // IPAM + IPAM IPAMStatus `json:"IPAM"` +} diff --git a/vendor/github.com/moby/moby/api/types/network/subnet_status.go b/vendor/github.com/moby/moby/api/types/network/subnet_status.go new file mode 100644 index 00000000000..dd62429f5f8 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/subnet_status.go @@ -0,0 +1,20 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package network + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// SubnetStatus subnet status +// +// swagger:model SubnetStatus +type SubnetStatus struct { + + // Number of IP addresses in the subnet that are in use or reserved and are therefore unavailable for allocation, saturating at 264 - 1. 
+ // + IPsInUse uint64 `json:"IPsInUse"` + + // Number of IP addresses within the network's IPRange for the subnet that are available for allocation, saturating at 2^64 - 1. + // + DynamicIPsAvailable uint64 `json:"DynamicIPsAvailable"` +} diff --git a/vendor/github.com/moby/moby/api/types/network/summary.go b/vendor/github.com/moby/moby/api/types/network/summary.go new file mode 100644 index 00000000000..3f50ce22786 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/summary.go @@ -0,0 +1,13 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package network + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Summary Network list response item +// +// swagger:model Summary +type Summary struct { + Network +} diff --git a/vendor/github.com/moby/moby/api/types/network/task.go b/vendor/github.com/moby/moby/api/types/network/task.go new file mode 100644 index 00000000000..a547523a44e --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/network/task.go @@ -0,0 +1,28 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package network + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/netip" +) + +// Task carries the information about one backend task +// +// swagger:model Task +type Task struct { + + // name + Name string `json:"Name"` + + // endpoint ID + EndpointID string `json:"EndpointID"` + + // endpoint IP + EndpointIP netip.Addr `json:"EndpointIP"` + + // info + Info map[string]string `json:"Info"` +} diff --git a/vendor/github.com/moby/moby/api/types/plugin/.gitignore b/vendor/github.com/moby/moby/api/types/plugin/.gitignore new file mode 100644 index 00000000000..5cea8434d77 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/plugin/.gitignore @@ -0,0 +1 @@ +testdata/rapid/** diff --git a/vendor/github.com/moby/moby/api/types/plugin/capability.go b/vendor/github.com/moby/moby/api/types/plugin/capability.go new file mode 100644 index 00000000000..d53f77a1f11 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/plugin/capability.go @@ -0,0 +1,55 @@ +package plugin + +import ( + "bytes" + "encoding" + "fmt" + "strings" +) + +type CapabilityID struct { + Capability string + Prefix string + Version string +} + +var ( + _ fmt.Stringer = CapabilityID{} + _ encoding.TextUnmarshaler = (*CapabilityID)(nil) + _ encoding.TextMarshaler = CapabilityID{} +) + +// String implements [fmt.Stringer] for CapabilityID +func (t CapabilityID) String() string { + return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) +} + +// UnmarshalText implements [encoding.TextUnmarshaler] for CapabilityID +func (t *CapabilityID) UnmarshalText(p []byte) error { + fqcap, version, _ := bytes.Cut(p, []byte{'/'}) + idx := bytes.LastIndexByte(fqcap, '.') + if idx < 0 { + t.Prefix = "" + t.Capability = string(fqcap) + } else { + t.Prefix = string(fqcap[:idx]) + t.Capability = string(fqcap[idx+1:]) + } + t.Version = string(version) + return nil +} + +// MarshalText implements [encoding.TextMarshaler] for CapabilityID +func (t CapabilityID) MarshalText() 
([]byte, error) { + // Assert that the value can be round-tripped successfully. + if strings.Contains(t.Capability, ".") { + return nil, fmt.Errorf("capability %q cannot contain a dot", t.Capability) + } + if strings.Contains(t.Prefix, "/") { + return nil, fmt.Errorf("prefix %q cannot contain a slash", t.Prefix) + } + if strings.Contains(t.Capability, "/") { + return nil, fmt.Errorf("capability %q cannot contain a slash", t.Capability) + } + return []byte(t.String()), nil +} diff --git a/vendor/github.com/moby/moby/api/types/plugin/device.go b/vendor/github.com/moby/moby/api/types/plugin/device.go new file mode 100644 index 00000000000..ae961770474 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/plugin/device.go @@ -0,0 +1,29 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package plugin + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Device device +// +// swagger:model Device +type Device struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // path + // Example: /dev/fuse + // Required: true + Path *string `json:"Path"` + + // settable + // Required: true + Settable []string `json:"Settable"` +} diff --git a/vendor/github.com/moby/moby/api/types/plugin/env.go b/vendor/github.com/moby/moby/api/types/plugin/env.go new file mode 100644 index 00000000000..dcbe0b762d7 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/plugin/env.go @@ -0,0 +1,28 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package plugin + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// Env env +// +// swagger:model Env +type Env struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value *string `json:"Value"` +} diff --git a/vendor/github.com/moby/moby/api/types/plugin/mount.go b/vendor/github.com/moby/moby/api/types/plugin/mount.go new file mode 100644 index 00000000000..7970306cc85 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/plugin/mount.go @@ -0,0 +1,46 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package plugin + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Mount mount +// +// swagger:model Mount +type Mount struct { + + // description + // Example: This is a mount that's used by the plugin. + // Required: true + Description string `json:"Description"` + + // destination + // Example: /mnt/state + // Required: true + Destination string `json:"Destination"` + + // name + // Example: some-mount + // Required: true + Name string `json:"Name"` + + // options + // Example: ["rbind","rw"] + // Required: true + Options []string `json:"Options"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // source + // Example: /var/lib/docker/plugins/ + // Required: true + Source *string `json:"Source"` + + // type + // Example: bind + // Required: true + Type string `json:"Type"` +} diff --git a/vendor/github.com/moby/moby/api/types/plugin/plugin.go b/vendor/github.com/moby/moby/api/types/plugin/plugin.go new file mode 100644 index 00000000000..3305170d5ee --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/plugin/plugin.go @@ -0,0 +1,237 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package plugin + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Plugin A plugin for the Engine API +// +// swagger:model Plugin +type Plugin struct { + + // config + // Required: true + Config Config `json:"Config"` + + // True if the plugin is running. False if the plugin is not running, only installed. + // Example: true + // Required: true + Enabled bool `json:"Enabled"` + + // Id + // Example: 5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078 + ID string `json:"Id,omitempty"` + + // name + // Example: tiborvass/sample-volume-plugin + // Required: true + Name string `json:"Name"` + + // plugin remote reference used to push/pull the plugin + // Example: localhost:5000/tiborvass/sample-volume-plugin:latest + PluginReference string `json:"PluginReference,omitempty"` + + // settings + // Required: true + Settings Settings `json:"Settings"` +} + +// Config The config of a plugin. 
+// +// swagger:model Config +type Config struct { + + // args + // Required: true + Args Args `json:"Args"` + + // description + // Example: A sample volume plugin for Docker + // Required: true + Description string `json:"Description"` + + // documentation + // Example: https://docs.docker.com/engine/extend/plugins/ + // Required: true + Documentation string `json:"Documentation"` + + // entrypoint + // Example: ["/usr/bin/sample-volume-plugin","/data"] + // Required: true + Entrypoint []string `json:"Entrypoint"` + + // env + // Example: [{"Description":"If set, prints debug messages","Name":"DEBUG","Settable":null,"Value":"0"}] + // Required: true + Env []Env `json:"Env"` + + // interface + // Required: true + Interface Interface `json:"Interface"` + + // ipc host + // Example: false + // Required: true + IpcHost bool `json:"IpcHost"` + + // linux + // Required: true + Linux LinuxConfig `json:"Linux"` + + // mounts + // Required: true + Mounts []Mount `json:"Mounts"` + + // network + // Required: true + Network NetworkConfig `json:"Network"` + + // pid host + // Example: false + // Required: true + PidHost bool `json:"PidHost"` + + // propagated mount + // Example: /mnt/volumes + // Required: true + PropagatedMount string `json:"PropagatedMount"` + + // user + User User `json:"User,omitempty"` + + // work dir + // Example: /bin/ + // Required: true + WorkDir string `json:"WorkDir"` + + // rootfs + Rootfs *RootFS `json:"rootfs,omitempty"` +} + +// Args args +// +// swagger:model Args +type Args struct { + + // description + // Example: command line arguments + // Required: true + Description string `json:"Description"` + + // name + // Example: args + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value []string `json:"Value"` +} + +// Interface The interface between Docker and the plugin +// +// swagger:model Interface +type Interface struct { + + // 
Protocol to use for clients connecting to the plugin. + // Example: some.protocol/v1.0 + // Enum: ["","moby.plugins.http/v1"] + ProtocolScheme string `json:"ProtocolScheme,omitempty"` + + // socket + // Example: plugins.sock + // Required: true + Socket string `json:"Socket"` + + // types + // Example: ["docker.volumedriver/1.0"] + // Required: true + Types []CapabilityID `json:"Types"` +} + +// LinuxConfig linux config +// +// swagger:model LinuxConfig +type LinuxConfig struct { + + // allow all devices + // Example: false + // Required: true + AllowAllDevices bool `json:"AllowAllDevices"` + + // capabilities + // Example: ["CAP_SYS_ADMIN","CAP_SYSLOG"] + // Required: true + Capabilities []string `json:"Capabilities"` + + // devices + // Required: true + Devices []Device `json:"Devices"` +} + +// NetworkConfig network config +// +// swagger:model NetworkConfig +type NetworkConfig struct { + + // type + // Example: host + // Required: true + Type string `json:"Type"` +} + +// RootFS root f s +// +// swagger:model RootFS +type RootFS struct { + + // diff ids + // Example: ["sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887","sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8"] + DiffIds []string `json:"diff_ids"` + + // type + // Example: layers + Type string `json:"type,omitempty"` +} + +// User user +// +// swagger:model User +type User struct { + + // g ID + // Example: 1000 + GID uint32 `json:"GID,omitempty"` + + // UID + // Example: 1000 + UID uint32 `json:"UID,omitempty"` +} + +// Settings user-configurable settings for the plugin. 
+// +// swagger:model Settings +type Settings struct { + + // args + // Required: true + Args []string `json:"Args"` + + // devices + // Required: true + Devices []Device `json:"Devices"` + + // env + // Example: ["DEBUG=0"] + // Required: true + Env []string `json:"Env"` + + // mounts + // Required: true + Mounts []Mount `json:"Mounts"` +} diff --git a/vendor/github.com/moby/moby/api/types/plugin/plugin_responses.go b/vendor/github.com/moby/moby/api/types/plugin/plugin_responses.go new file mode 100644 index 00000000000..91b327eb473 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/plugin/plugin_responses.go @@ -0,0 +1,33 @@ +package plugin + +import ( + "sort" +) + +// ListResponse contains the response for the Engine API +type ListResponse []Plugin + +// Privilege describes a permission the user has to accept +// upon installing a plugin. +type Privilege struct { + Name string + Description string + Value []string +} + +// Privileges is a list of Privilege +type Privileges []Privilege + +func (s Privileges) Len() int { + return len(s) +} + +func (s Privileges) Less(i, j int) bool { + return s[i].Name < s[j].Name +} + +func (s Privileges) Swap(i, j int) { + sort.Strings(s[i].Value) + sort.Strings(s[j].Value) + s[i], s[j] = s[j], s[i] +} diff --git a/vendor/github.com/moby/moby/api/types/registry/auth_response.go b/vendor/github.com/moby/moby/api/types/registry/auth_response.go new file mode 100644 index 00000000000..94c2e1bb36a --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/registry/auth_response.go @@ -0,0 +1,21 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package registry + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// AuthResponse An identity token was generated successfully. 
+// +// swagger:model AuthResponse +type AuthResponse struct { + + // An opaque token used to authenticate a user after a successful login + // Example: 9cbaf023786cd7... + IdentityToken string `json:"IdentityToken,omitempty"` + + // The status of the authentication + // Example: Login Succeeded + // Required: true + Status string `json:"Status"` +} diff --git a/vendor/github.com/moby/moby/api/types/registry/authconfig.go b/vendor/github.com/moby/moby/api/types/registry/authconfig.go new file mode 100644 index 00000000000..b612feebaae --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/registry/authconfig.go @@ -0,0 +1,35 @@ +package registry + +import "context" + +// AuthHeader is the name of the header used to send encoded registry +// authorization credentials for registry operations (push/pull). +const AuthHeader = "X-Registry-Auth" + +// RequestAuthConfig is a function interface that clients can supply +// to retry operations after getting an authorization error. +// +// The function must return the [AuthHeader] value ([AuthConfig]), encoded +// in base64url format ([RFC4648, section 5]), which can be decoded by +// [DecodeAuthConfig]. +// +// It must return an error if the privilege request fails. +// +// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5 +type RequestAuthConfig func(context.Context) (string, error) + +// AuthConfig contains authorization information for connecting to a Registry. +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. 
+ IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/registry/registry.go b/vendor/github.com/moby/moby/api/types/registry/registry.go new file mode 100644 index 00000000000..7361228d66e --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/registry/registry.go @@ -0,0 +1,67 @@ +package registry + +import ( + "net/netip" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ServiceConfig stores daemon registry services configuration. +type ServiceConfig struct { + InsecureRegistryCIDRs []netip.Prefix `json:"InsecureRegistryCIDRs"` + IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` + Mirrors []string +} + +// IndexInfo contains information about a registry +// +// RepositoryInfo Examples: +// +// { +// "Index" : { +// "Name" : "docker.io", +// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], +// "Secure" : true, +// "Official" : true, +// }, +// "RemoteName" : "library/debian", +// "LocalName" : "debian", +// "CanonicalName" : "docker.io/debian" +// "Official" : true, +// } +// +// { +// "Index" : { +// "Name" : "127.0.0.1:5000", +// "Mirrors" : [], +// "Secure" : false, +// "Official" : false, +// }, +// "RemoteName" : "user/repo", +// "LocalName" : "127.0.0.1:5000/user/repo", +// "CanonicalName" : "127.0.0.1:5000/user/repo", +// "Official" : false, +// } +type IndexInfo struct { + // Name is the name of the registry, such as "docker.io" + Name string + // Mirrors is a list of mirrors, expressed as URIs + Mirrors []string + // Secure is set to false if the registry is part of the list of + // insecure registries. Insecure registries accept HTTP and/or accept + // HTTPS with certificates from unknown CAs. 
+ Secure bool + // Official indicates whether this is an official registry + Official bool +} + +// DistributionInspect describes the result obtained from contacting the +// registry to retrieve image metadata +type DistributionInspect struct { + // Descriptor contains information about the manifest, including + // the content addressable digest + Descriptor ocispec.Descriptor + // Platforms contains the list of platforms supported by the image, + // obtained by parsing the manifest + Platforms []ocispec.Platform +} diff --git a/vendor/github.com/moby/moby/api/types/registry/search.go b/vendor/github.com/moby/moby/api/types/registry/search.go new file mode 100644 index 00000000000..bd79462f607 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/registry/search.go @@ -0,0 +1,27 @@ +package registry + +// SearchResult describes a search result returned from a registry +type SearchResult struct { + // StarCount indicates the number of stars this repository has + StarCount int `json:"star_count"` + // IsOfficial is true if the result is from an official repository. + IsOfficial bool `json:"is_official"` + // Name is the name of the repository + Name string `json:"name"` + // IsAutomated indicates whether the result is automated. + // + // Deprecated: the "is_automated" field is deprecated and will always be "false". 
+ IsAutomated bool `json:"is_automated"` + // Description is a textual description of the repository + Description string `json:"description"` +} + +// SearchResults lists a collection search results returned from a registry +type SearchResults struct { + // Query contains the query string that generated the search results + Query string `json:"query"` + // NumResults indicates the number of results the query returned + NumResults int `json:"num_results"` + // Results is a slice containing the actual results for the search + Results []SearchResult `json:"results"` +} diff --git a/vendor/github.com/moby/moby/api/types/storage/driver_data.go b/vendor/github.com/moby/moby/api/types/storage/driver_data.go new file mode 100644 index 00000000000..65d5b4c20ea --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/storage/driver_data.go @@ -0,0 +1,27 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// DriverData Information about the storage driver used to store the container's and +// image's filesystem. +// +// swagger:model DriverData +type DriverData struct { + + // Low-level storage metadata, provided as key/value pairs. + // + // This information is driver-specific, and depends on the storage-driver + // in use, and should be used for informational purposes only. + // + // Example: {"MergedDir":"/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/merged","UpperDir":"/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/diff","WorkDir":"/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work"} + // Required: true + Data map[string]string `json:"Data"` + + // Name of the storage driver. 
+ // Example: overlay2 + // Required: true + Name string `json:"Name"` +} diff --git a/vendor/github.com/moby/moby/api/types/storage/root_f_s_storage.go b/vendor/github.com/moby/moby/api/types/storage/root_f_s_storage.go new file mode 100644 index 00000000000..d82f2b6bcbc --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/storage/root_f_s_storage.go @@ -0,0 +1,16 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// RootFSStorage Information about the storage used for the container's root filesystem. +// +// swagger:model RootFSStorage +type RootFSStorage struct { + + // Information about the snapshot used for the container's root filesystem. + // + Snapshot *RootFSStorageSnapshot `json:"Snapshot,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/storage/root_f_s_storage_snapshot.go b/vendor/github.com/moby/moby/api/types/storage/root_f_s_storage_snapshot.go new file mode 100644 index 00000000000..dd2b82d245f --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/storage/root_f_s_storage_snapshot.go @@ -0,0 +1,15 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// RootFSStorageSnapshot Information about a snapshot backend of the container's root filesystem. +// +// swagger:model RootFSStorageSnapshot +type RootFSStorageSnapshot struct { + + // Name of the snapshotter. + Name string `json:"Name,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/storage/storage.go b/vendor/github.com/moby/moby/api/types/storage/storage.go new file mode 100644 index 00000000000..77843db9708 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/storage/storage.go @@ -0,0 +1,16 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package storage + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Storage Information about the storage used by the container. +// +// swagger:model Storage +type Storage struct { + + // Information about the storage used for the container's root filesystem. + // + RootFS *RootFSStorage `json:"RootFS,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/swarm/common.go b/vendor/github.com/moby/moby/api/types/swarm/common.go new file mode 100644 index 00000000000..b42812e03f1 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/swarm/common.go @@ -0,0 +1,48 @@ +package swarm + +import ( + "strconv" + "time" +) + +// Version represents the internal object version. +type Version struct { + Index uint64 `json:",omitempty"` +} + +// String implements fmt.Stringer interface. +func (v Version) String() string { + return strconv.FormatUint(v.Index, 10) +} + +// Meta is a base object inherited by most of the other once. +type Meta struct { + Version Version `json:",omitempty"` + CreatedAt time.Time `json:",omitempty"` + UpdatedAt time.Time `json:",omitempty"` +} + +// Annotations represents how to describe an object. +type Annotations struct { + Name string `json:",omitempty"` + Labels map[string]string `json:"Labels"` +} + +// Driver represents a driver (network, logging, secrets backend). 
+type Driver struct { + Name string `json:",omitempty"` + Options map[string]string `json:",omitempty"` +} + +// TLSInfo represents the TLS information about what CA certificate is trusted, +// and who the issuer for a TLS certificate is +type TLSInfo struct { + // TrustRoot is the trusted CA root certificate in PEM format + TrustRoot string `json:",omitempty"` + + // CertIssuer is the raw subject bytes of the issuer + CertIssuerSubject []byte `json:",omitempty"` + + // CertIssuerPublicKey is the raw public key bytes of the issuer + CertIssuerPublicKey []byte `json:",omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/swarm/config.go b/vendor/github.com/moby/moby/api/types/swarm/config.go new file mode 100644 index 00000000000..b029f2af856 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/swarm/config.go @@ -0,0 +1,55 @@ +package swarm + +import ( + "os" +) + +// Config represents a config. +type Config struct { + ID string + Meta + Spec ConfigSpec +} + +// ConfigSpec represents a config specification from a config in swarm +type ConfigSpec struct { + Annotations + + // Data is the data to store as a config. + // + // The maximum allowed size is 1000KB, as defined in [MaxConfigSize]. + // + // [MaxConfigSize]: https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize + Data []byte `json:",omitempty"` + + // Templating controls whether and how to evaluate the config payload as + // a template. If it is not set, no templating is used. + Templating *Driver `json:",omitempty"` +} + +// ConfigReferenceFileTarget is a file target in a config reference +type ConfigReferenceFileTarget struct { + Name string + UID string + GID string + Mode os.FileMode +} + +// ConfigReferenceRuntimeTarget is a target for a config specifying that it +// isn't mounted into the container but instead has some other purpose. 
+type ConfigReferenceRuntimeTarget struct{} + +// ConfigReference is a reference to a config in swarm +type ConfigReference struct { + File *ConfigReferenceFileTarget `json:",omitempty"` + Runtime *ConfigReferenceRuntimeTarget `json:",omitempty"` + ConfigID string + ConfigName string +} + +// ConfigCreateResponse contains the information returned to a client +// on the creation of a new config. +type ConfigCreateResponse struct { + // ID is the id of the created config. + ID string +} diff --git a/vendor/github.com/moby/moby/api/types/swarm/container.go b/vendor/github.com/moby/moby/api/types/swarm/container.go new file mode 100644 index 00000000000..268565ec8ae --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/swarm/container.go @@ -0,0 +1,120 @@ +package swarm + +import ( + "net/netip" + "time" + + "github.com/moby/moby/api/types/container" + "github.com/moby/moby/api/types/mount" +) + +// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf) +// Detailed documentation is available in: +// http://man7.org/linux/man-pages/man5/resolv.conf.5.html +// `nameserver`, `search`, `options` have been supported. +// TODO: `domain` is not supported yet. +type DNSConfig struct { + // Nameservers specifies the IP addresses of the name servers + Nameservers []netip.Addr `json:",omitempty"` + // Search specifies the search list for host-name lookup + Search []string `json:",omitempty"` + // Options allows certain internal resolver variables to be modified + Options []string `json:",omitempty"` +} + +// SELinuxContext contains the SELinux labels of the container. 
+type SELinuxContext struct { + Disable bool + + User string + Role string + Type string + Level string +} + +// SeccompMode is the type used for the enumeration of possible seccomp modes +// in SeccompOpts +type SeccompMode string + +const ( + SeccompModeDefault SeccompMode = "default" + SeccompModeUnconfined SeccompMode = "unconfined" + SeccompModeCustom SeccompMode = "custom" +) + +// SeccompOpts defines the options for configuring seccomp on a swarm-managed +// container. +type SeccompOpts struct { + // Mode is the SeccompMode used for the container. + Mode SeccompMode `json:",omitempty"` + // Profile is the custom seccomp profile as a json object to be used with + // the container. Mode should be set to SeccompModeCustom when using a + // custom profile in this manner. + Profile []byte `json:",omitempty"` +} + +// AppArmorMode is type used for the enumeration of possible AppArmor modes in +// AppArmorOpts +type AppArmorMode string + +const ( + AppArmorModeDefault AppArmorMode = "default" + AppArmorModeDisabled AppArmorMode = "disabled" +) + +// AppArmorOpts defines the options for configuring AppArmor on a swarm-managed +// container. Currently, custom AppArmor profiles are not supported. +type AppArmorOpts struct { + Mode AppArmorMode `json:",omitempty"` +} + +// CredentialSpec for managed service account (Windows only) +type CredentialSpec struct { + Config string + File string + Registry string +} + +// Privileges defines the security options for the container. +type Privileges struct { + CredentialSpec *CredentialSpec + SELinuxContext *SELinuxContext + Seccomp *SeccompOpts `json:",omitempty"` + AppArmor *AppArmorOpts `json:",omitempty"` + NoNewPrivileges bool +} + +// ContainerSpec represents the spec of a container. 
+type ContainerSpec struct { + Image string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Command []string `json:",omitempty"` + Args []string `json:",omitempty"` + Hostname string `json:",omitempty"` + Env []string `json:",omitempty"` + Dir string `json:",omitempty"` + User string `json:",omitempty"` + Groups []string `json:",omitempty"` + Privileges *Privileges `json:",omitempty"` + Init *bool `json:",omitempty"` + StopSignal string `json:",omitempty"` + TTY bool `json:",omitempty"` + OpenStdin bool `json:",omitempty"` + ReadOnly bool `json:",omitempty"` + Mounts []mount.Mount `json:",omitempty"` + StopGracePeriod *time.Duration `json:",omitempty"` + Healthcheck *container.HealthConfig `json:",omitempty"` + // The format of extra hosts on swarmkit is specified in: + // http://man7.org/linux/man-pages/man5/hosts.5.html + // IP_address canonical_hostname [aliases...] + Hosts []string `json:",omitempty"` + DNSConfig *DNSConfig `json:",omitempty"` + Secrets []*SecretReference `json:",omitempty"` + Configs []*ConfigReference `json:",omitempty"` + Isolation container.Isolation `json:",omitempty"` + Sysctls map[string]string `json:",omitempty"` + CapabilityAdd []string `json:",omitempty"` + CapabilityDrop []string `json:",omitempty"` + Ulimits []*container.Ulimit `json:",omitempty"` + OomScoreAdj int64 `json:",omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/swarm/network.go b/vendor/github.com/moby/moby/api/types/swarm/network.go new file mode 100644 index 00000000000..b32c308f69e --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/swarm/network.go @@ -0,0 +1,139 @@ +package swarm + +import ( + "cmp" + "net/netip" + + "github.com/moby/moby/api/types/network" +) + +// Endpoint represents an endpoint. +type Endpoint struct { + Spec EndpointSpec `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` + VirtualIPs []EndpointVirtualIP `json:",omitempty"` +} + +// EndpointSpec represents the spec of an endpoint. 
+type EndpointSpec struct { + Mode ResolutionMode `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` +} + +// ResolutionMode represents a resolution mode. +type ResolutionMode string + +const ( + // ResolutionModeVIP VIP + ResolutionModeVIP ResolutionMode = "vip" + // ResolutionModeDNSRR DNSRR + ResolutionModeDNSRR ResolutionMode = "dnsrr" +) + +// PortConfig represents the config of a port. +type PortConfig struct { + Name string `json:",omitempty"` + Protocol network.IPProtocol `json:",omitempty"` + // TargetPort is the port inside the container + TargetPort uint32 `json:",omitempty"` + // PublishedPort is the port on the swarm hosts + PublishedPort uint32 `json:",omitempty"` + // PublishMode is the mode in which port is published + PublishMode PortConfigPublishMode `json:",omitempty"` +} + +// Compare returns the lexical ordering of p and other, and can be used +// with [slices.SortFunc]. +// +// The comparison is performed in the following priority order: +// 1. PublishedPort (host port) +// 2. TargetPort (container port) +// 3. Protocol +// 4. PublishMode +func (p PortConfig) Compare(other PortConfig) int { + if n := cmp.Compare(p.PublishedPort, other.PublishedPort); n != 0 { + return n + } + if n := cmp.Compare(p.TargetPort, other.TargetPort); n != 0 { + return n + } + if n := cmp.Compare(p.Protocol, other.Protocol); n != 0 { + return n + } + return cmp.Compare(p.PublishMode, other.PublishMode) +} + +// PortConfigPublishMode represents the mode in which the port is to +// be published. +type PortConfigPublishMode string + +const ( + // PortConfigPublishModeIngress is used for ports published + // for ingress load balancing using routing mesh. + PortConfigPublishModeIngress PortConfigPublishMode = "ingress" + // PortConfigPublishModeHost is used for ports published + // for direct host level access on the host where the task is running. 
+ PortConfigPublishModeHost PortConfigPublishMode = "host" +) + +// EndpointVirtualIP represents the virtual ip of a port. +type EndpointVirtualIP struct { + NetworkID string `json:",omitempty"` + + // Addr is the virtual ip address. + // This field accepts CIDR notation, for example `10.0.0.1/24`, to maintain backwards + // compatibility, but only the IP address is used. + Addr netip.Prefix `json:"Addr,omitzero"` +} + +// Network represents a network. +type Network struct { + ID string + Meta + Spec NetworkSpec `json:",omitempty"` + DriverState Driver `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` +} + +// NetworkSpec represents the spec of a network. +type NetworkSpec struct { + Annotations + DriverConfiguration *Driver `json:",omitempty"` + IPv6Enabled bool `json:",omitempty"` + Internal bool `json:",omitempty"` + Attachable bool `json:",omitempty"` + Ingress bool `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` + ConfigFrom *network.ConfigReference `json:",omitempty"` + Scope string `json:",omitempty"` +} + +// NetworkAttachmentConfig represents the configuration of a network attachment. +type NetworkAttachmentConfig struct { + Target string `json:",omitempty"` + Aliases []string `json:",omitempty"` + DriverOpts map[string]string `json:",omitempty"` +} + +// NetworkAttachment represents a network attachment. +type NetworkAttachment struct { + Network Network `json:",omitempty"` + + // Addresses contains the IP addresses associated with the endpoint in the network. + // This field accepts CIDR notation, for example `10.0.0.1/24`, to maintain backwards + // compatibility, but only the IP address is used. + Addresses []netip.Prefix `json:",omitempty"` +} + +// IPAMOptions represents ipam options. +type IPAMOptions struct { + Driver Driver `json:",omitempty"` + Configs []IPAMConfig `json:",omitempty"` +} + +// IPAMConfig represents ipam configuration. 
+type IPAMConfig struct { + Subnet netip.Prefix `json:"Subnet,omitzero"` + Range netip.Prefix `json:"Range,omitzero"` + Gateway netip.Addr `json:"Gateway,omitzero"` +} diff --git a/vendor/github.com/moby/moby/api/types/swarm/node.go b/vendor/github.com/moby/moby/api/types/swarm/node.go new file mode 100644 index 00000000000..9523799b679 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/swarm/node.go @@ -0,0 +1,139 @@ +package swarm + +// Node represents a node. +type Node struct { + ID string + Meta + // Spec defines the desired state of the node as specified by the user. + // The system will honor this and will *never* modify it. + Spec NodeSpec `json:",omitempty"` + // Description encapsulates the properties of the Node as reported by the + // agent. + Description NodeDescription `json:",omitempty"` + // Status provides the current status of the node, as seen by the manager. + Status NodeStatus `json:",omitempty"` + // ManagerStatus provides the current status of the node's manager + // component, if the node is a manager. + ManagerStatus *ManagerStatus `json:",omitempty"` +} + +// NodeSpec represents the spec of a node. +type NodeSpec struct { + Annotations + Role NodeRole `json:",omitempty"` + Availability NodeAvailability `json:",omitempty"` +} + +// NodeRole represents the role of a node. +type NodeRole string + +const ( + // NodeRoleWorker WORKER + NodeRoleWorker NodeRole = "worker" + // NodeRoleManager MANAGER + NodeRoleManager NodeRole = "manager" +) + +// NodeAvailability represents the availability of a node. +type NodeAvailability string + +const ( + // NodeAvailabilityActive ACTIVE + NodeAvailabilityActive NodeAvailability = "active" + // NodeAvailabilityPause PAUSE + NodeAvailabilityPause NodeAvailability = "pause" + // NodeAvailabilityDrain DRAIN + NodeAvailabilityDrain NodeAvailability = "drain" +) + +// NodeDescription represents the description of a node. 
+type NodeDescription struct { + Hostname string `json:",omitempty"` + Platform Platform `json:",omitempty"` + Resources Resources `json:",omitempty"` + Engine EngineDescription `json:",omitempty"` + TLSInfo TLSInfo `json:",omitempty"` + CSIInfo []NodeCSIInfo `json:",omitempty"` +} + +// Platform represents the platform (Arch/OS). +type Platform struct { + Architecture string `json:",omitempty"` + OS string `json:",omitempty"` +} + +// EngineDescription represents the description of an engine. +type EngineDescription struct { + EngineVersion string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Plugins []PluginDescription `json:",omitempty"` +} + +// NodeCSIInfo represents information about a CSI plugin available on the node +type NodeCSIInfo struct { + // PluginName is the name of the CSI plugin. + PluginName string `json:",omitempty"` + // NodeID is the ID of the node as reported by the CSI plugin. This is + // different from the swarm node ID. + NodeID string `json:",omitempty"` + // MaxVolumesPerNode is the maximum number of volumes that may be published + // to this node + MaxVolumesPerNode int64 `json:",omitempty"` + // AccessibleTopology indicates the location of this node in the CSI + // plugin's topology + AccessibleTopology *Topology `json:",omitempty"` +} + +// PluginDescription represents the description of an engine plugin. +type PluginDescription struct { + Type string `json:",omitempty"` + Name string `json:",omitempty"` +} + +// NodeStatus represents the status of a node. +type NodeStatus struct { + State NodeState `json:",omitempty"` + Message string `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// Reachability represents the reachability of a node. 
+type Reachability string + +const ( + // ReachabilityUnknown UNKNOWN + ReachabilityUnknown Reachability = "unknown" + // ReachabilityUnreachable UNREACHABLE + ReachabilityUnreachable Reachability = "unreachable" + // ReachabilityReachable REACHABLE + ReachabilityReachable Reachability = "reachable" +) + +// ManagerStatus represents the status of a manager. +type ManagerStatus struct { + Leader bool `json:",omitempty"` + Reachability Reachability `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// NodeState represents the state of a node. +type NodeState string + +const ( + // NodeStateUnknown UNKNOWN + NodeStateUnknown NodeState = "unknown" + // NodeStateDown DOWN + NodeStateDown NodeState = "down" + // NodeStateReady READY + NodeStateReady NodeState = "ready" + // NodeStateDisconnected DISCONNECTED + NodeStateDisconnected NodeState = "disconnected" +) + +// Topology defines the CSI topology of this node. This type is a duplicate of +// [github.com/moby/moby/api/types/volume.Topology]. Because the type definition +// is so simple and to avoid complicated structure or circular imports, we just +// duplicate it here. 
See that type for full documentation +type Topology struct { + Segments map[string]string `json:",omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/swarm/runtime.go b/vendor/github.com/moby/moby/api/types/swarm/runtime.go new file mode 100644 index 00000000000..23ea712c477 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/swarm/runtime.go @@ -0,0 +1,45 @@ +package swarm + +// RuntimeType is the type of runtime used for the TaskSpec +type RuntimeType string + +// RuntimeURL is the proto type url +type RuntimeURL string + +const ( + // RuntimeContainer is the container based runtime + RuntimeContainer RuntimeType = "container" + // RuntimePlugin is the plugin based runtime + RuntimePlugin RuntimeType = "plugin" + // RuntimeNetworkAttachment is the network attachment runtime + RuntimeNetworkAttachment RuntimeType = "attachment" + + // RuntimeURLContainer is the proto url for the container type + RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer" + // RuntimeURLPlugin is the proto url for the plugin type + RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin" +) + +// NetworkAttachmentSpec represents the runtime spec type for network +// attachment tasks +type NetworkAttachmentSpec struct { + ContainerID string +} + +// RuntimeSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. +type RuntimeSpec struct { + Name string `json:"name,omitempty"` + Remote string `json:"remote,omitempty"` + Privileges []*RuntimePrivilege `json:"privileges,omitempty"` + Disabled bool `json:"disabled,omitempty"` + Env []string `json:"env,omitempty"` +} + +// RuntimePrivilege describes a permission the user has to accept +// upon installing a plugin. 
+type RuntimePrivilege struct { + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Value []string `json:"value,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/swarm/secret.go b/vendor/github.com/moby/moby/api/types/swarm/secret.go new file mode 100644 index 00000000000..0e27ed9b07b --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/swarm/secret.go @@ -0,0 +1,59 @@ +package swarm + +import ( + "os" +) + +// Secret represents a secret. +type Secret struct { + ID string + Meta + Spec SecretSpec +} + +// SecretSpec represents a secret specification from a secret in swarm +type SecretSpec struct { + Annotations + + // Data is the data to store as a secret. It must be empty if a + // [Driver] is used, in which case the data is loaded from an external + // secret store. The maximum allowed size is 500KB, as defined in + // [MaxSecretSize]. + // + // This field is only used to create the secret, and is not returned + // by other endpoints. + // + // [MaxSecretSize]: https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0/api/validation#MaxSecretSize + Data []byte `json:",omitempty"` + + // Driver is the name of the secrets driver used to fetch the secret's + // value from an external secret store. If not set, the default built-in + // store is used. + Driver *Driver `json:",omitempty"` + + // Templating controls whether and how to evaluate the secret payload as + // a template. If it is not set, no templating is used. + Templating *Driver `json:",omitempty"` +} + +// SecretReferenceFileTarget is a file target in a secret reference +type SecretReferenceFileTarget struct { + Name string + UID string + GID string + Mode os.FileMode +} + +// SecretReference is a reference to a secret in swarm +type SecretReference struct { + File *SecretReferenceFileTarget + SecretID string + SecretName string +} + +// SecretCreateResponse contains the information returned to a client +// on the creation of a new secret. 
+type SecretCreateResponse struct { + // ID is the id of the created secret. + ID string +} diff --git a/vendor/github.com/moby/moby/api/types/swarm/service.go b/vendor/github.com/moby/moby/api/types/swarm/service.go new file mode 100644 index 00000000000..0b678dea335 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/swarm/service.go @@ -0,0 +1,218 @@ +package swarm + +import ( + "time" +) + +// Service represents a service. +type Service struct { + ID string + Meta + Spec ServiceSpec `json:",omitempty"` + PreviousSpec *ServiceSpec `json:",omitempty"` + Endpoint Endpoint `json:",omitempty"` + UpdateStatus *UpdateStatus `json:",omitempty"` + + // ServiceStatus is an optional, extra field indicating the number of + // desired and running tasks. It is provided primarily as a shortcut to + // calculating these values client-side, which otherwise would require + // listing all tasks for a service, an operation that could be + // computation and network expensive. + ServiceStatus *ServiceStatus `json:",omitempty"` + + // JobStatus is the status of a Service which is in one of ReplicatedJob or + // GlobalJob modes. It is absent on Replicated and Global services. + JobStatus *JobStatus `json:",omitempty"` +} + +// ServiceSpec represents the spec of a service. +type ServiceSpec struct { + Annotations + + // TaskTemplate defines how the service should construct new tasks when + // orchestrating this service. + TaskTemplate TaskSpec `json:",omitempty"` + Mode ServiceMode `json:",omitempty"` + UpdateConfig *UpdateConfig `json:",omitempty"` + RollbackConfig *UpdateConfig `json:",omitempty"` + EndpointSpec *EndpointSpec `json:",omitempty"` +} + +// ServiceMode represents the mode of a service. +type ServiceMode struct { + Replicated *ReplicatedService `json:",omitempty"` + Global *GlobalService `json:",omitempty"` + ReplicatedJob *ReplicatedJob `json:",omitempty"` + GlobalJob *GlobalJob `json:",omitempty"` +} + +// UpdateState is the state of a service update. 
+type UpdateState string + +const ( + // UpdateStateUpdating is the updating state. + UpdateStateUpdating UpdateState = "updating" + // UpdateStatePaused is the paused state. + UpdateStatePaused UpdateState = "paused" + // UpdateStateCompleted is the completed state. + UpdateStateCompleted UpdateState = "completed" + // UpdateStateRollbackStarted is the state with a rollback in progress. + UpdateStateRollbackStarted UpdateState = "rollback_started" + // UpdateStateRollbackPaused is the state with a rollback in progress. + UpdateStateRollbackPaused UpdateState = "rollback_paused" + // UpdateStateRollbackCompleted is the state with a rollback in progress. + UpdateStateRollbackCompleted UpdateState = "rollback_completed" +) + +// UpdateStatus reports the status of a service update. +type UpdateStatus struct { + State UpdateState `json:",omitempty"` + StartedAt *time.Time `json:",omitempty"` + CompletedAt *time.Time `json:",omitempty"` + Message string `json:",omitempty"` +} + +// ReplicatedService is a kind of ServiceMode. +type ReplicatedService struct { + Replicas *uint64 `json:",omitempty"` +} + +// GlobalService is a kind of ServiceMode. +type GlobalService struct{} + +// ReplicatedJob is the a type of Service which executes a defined Tasks +// in parallel until the specified number of Tasks have succeeded. +type ReplicatedJob struct { + // MaxConcurrent indicates the maximum number of Tasks that should be + // executing simultaneously for this job at any given time. There may be + // fewer Tasks that MaxConcurrent executing simultaneously; for example, if + // there are fewer than MaxConcurrent tasks needed to reach + // TotalCompletions. + // + // If this field is empty, it will default to a max concurrency of 1. + MaxConcurrent *uint64 `json:",omitempty"` + + // TotalCompletions is the total number of Tasks desired to run to + // completion. + // + // If this field is empty, the value of MaxConcurrent will be used. 
+ TotalCompletions *uint64 `json:",omitempty"` +} + +// GlobalJob is the type of a Service which executes a Task on every Node +// matching the Service's placement constraints. These tasks run to completion +// and then exit. +// +// This type is deliberately empty. +type GlobalJob struct{} + +// FailureAction is the action to perform when updating a service fails. +type FailureAction string + +const ( + // UpdateFailureActionPause PAUSE + UpdateFailureActionPause FailureAction = "pause" + // UpdateFailureActionContinue CONTINUE + UpdateFailureActionContinue FailureAction = "continue" + // UpdateFailureActionRollback ROLLBACK + UpdateFailureActionRollback FailureAction = "rollback" +) + +// UpdateOrder is the order of operations when rolling out or rolling back +// an updated tasks for a service. +type UpdateOrder string + +const ( + // UpdateOrderStopFirst STOP_FIRST + UpdateOrderStopFirst UpdateOrder = "stop-first" + // UpdateOrderStartFirst START_FIRST + UpdateOrderStartFirst UpdateOrder = "start-first" +) + +// UpdateConfig represents the update configuration. +type UpdateConfig struct { + // Maximum number of tasks to be updated in one iteration. + // 0 means unlimited parallelism. + Parallelism uint64 + + // Amount of time between updates. + Delay time.Duration `json:",omitempty"` + + // FailureAction is the action to take when an update failures. + FailureAction FailureAction `json:",omitempty"` + + // Monitor indicates how long to monitor a task for failure after it is + // created. If the task fails by ending up in one of the states + // REJECTED, COMPLETED, or FAILED, within Monitor from its creation, + // this counts as a failure. If it fails after Monitor, it does not + // count as a failure. If Monitor is unspecified, a default value will + // be used. + Monitor time.Duration `json:",omitempty"` + + // MaxFailureRatio is the fraction of tasks that may fail during + // an update before the failure action is invoked. 
Any task created by + // the current update which ends up in one of the states REJECTED, + // COMPLETED or FAILED within Monitor from its creation counts as a + // failure. The number of failures is divided by the number of tasks + // being updated, and if this fraction is greater than + // MaxFailureRatio, the failure action is invoked. + // + // If the failure action is CONTINUE, there is no effect. + // If the failure action is PAUSE, no more tasks will be updated until + // another update is started. + MaxFailureRatio float32 + + // Order indicates the order of operations when rolling out an updated + // task. Either the old task is shut down before the new task is + // started, or the new task is started before the old task is shut down. + Order UpdateOrder +} + +// ServiceStatus represents the number of running tasks in a service and the +// number of tasks desired to be running. +type ServiceStatus struct { + // RunningTasks is the number of tasks for the service actually in the + // Running state + RunningTasks uint64 + + // DesiredTasks is the number of tasks desired to be running by the + // service. For replicated services, this is the replica count. For global + // services, this is computed by taking the number of tasks with desired + // state of not-Shutdown. + DesiredTasks uint64 + + // CompletedTasks is the number of tasks in the state Completed, if this + // service is in ReplicatedJob or GlobalJob mode. This field must be + // cross-referenced with the service type, because the default value of 0 + // may mean that a service is not in a job mode, or it may mean that the + // job has yet to complete any tasks. + CompletedTasks uint64 +} + +// JobStatus is the status of a job-type service. +type JobStatus struct { + // JobIteration is a value increased each time a Job is executed, + // successfully or otherwise. "Executed", in this case, means the job as a + // whole has been started, not that an individual Task has been launched. 
A + // job is "Executed" when its ServiceSpec is updated. JobIteration can be + // used to disambiguate Tasks belonging to different executions of a job. + // + // Though JobIteration will increase with each subsequent execution, it may + // not necessarily increase by 1, and so JobIteration should not be used to + // keep track of the number of times a job has been executed. + JobIteration Version + + // LastExecution is the time that the job was last executed, as observed by + // Swarm manager. + LastExecution time.Time `json:",omitempty"` +} + +// RegistryAuthSource defines options for the "registryAuthFrom" query parameter +// on service update. +type RegistryAuthSource string + +// Values for RegistryAuthFrom in ServiceUpdateOptions +const ( + RegistryAuthFromSpec RegistryAuthSource = "spec" + RegistryAuthFromPreviousSpec RegistryAuthSource = "previous-spec" +) diff --git a/vendor/github.com/moby/moby/api/types/swarm/service_create_response.go b/vendor/github.com/moby/moby/api/types/swarm/service_create_response.go new file mode 100644 index 00000000000..ebbc097d90d --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/swarm/service_create_response.go @@ -0,0 +1,24 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package swarm + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ServiceCreateResponse contains the information returned to a client on the +// creation of a new service. +// +// swagger:model ServiceCreateResponse +type ServiceCreateResponse struct { + + // The ID of the created service. + // Example: ak7w3gjqoa3kuz8xcpnyy0pvl + ID string `json:"ID,omitempty"` + + // Optional warning message. + // + // FIXME(thaJeztah): this should have "omitempty" in the generated type. 
+ // + // Example: ["unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"] + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/moby/moby/api/types/swarm/service_update_response.go b/vendor/github.com/moby/moby/api/types/swarm/service_update_response.go new file mode 100644 index 00000000000..b7649096a07 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/swarm/service_update_response.go @@ -0,0 +1,16 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package swarm + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ServiceUpdateResponse service update response +// Example: {"Warnings":["unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"]} +// +// swagger:model ServiceUpdateResponse +type ServiceUpdateResponse struct { + + // Optional warning messages + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/moby/moby/api/types/swarm/swarm.go b/vendor/github.com/moby/moby/api/types/swarm/swarm.go new file mode 100644 index 00000000000..84218503101 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/swarm/swarm.go @@ -0,0 +1,228 @@ +package swarm + +import ( + "net/netip" + "time" +) + +// ClusterInfo represents info about the cluster for outputting in "info" +// it contains the same information as "Swarm", but without the JoinTokens +type ClusterInfo struct { + ID string + Meta + Spec Spec + TLSInfo TLSInfo + RootRotationInProgress bool + DefaultAddrPool []netip.Prefix + SubnetSize uint32 + DataPathPort uint32 +} + +// Swarm represents a swarm. +type Swarm struct { + ClusterInfo + JoinTokens JoinTokens +} + +// JoinTokens contains the tokens workers and managers need to join the swarm. +type JoinTokens struct { + // Worker is the join token workers may use to join the swarm. 
+ Worker string + // Manager is the join token managers may use to join the swarm. + Manager string +} + +// Spec represents the spec of a swarm. +type Spec struct { + Annotations + + Orchestration OrchestrationConfig `json:",omitempty"` + Raft RaftConfig `json:",omitempty"` + Dispatcher DispatcherConfig `json:",omitempty"` + CAConfig CAConfig `json:",omitempty"` + TaskDefaults TaskDefaults `json:",omitempty"` + EncryptionConfig EncryptionConfig `json:",omitempty"` +} + +// OrchestrationConfig represents orchestration configuration. +type OrchestrationConfig struct { + // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or + // node. If negative, never remove completed or failed tasks. + TaskHistoryRetentionLimit *int64 `json:",omitempty"` +} + +// TaskDefaults parameterizes cluster-level task creation with default values. +type TaskDefaults struct { + // LogDriver selects the log driver to use for tasks created in the + // orchestrator if unspecified by a service. + // + // Updating this value will only have an affect on new tasks. Old tasks + // will continue use their previously configured log driver until + // recreated. + LogDriver *Driver `json:",omitempty"` +} + +// EncryptionConfig controls at-rest encryption of data and keys. +type EncryptionConfig struct { + // AutoLockManagers specifies whether or not managers TLS keys and raft data + // should be encrypted at rest in such a way that they must be unlocked + // before the manager node starts up again. + AutoLockManagers bool +} + +// RaftConfig represents raft configuration. +type RaftConfig struct { + // SnapshotInterval is the number of log entries between snapshots. + SnapshotInterval uint64 `json:",omitempty"` + + // KeepOldSnapshots is the number of snapshots to keep beyond the + // current snapshot. 
+ KeepOldSnapshots *uint64 `json:",omitempty"` + + // LogEntriesForSlowFollowers is the number of log entries to keep + // around to sync up slow followers after a snapshot is created. + LogEntriesForSlowFollowers uint64 `json:",omitempty"` + + // ElectionTick is the number of ticks that a follower will wait for a message + // from the leader before becoming a candidate and starting an election. + // ElectionTick must be greater than HeartbeatTick. + // + // A tick currently defaults to one second, so these translate directly to + // seconds currently, but this is NOT guaranteed. + ElectionTick int + + // HeartbeatTick is the number of ticks between heartbeats. Every + // HeartbeatTick ticks, the leader will send a heartbeat to the + // followers. + // + // A tick currently defaults to one second, so these translate directly to + // seconds currently, but this is NOT guaranteed. + HeartbeatTick int +} + +// DispatcherConfig represents dispatcher configuration. +type DispatcherConfig struct { + // HeartbeatPeriod defines how often agent should send heartbeats to + // dispatcher. + HeartbeatPeriod time.Duration `json:",omitempty"` +} + +// CAConfig represents CA configuration. +type CAConfig struct { + // NodeCertExpiry is the duration certificates should be issued for + NodeCertExpiry time.Duration `json:",omitempty"` + + // ExternalCAs is a list of CAs to which a manager node will make + // certificate signing requests for node certificates. + ExternalCAs []*ExternalCA `json:",omitempty"` + + // SigningCACert and SigningCAKey specify the desired signing root CA and + // root CA key for the swarm. When inspecting the cluster, the key will + // be redacted. + SigningCACert string `json:",omitempty"` + SigningCAKey string `json:",omitempty"` + + // If this value changes, and there is no specified signing cert and key, + // then the swarm is forced to generate a new root certificate and key. 
+ ForceRotate uint64 `json:",omitempty"` +} + +// ExternalCAProtocol represents type of external CA. +type ExternalCAProtocol string + +// ExternalCAProtocolCFSSL CFSSL +const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl" + +// ExternalCA defines external CA to be used by the cluster. +type ExternalCA struct { + // Protocol is the protocol used by this external CA. + Protocol ExternalCAProtocol + + // URL is the URL where the external CA can be reached. + URL string + + // Options is a set of additional key/value pairs whose interpretation + // depends on the specified CA type. + Options map[string]string `json:",omitempty"` + + // CACert specifies which root CA is used by this external CA. This certificate must + // be in PEM format. + CACert string +} + +// InitRequest is the request used to init a swarm. +type InitRequest struct { + ListenAddr string + AdvertiseAddr string + DataPathAddr string + DataPathPort uint32 + ForceNewCluster bool + Spec Spec + AutoLockManagers bool + Availability NodeAvailability + DefaultAddrPool []netip.Prefix + SubnetSize uint32 +} + +// JoinRequest is the request used to join a swarm. +type JoinRequest struct { + ListenAddr string + AdvertiseAddr string + DataPathAddr string + RemoteAddrs []string + JoinToken string // accept by secret + Availability NodeAvailability +} + +// UnlockRequest is the request used to unlock a swarm. +type UnlockRequest struct { + // UnlockKey is the unlock key in ASCII-armored format. + UnlockKey string +} + +// LocalNodeState represents the state of the local node. 
+type LocalNodeState string + +const ( + // LocalNodeStateInactive INACTIVE + LocalNodeStateInactive LocalNodeState = "inactive" + // LocalNodeStatePending PENDING + LocalNodeStatePending LocalNodeState = "pending" + // LocalNodeStateActive ACTIVE + LocalNodeStateActive LocalNodeState = "active" + // LocalNodeStateError ERROR + LocalNodeStateError LocalNodeState = "error" + // LocalNodeStateLocked LOCKED + LocalNodeStateLocked LocalNodeState = "locked" +) + +// Info represents generic information about swarm. +type Info struct { + NodeID string + NodeAddr string + + LocalNodeState LocalNodeState + ControlAvailable bool + Error string + + RemoteManagers []Peer + Nodes int `json:",omitempty"` + Managers int `json:",omitempty"` + + Cluster *ClusterInfo `json:",omitempty"` + + Warnings []string `json:",omitempty"` +} + +// Peer represents a peer. +type Peer struct { + NodeID string + Addr string +} + +// UnlockKeyResponse contains the response for Engine API: +// GET /swarm/unlockkey +type UnlockKeyResponse struct { + // UnlockKey is the unlock key in ASCII-armored format. + UnlockKey string +} diff --git a/vendor/github.com/moby/moby/api/types/swarm/task.go b/vendor/github.com/moby/moby/api/types/swarm/task.go new file mode 100644 index 00000000000..e2633037df9 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/swarm/task.go @@ -0,0 +1,234 @@ +package swarm + +import ( + "time" +) + +// TaskState represents the state of a task. 
+type TaskState string + +const ( + // TaskStateNew NEW + TaskStateNew TaskState = "new" + // TaskStateAllocated ALLOCATED + TaskStateAllocated TaskState = "allocated" + // TaskStatePending PENDING + TaskStatePending TaskState = "pending" + // TaskStateAssigned ASSIGNED + TaskStateAssigned TaskState = "assigned" + // TaskStateAccepted ACCEPTED + TaskStateAccepted TaskState = "accepted" + // TaskStatePreparing PREPARING + TaskStatePreparing TaskState = "preparing" + // TaskStateReady READY + TaskStateReady TaskState = "ready" + // TaskStateStarting STARTING + TaskStateStarting TaskState = "starting" + // TaskStateRunning RUNNING + TaskStateRunning TaskState = "running" + // TaskStateComplete COMPLETE + TaskStateComplete TaskState = "complete" + // TaskStateShutdown SHUTDOWN + TaskStateShutdown TaskState = "shutdown" + // TaskStateFailed FAILED + TaskStateFailed TaskState = "failed" + // TaskStateRejected REJECTED + TaskStateRejected TaskState = "rejected" + // TaskStateRemove REMOVE + TaskStateRemove TaskState = "remove" + // TaskStateOrphaned ORPHANED + TaskStateOrphaned TaskState = "orphaned" +) + +// Task represents a task. +type Task struct { + ID string + Meta + Annotations + + Spec TaskSpec `json:",omitempty"` + ServiceID string `json:",omitempty"` + Slot int `json:",omitempty"` + NodeID string `json:",omitempty"` + Status TaskStatus `json:",omitempty"` + DesiredState TaskState `json:",omitempty"` + NetworksAttachments []NetworkAttachment `json:",omitempty"` + GenericResources []GenericResource `json:",omitempty"` + + // JobIteration is the JobIteration of the Service that this Task was + // spawned from, if the Service is a ReplicatedJob or GlobalJob. This is + // used to determine which Tasks belong to which run of the job. This field + // is absent if the Service mode is Replicated or Global. + JobIteration *Version `json:",omitempty"` + + // Volumes is the list of VolumeAttachments for this task. 
It specifies + // which particular volumes are to be used by this particular task, and + // fulfilling what mounts in the spec. + Volumes []VolumeAttachment +} + +// TaskSpec represents the spec of a task. +type TaskSpec struct { + // ContainerSpec, NetworkAttachmentSpec, and PluginSpec are mutually exclusive. + // PluginSpec is only used when the `Runtime` field is set to `plugin` + // NetworkAttachmentSpec is used if the `Runtime` field is set to + // `attachment`. + ContainerSpec *ContainerSpec `json:",omitempty"` + PluginSpec *RuntimeSpec `json:",omitempty"` + NetworkAttachmentSpec *NetworkAttachmentSpec `json:",omitempty"` + + Resources *ResourceRequirements `json:",omitempty"` + RestartPolicy *RestartPolicy `json:",omitempty"` + Placement *Placement `json:",omitempty"` + Networks []NetworkAttachmentConfig `json:",omitempty"` + + // LogDriver specifies the LogDriver to use for tasks created from this + // spec. If not present, the one on cluster default on swarm.Spec will be + // used, finally falling back to the engine default if not specified. + LogDriver *Driver `json:",omitempty"` + + // ForceUpdate is a counter that triggers an update even if no relevant + // parameters have been changed. + ForceUpdate uint64 + + Runtime RuntimeType `json:",omitempty"` +} + +// Resources represents resources (CPU/Memory) which can be advertised by a +// node and requested to be reserved for a task. +type Resources struct { + NanoCPUs int64 `json:",omitempty"` + MemoryBytes int64 `json:",omitempty"` + GenericResources []GenericResource `json:",omitempty"` +} + +// Limit describes limits on resources which can be requested by a task. 
+type Limit struct { + NanoCPUs int64 `json:",omitempty"` + MemoryBytes int64 `json:",omitempty"` + Pids int64 `json:",omitempty"` +} + +// GenericResource represents a "user-defined" resource which can +// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1) +type GenericResource struct { + NamedResourceSpec *NamedGenericResource `json:",omitempty"` + DiscreteResourceSpec *DiscreteGenericResource `json:",omitempty"` +} + +// NamedGenericResource represents a "user-defined" resource which is defined +// as a string. +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...) +type NamedGenericResource struct { + Kind string `json:",omitempty"` + Value string `json:",omitempty"` +} + +// DiscreteGenericResource represents a "user-defined" resource which is defined +// as an integer +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to count the resource (SSD=5, HDD=3, ...) +type DiscreteGenericResource struct { + Kind string `json:",omitempty"` + Value int64 `json:",omitempty"` +} + +// ResourceRequirements represents resources requirements. +type ResourceRequirements struct { + Limits *Limit `json:",omitempty"` + Reservations *Resources `json:",omitempty"` + + // Amount of swap in bytes - can only be used together with a memory limit + // -1 means unlimited + // a null pointer keeps the default behaviour of granting twice the memory + // amount in swap + SwapBytes *int64 `json:"SwapBytes,omitzero"` + + // Tune container memory swappiness (0 to 100) - if not specified, defaults + // to the container OS's default - generally 60, or the value predefined in + // the image; set to -1 to unset a previously set value + MemorySwappiness *int64 `json:"MemorySwappiness,omitzero"` +} + +// Placement represents orchestration parameters. 
+type Placement struct { + Constraints []string `json:",omitempty"` + Preferences []PlacementPreference `json:",omitempty"` + MaxReplicas uint64 `json:",omitempty"` + + // Platforms stores all the platforms that the image can run on. + // This field is used in the platform filter for scheduling. If empty, + // then the platform filter is off, meaning there are no scheduling restrictions. + Platforms []Platform `json:",omitempty"` +} + +// PlacementPreference provides a way to make the scheduler aware of factors +// such as topology. +type PlacementPreference struct { + Spread *SpreadOver +} + +// SpreadOver is a scheduling preference that instructs the scheduler to spread +// tasks evenly over groups of nodes identified by labels. +type SpreadOver struct { + // label descriptor, such as engine.labels.az + SpreadDescriptor string +} + +// RestartPolicy represents the restart policy. +type RestartPolicy struct { + Condition RestartPolicyCondition `json:",omitempty"` + Delay *time.Duration `json:",omitempty"` + MaxAttempts *uint64 `json:",omitempty"` + Window *time.Duration `json:",omitempty"` +} + +// RestartPolicyCondition represents when to restart. +type RestartPolicyCondition string + +const ( + // RestartPolicyConditionNone NONE + RestartPolicyConditionNone RestartPolicyCondition = "none" + // RestartPolicyConditionOnFailure ON_FAILURE + RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure" + // RestartPolicyConditionAny ANY + RestartPolicyConditionAny RestartPolicyCondition = "any" +) + +// TaskStatus represents the status of a task. +type TaskStatus struct { + Timestamp time.Time `json:",omitempty"` + State TaskState `json:",omitempty"` + Message string `json:",omitempty"` + Err string `json:",omitempty"` + ContainerStatus *ContainerStatus `json:",omitempty"` + PortStatus PortStatus `json:",omitempty"` +} + +// ContainerStatus represents the status of a container. 
+type ContainerStatus struct { + ContainerID string + PID int + ExitCode int +} + +// PortStatus represents the port status of a task's host ports whose +// service has published host ports +type PortStatus struct { + Ports []PortConfig `json:",omitempty"` +} + +// VolumeAttachment contains the associating a Volume to a Task. +type VolumeAttachment struct { + // ID is the Swarmkit ID of the Volume. This is not the CSI VolumeId. + ID string `json:",omitempty"` + + // Source, together with Target, indicates the Mount, as specified in the + // ContainerSpec, that this volume fulfills. + Source string `json:",omitempty"` + + // Target, together with Source, indicates the Mount, as specified + // in the ContainerSpec, that this volume fulfills. + Target string `json:",omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/system/disk_usage.go b/vendor/github.com/moby/moby/api/types/system/disk_usage.go new file mode 100644 index 00000000000..33230aed231 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/system/disk_usage.go @@ -0,0 +1,31 @@ +package system + +import ( + "github.com/moby/moby/api/types/build" + "github.com/moby/moby/api/types/container" + "github.com/moby/moby/api/types/image" + "github.com/moby/moby/api/types/volume" +) + +// DiskUsageObject represents an object type used for disk usage query filtering. +type DiskUsageObject string + +const ( + // ContainerObject represents a container DiskUsageObject. + ContainerObject DiskUsageObject = "container" + // ImageObject represents an image DiskUsageObject. + ImageObject DiskUsageObject = "image" + // VolumeObject represents a volume DiskUsageObject. + VolumeObject DiskUsageObject = "volume" + // BuildCacheObject represents a build-cache DiskUsageObject. 
+ BuildCacheObject DiskUsageObject = "build-cache" +) + +// DiskUsage contains response of Engine API: +// GET "/system/df" +type DiskUsage struct { + ImageUsage *image.DiskUsage `json:"ImageUsage,omitempty"` + ContainerUsage *container.DiskUsage `json:"ContainerUsage,omitempty"` + VolumeUsage *volume.DiskUsage `json:"VolumeUsage,omitempty"` + BuildCacheUsage *build.DiskUsage `json:"BuildCacheUsage,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/system/info.go b/vendor/github.com/moby/moby/api/types/system/info.go new file mode 100644 index 00000000000..20df949e422 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/system/info.go @@ -0,0 +1,171 @@ +package system + +import ( + "net/netip" + + "github.com/moby/moby/api/types/container" + "github.com/moby/moby/api/types/registry" + "github.com/moby/moby/api/types/swarm" +) + +// Info contains response of Engine API: +// GET "/info" +type Info struct { + ID string + Containers int + ContainersRunning int + ContainersPaused int + ContainersStopped int + Images int + Driver string + DriverStatus [][2]string + SystemStatus [][2]string `json:",omitempty"` // SystemStatus is only propagated by the Swarm standalone API + Plugins PluginsInfo + MemoryLimit bool + SwapLimit bool + CPUCfsPeriod bool `json:"CpuCfsPeriod"` + CPUCfsQuota bool `json:"CpuCfsQuota"` + CPUShares bool + CPUSet bool + PidsLimit bool + IPv4Forwarding bool + Debug bool + NFd int + OomKillDisable bool + NGoroutines int + SystemTime string + LoggingDriver string + CgroupDriver string + CgroupVersion string `json:",omitempty"` + NEventsListener int + KernelVersion string + OperatingSystem string + OSVersion string + OSType string + Architecture string + IndexServerAddress string + RegistryConfig *registry.ServiceConfig + NCPU int + MemTotal int64 + GenericResources []swarm.GenericResource + DockerRootDir string + HTTPProxy string `json:"HttpProxy"` + HTTPSProxy string `json:"HttpsProxy"` + NoProxy string + Name string + Labels 
[]string + ExperimentalBuild bool + ServerVersion string + Runtimes map[string]RuntimeWithStatus + DefaultRuntime string + Swarm swarm.Info + // LiveRestoreEnabled determines whether containers should be kept + // running when the daemon is shutdown or upon daemon start if + // running containers are detected + LiveRestoreEnabled bool + Isolation container.Isolation + InitBinary string + ContainerdCommit Commit + RuncCommit Commit + InitCommit Commit + SecurityOptions []string + ProductLicense string `json:",omitempty"` + DefaultAddressPools []NetworkAddressPool `json:",omitempty"` + FirewallBackend *FirewallInfo `json:"FirewallBackend,omitempty"` + CDISpecDirs []string + DiscoveredDevices []DeviceInfo `json:",omitempty"` + NRI *NRIInfo `json:",omitempty"` + + Containerd *ContainerdInfo `json:",omitempty"` + + // Warnings contains a slice of warnings that occurred while collecting + // system information. These warnings are intended to be informational + // messages for the user, and are not intended to be parsed / used for + // other purposes, as they do not have a fixed format. + Warnings []string +} + +// ContainerdInfo holds information about the containerd instance used by the daemon. +type ContainerdInfo struct { + // Address is the path to the containerd socket. + Address string `json:",omitempty"` + // Namespaces is the containerd namespaces used by the daemon. + Namespaces ContainerdNamespaces +} + +// ContainerdNamespaces reflects the containerd namespaces used by the daemon. +// +// These namespaces can be configured in the daemon configuration, and are +// considered to be used exclusively by the daemon, +// +// As these namespaces are considered to be exclusively accessed +// by the daemon, it is not recommended to change these values, +// or to change them to a value that is used by other systems, +// such as cri-containerd. 
+type ContainerdNamespaces struct { + // Containers holds the default containerd namespace used for + // containers managed by the daemon. + // + // The default namespace for containers is "moby", but will be + // suffixed with the `.` of the remapped `root` if + // user-namespaces are enabled and the containerd image-store + // is used. + Containers string + + // Plugins holds the default containerd namespace used for + // plugins managed by the daemon. + // + // The default namespace for plugins is "moby", but will be + // suffixed with the `.` of the remapped `root` if + // user-namespaces are enabled and the containerd image-store + // is used. + Plugins string +} + +// PluginsInfo is a temp struct holding Plugins name +// registered with docker daemon. It is used by [Info] struct +type PluginsInfo struct { + // List of Volume plugins registered + Volume []string + // List of Network plugins registered + Network []string + // List of Authorization plugins registered + Authorization []string + // List of Log plugins registered + Log []string +} + +// Commit holds the Git-commit (SHA1) that a binary was built from, as reported +// in the version-string of external tools, such as containerd, or runC. +type Commit struct { + // ID is the actual commit ID or version of external tool. + ID string +} + +// NetworkAddressPool is a temp struct used by [Info] struct. +type NetworkAddressPool struct { + Base netip.Prefix + Size int +} + +// FirewallInfo describes the firewall backend. +type FirewallInfo struct { + // Driver is the name of the firewall backend driver. + Driver string `json:"Driver"` + // Info is a list of label/value pairs, containing information related to the firewall. + Info [][2]string `json:"Info,omitempty"` +} + +// DeviceInfo represents a discoverable device from a device driver. +type DeviceInfo struct { + // Source indicates the origin device driver. + Source string `json:"Source"` + // ID is the unique identifier for the device. 
+ // Example: CDI FQDN like "vendor.com/gpu=0", or other driver-specific device ID + ID string `json:"ID"` +} + +// NRIInfo describes the NRI configuration. +type NRIInfo struct { + Info [][2]string `json:"Info,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/system/runtime.go b/vendor/github.com/moby/moby/api/types/system/runtime.go new file mode 100644 index 00000000000..33cad367469 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/system/runtime.go @@ -0,0 +1,20 @@ +package system + +// Runtime describes an OCI runtime +type Runtime struct { + // "Legacy" runtime configuration for runc-compatible runtimes. + + Path string `json:"path,omitempty"` + Args []string `json:"runtimeArgs,omitempty"` + + // Shimv2 runtime configuration. Mutually exclusive with the legacy config above. + + Type string `json:"runtimeType,omitempty"` + Options map[string]any `json:"options,omitempty"` +} + +// RuntimeWithStatus extends [Runtime] to hold [RuntimeStatus]. +type RuntimeWithStatus struct { + Runtime + Status map[string]string `json:"status,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/system/version_response.go b/vendor/github.com/moby/moby/api/types/system/version_response.go new file mode 100644 index 00000000000..61cd1b6e2fe --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/system/version_response.go @@ -0,0 +1,58 @@ +package system + +// VersionResponse contains information about the Docker server host. +// GET "/version" +type VersionResponse struct { + // Platform is the platform (product name) the server is running on. + Platform PlatformInfo `json:",omitempty"` + + // Version is the version of the daemon. + Version string + + // APIVersion is the highest API version supported by the server. + APIVersion string `json:"ApiVersion"` + + // MinAPIVersion is the minimum API version the server supports. + MinAPIVersion string `json:"MinAPIVersion,omitempty"` + + // Os is the operating system the server runs on. 
+ Os string + + // Arch is the hardware architecture the server runs on. + Arch string + + // Components contains version information for the components making + // up the server. Information in this field is for informational + // purposes, and not part of the API contract. + Components []ComponentVersion `json:",omitempty"` + + // The following fields are deprecated, they relate to the Engine component and are kept for backwards compatibility + + GitCommit string `json:",omitempty"` + GoVersion string `json:",omitempty"` + KernelVersion string `json:",omitempty"` + Experimental bool `json:",omitempty"` + BuildTime string `json:",omitempty"` +} + +// PlatformInfo holds information about the platform (product name) the +// server is running on. +type PlatformInfo struct { + // Name is the name of the platform (for example, "Docker Engine - Community", + // or "Docker Desktop 4.49.0 (208003)") + Name string +} + +// ComponentVersion describes the version information for a specific component. +type ComponentVersion struct { + Name string + Version string + + // Details contains Key/value pairs of strings with additional information + // about the component. These values are intended for informational purposes + // only, and their content is not defined, and not part of the API + // specification. + // + // These messages can be printed by the client as information to the user. + Details map[string]string `json:",omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/types.go b/vendor/github.com/moby/moby/api/types/types.go new file mode 100644 index 00000000000..5da64796e8d --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/types.go @@ -0,0 +1,33 @@ +package types + +// MediaType represents an HTTP media type (MIME type) used in API +// Content-Type and Accept headers. 
+// +// In addition to standard media types (for example, "application/json"), +// this package defines vendor-specific vendor media types for streaming +// endpoints, such as raw TTY streams and multiplexed stdout/stderr streams. +type MediaType = string + +const ( + // MediaTypeRawStream is a vendor-specific media type for raw TTY streams. + MediaTypeRawStream MediaType = "application/vnd.docker.raw-stream" + + // MediaTypeMultiplexedStream is a vendor-specific media type for streams + // where stdin, stdout, and stderr are multiplexed into a single byte stream. + // + // Use stdcopy.StdCopy (https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy) + // to demultiplex the stream. + MediaTypeMultiplexedStream MediaType = "application/vnd.docker.multiplexed-stream" + + // MediaTypeJSON is the media type for JSON objects. + MediaTypeJSON MediaType = "application/json" + + // MediaTypeNDJSON is the media type for newline-delimited JSON streams (https://github.com/ndjson/ndjson-spec). + MediaTypeNDJSON MediaType = "application/x-ndjson" + + // MediaTypeJSONLines is the media type for JSON Lines streams (https://jsonlines.org/). + MediaTypeJSONLines MediaType = "application/jsonl" + + // MediaTypeJSONSequence is the media type for JSON text sequences (RFC 7464). + MediaTypeJSONSequence MediaType = "application/json-seq" +) diff --git a/vendor/github.com/moby/moby/api/types/volume/cluster_volume.go b/vendor/github.com/moby/moby/api/types/volume/cluster_volume.go new file mode 100644 index 00000000000..07b75d12a01 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/volume/cluster_volume.go @@ -0,0 +1,420 @@ +package volume + +import ( + "github.com/moby/moby/api/types/swarm" +) + +// ClusterVolume contains options and information specific to, and only present +// on, Swarm CSI cluster volumes. +type ClusterVolume struct { + // ID is the Swarm ID of the volume. 
Because cluster volumes are Swarm + // objects, they have an ID, unlike non-cluster volumes, which only have a + // Name. This ID can be used to refer to the cluster volume. + ID string + + // Meta is the swarm metadata about this volume. + swarm.Meta + + // Spec is the cluster-specific options from which this volume is derived. + Spec ClusterVolumeSpec + + // PublishStatus contains the status of the volume as it pertains to its + // publishing on Nodes. + PublishStatus []*PublishStatus `json:",omitempty"` + + // Info is information about the global status of the volume. + Info *Info `json:",omitempty"` +} + +// ClusterVolumeSpec contains the spec used to create this volume. +type ClusterVolumeSpec struct { + // Group defines the volume group of this volume. Volumes belonging to the + // same group can be referred to by group name when creating Services. + // Referring to a volume by group instructs swarm to treat volumes in that + // group interchangeably for the purpose of scheduling. Volumes with an + // empty string for a group technically all belong to the same, emptystring + // group. + Group string `json:",omitempty"` + + // AccessMode defines how the volume is used by tasks. + AccessMode *AccessMode `json:",omitempty"` + + // AccessibilityRequirements specifies where in the cluster a volume must + // be accessible from. + // + // This field must be empty if the plugin does not support + // VOLUME_ACCESSIBILITY_CONSTRAINTS capabilities. If it is present but the + // plugin does not support it, volume will not be created. + // + // If AccessibilityRequirements is empty, but the plugin does support + // VOLUME_ACCESSIBILITY_CONSTRAINTS, then Swarmkit will assume the entire + // cluster is a valid target for the volume. + AccessibilityRequirements *TopologyRequirement `json:",omitempty"` + + // CapacityRange defines the desired capacity that the volume should be + // created with. If nil, the plugin will decide the capacity. 
+ CapacityRange *CapacityRange `json:",omitempty"` + + // Secrets defines Swarm Secrets that are passed to the CSI storage plugin + // when operating on this volume. + Secrets []Secret `json:",omitempty"` + + // Availability is the Volume's desired availability. Analogous to Node + // Availability, this allows the user to take volumes offline in order to + // update or delete them. + Availability Availability `json:",omitempty"` +} + +// Availability specifies the availability of the volume. +type Availability string + +const ( + // AvailabilityActive indicates that the volume is active and fully + // schedulable on the cluster. + AvailabilityActive Availability = "active" + + // AvailabilityPause indicates that no new workloads should use the + // volume, but existing workloads can continue to use it. + AvailabilityPause Availability = "pause" + + // AvailabilityDrain indicates that all workloads using this volume + // should be rescheduled, and the volume unpublished from all nodes. + AvailabilityDrain Availability = "drain" +) + +// AccessMode defines the access mode of a volume. +type AccessMode struct { + // Scope defines the set of nodes this volume can be used on at one time. + Scope Scope `json:",omitempty"` + + // Sharing defines the number and way that different tasks can use this + // volume at one time. + Sharing SharingMode `json:",omitempty"` + + // MountVolume defines options for using this volume as a Mount-type + // volume. + // + // Either BlockVolume or MountVolume, but not both, must be present. + MountVolume *TypeMount `json:",omitempty"` + + // BlockVolume defines options for using this volume as a Block-type + // volume. + // + // Either BlockVolume or MountVolume, but not both, must be present. + BlockVolume *TypeBlock `json:",omitempty"` +} + +// Scope defines the Scope of a Cluster Volume. This is how many nodes a +// Volume can be accessed simultaneously on. 
+type Scope string + +const ( + // ScopeSingleNode indicates the volume can be used on one node at a + // time. + ScopeSingleNode Scope = "single" + + // ScopeMultiNode indicates the volume can be used on many nodes at + // the same time. + ScopeMultiNode Scope = "multi" +) + +// SharingMode defines the Sharing of a Cluster Volume. This is how Tasks using a +// Volume at the same time can use it. +type SharingMode string + +const ( + // SharingNone indicates that only one Task may use the Volume at a + // time. + SharingNone SharingMode = "none" + + // SharingReadOnly indicates that the Volume may be shared by any + // number of Tasks, but they must be read-only. + SharingReadOnly SharingMode = "readonly" + + // SharingOneWriter indicates that the Volume may be shared by any + // number of Tasks, but all after the first must be read-only. + SharingOneWriter SharingMode = "onewriter" + + // SharingAll means that the Volume may be shared by any number of + // Tasks, as readers or writers. + SharingAll SharingMode = "all" +) + +// TypeBlock defines options for using a volume as a block-type volume. +// +// Intentionally empty. +type TypeBlock struct{} + +// TypeMount contains options for using a volume as a Mount-type +// volume. +type TypeMount struct { + // FsType specifies the filesystem type for the mount volume. Optional. + FsType string `json:",omitempty"` + + // MountFlags defines flags to pass when mounting the volume. Optional. + MountFlags []string `json:",omitempty"` +} + +// TopologyRequirement expresses the user's requirements for a volume's +// accessible topology. +type TopologyRequirement struct { + // Requisite specifies a list of Topologies, at least one of which the + // volume must be accessible from. + // + // Taken verbatim from the CSI Spec: + // + // Specifies the list of topologies the provisioned volume MUST be + // accessible from. + // This field is OPTIONAL. 
If TopologyRequirement is specified either + // requisite or preferred or both MUST be specified. + // + // If requisite is specified, the provisioned volume MUST be + // accessible from at least one of the requisite topologies. + // + // Given + // x = number of topologies provisioned volume is accessible from + // n = number of requisite topologies + // The CO MUST ensure n >= 1. The SP MUST ensure x >= 1 + // If x==n, then the SP MUST make the provisioned volume available to + // all topologies from the list of requisite topologies. If it is + // unable to do so, the SP MUST fail the CreateVolume call. + // For example, if a volume should be accessible from a single zone, + // and requisite = + // {"region": "R1", "zone": "Z2"} + // then the provisioned volume MUST be accessible from the "region" + // "R1" and the "zone" "Z2". + // Similarly, if a volume should be accessible from two zones, and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // then the provisioned volume MUST be accessible from the "region" + // "R1" and both "zone" "Z2" and "zone" "Z3". + // + // If xn, then the SP MUST make the provisioned volume available from + // all topologies from the list of requisite topologies and MAY choose + // the remaining x-n unique topologies from the list of all possible + // topologies. If it is unable to do so, the SP MUST fail the + // CreateVolume call. + // For example, if a volume should be accessible from two zones, and + // requisite = + // {"region": "R1", "zone": "Z2"} + // then the provisioned volume MUST be accessible from the "region" + // "R1" and the "zone" "Z2" and the SP may select the second zone + // independently, e.g. "R1/Z4". + Requisite []Topology `json:",omitempty"` + + // Preferred is a list of Topologies that the volume should attempt to be + // provisioned in. 
+ // + // Taken from the CSI spec: + // + // Specifies the list of topologies the CO would prefer the volume to + // be provisioned in. + // + // This field is OPTIONAL. If TopologyRequirement is specified either + // requisite or preferred or both MUST be specified. + // + // An SP MUST attempt to make the provisioned volume available using + // the preferred topologies in order from first to last. + // + // If requisite is specified, all topologies in preferred list MUST + // also be present in the list of requisite topologies. + // + // If the SP is unable to make the provisioned volume available + // from any of the preferred topologies, the SP MAY choose a topology + // from the list of requisite topologies. + // If the list of requisite topologies is not specified, then the SP + // MAY choose from the list of all possible topologies. + // If the list of requisite topologies is specified and the SP is + // unable to make the provisioned volume available from any of the + // requisite topologies it MUST fail the CreateVolume call. + // + // Example 1: + // Given a volume should be accessible from a single zone, and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // preferred = + // {"region": "R1", "zone": "Z3"} + // then the SP SHOULD first attempt to make the provisioned volume + // available from "zone" "Z3" in the "region" "R1" and fall back to + // "zone" "Z2" in the "region" "R1" if that is not possible. 
+ // + // Example 2: + // Given a volume should be accessible from a single zone, and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"}, + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z5"} + // preferred = + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z2"} + // then the SP SHOULD first attempt to make the provisioned volume + // accessible from "zone" "Z4" in the "region" "R1" and fall back to + // "zone" "Z2" in the "region" "R1" if that is not possible. If that + // is not possible, the SP may choose between either the "zone" + // "Z3" or "Z5" in the "region" "R1". + // + // Example 3: + // Given a volume should be accessible from TWO zones (because an + // opaque parameter in CreateVolumeRequest, for example, specifies + // the volume is accessible from two zones, aka synchronously + // replicated), and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"}, + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z5"} + // preferred = + // {"region": "R1", "zone": "Z5"}, + // {"region": "R1", "zone": "Z3"} + // then the SP SHOULD first attempt to make the provisioned volume + // accessible from the combination of the two "zones" "Z5" and "Z3" in + // the "region" "R1". If that's not possible, it should fall back to + // a combination of "Z5" and other possibilities from the list of + // requisite. If that's not possible, it should fall back to a + // combination of "Z3" and other possibilities from the list of + // requisite. If that's not possible, it should fall back to a + // combination of other possibilities from the list of requisite. + Preferred []Topology `json:",omitempty"` +} + +// Topology is a map of topological domains to topological segments. +// +// This description is taken verbatim from the CSI Spec: +// +// A topological domain is a sub-division of a cluster, like "region", +// "zone", "rack", etc. 
+// A topological segment is a specific instance of a topological domain, +// like "zone3", "rack3", etc. +// For example {"com.company/zone": "Z1", "com.company/rack": "R3"} +// Valid keys have two segments: an OPTIONAL prefix and name, separated +// by a slash (/), for example: "com.company.example/zone". +// The key name segment is REQUIRED. The prefix is OPTIONAL. +// The key name MUST be 63 characters or less, begin and end with an +// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), +// underscores (_), dots (.), or alphanumerics in between, for example +// "zone". +// The key prefix MUST be 63 characters or less, begin and end with a +// lower-case alphanumeric character ([a-z0-9]), contain only +// dashes (-), dots (.), or lower-case alphanumerics in between, and +// follow domain name notation format +// (https://tools.ietf.org/html/rfc1035#section-2.3.1). +// The key prefix SHOULD include the plugin's host company name and/or +// the plugin name, to minimize the possibility of collisions with keys +// from other plugins. +// If a key prefix is specified, it MUST be identical across all +// topology keys returned by the SP (across all RPCs). +// Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone" +// MUST not both exist. +// Each value (topological segment) MUST contain 1 or more strings. +// Each string MUST be 63 characters or less and begin and end with an +// alphanumeric character with '-', '_', '.', or alphanumerics in +// between. +type Topology struct { + Segments map[string]string `json:",omitempty"` +} + +// CapacityRange describes the minimum and maximum capacity a volume should be +// created with +type CapacityRange struct { + // RequiredBytes specifies that a volume must be at least this big. The + // value of 0 indicates an unspecified minimum. + RequiredBytes int64 + + // LimitBytes specifies that a volume must not be bigger than this. 
The + // value of 0 indicates an unspecified maximum + LimitBytes int64 +} + +// Secret represents a Swarm Secret value that must be passed to the CSI +// storage plugin when operating on this Volume. It represents one key-value +// pair of possibly many. +type Secret struct { + // Key is the name of the key of the key-value pair passed to the plugin. + Key string + + // Secret is the swarm Secret object from which to read data. This can be a + // Secret name or ID. The Secret data is retrieved by Swarm and used as the + // value of the key-value pair passed to the plugin. + Secret string +} + +// PublishState represents the state of a Volume as it pertains to its +// use on a particular Node. +type PublishState string + +const ( + // StatePending indicates that the volume should be published on + // this node, but the call to ControllerPublishVolume has not been + // successfully completed yet and the result recorded by swarmkit. + StatePending PublishState = "pending-publish" + + // StatePublished means the volume is published successfully to the node. + StatePublished PublishState = "published" + + // StatePendingNodeUnpublish indicates that the Volume should be + // unpublished on the Node, and we're waiting for confirmation that it has + // done so. After the Node has confirmed that the Volume has been + // unpublished, the state will move to StatePendingUnpublish. + StatePendingNodeUnpublish PublishState = "pending-node-unpublish" + + // StatePendingUnpublish means the volume is still published to the node + // by the controller, awaiting the operation to unpublish it. + StatePendingUnpublish PublishState = "pending-controller-unpublish" +) + +// PublishStatus represents the status of the volume as published to an +// individual node +type PublishStatus struct { + // NodeID is the ID of the swarm node this Volume is published to. + NodeID string `json:",omitempty"` + + // State is the publish state of the volume. 
+ State PublishState `json:",omitempty"` + + // PublishContext is the PublishContext returned by the CSI plugin when + // a volume is published. + PublishContext map[string]string `json:",omitempty"` +} + +// Info contains information about the Volume as a whole as provided by +// the CSI storage plugin. +type Info struct { + // CapacityBytes is the capacity of the volume in bytes. A value of 0 + // indicates that the capacity is unknown. + CapacityBytes int64 `json:",omitempty"` + + // VolumeContext is the context originating from the CSI storage plugin + // when the Volume is created. + VolumeContext map[string]string `json:",omitempty"` + + // VolumeID is the ID of the Volume as seen by the CSI storage plugin. This + // is distinct from the Volume's Swarm ID, which is the ID used by all of + // the Docker Engine to refer to the Volume. If this field is blank, then + // the Volume has not been successfully created yet. + VolumeID string `json:",omitempty"` + + // AccessibleTopology is the topology this volume is actually accessible + // from. + AccessibleTopology []Topology `json:",omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/volume/create_request.go b/vendor/github.com/moby/moby/api/types/volume/create_request.go new file mode 100644 index 00000000000..3217df82709 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/volume/create_request.go @@ -0,0 +1,36 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package volume + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// CreateRequest VolumeConfig +// +// # Volume configuration +// +// swagger:model CreateRequest +type CreateRequest struct { + + // cluster volume spec + ClusterVolumeSpec *ClusterVolumeSpec `json:"ClusterVolumeSpec,omitempty"` + + // Name of the volume driver to use. + // Example: custom + Driver string `json:"Driver,omitempty"` + + // A mapping of driver options and values. 
These options are + // passed directly to the driver and are driver specific. + // + // Example: {"device":"tmpfs","o":"size=100m,uid=1000","type":"tmpfs"} + DriverOpts map[string]string `json:"DriverOpts,omitempty"` + + // User-defined key/value metadata. + // Example: {"com.example.some-label":"some-value","com.example.some-other-label":"some-other-value"} + Labels map[string]string `json:"Labels,omitempty"` + + // The new volume's name. If not specified, Docker generates a name. + // + // Example: tardis + Name string `json:"Name,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/volume/disk_usage.go b/vendor/github.com/moby/moby/api/types/volume/disk_usage.go new file mode 100644 index 00000000000..e2afbac6591 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/volume/disk_usage.go @@ -0,0 +1,36 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package volume + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// DiskUsage represents system data usage for volume resources. +// +// swagger:model DiskUsage +type DiskUsage struct { + + // Count of active volumes. + // + // Example: 1 + ActiveCount int64 `json:"ActiveCount,omitempty"` + + // List of volumes. + // + Items []Volume `json:"Items,omitempty"` + + // Disk space that can be reclaimed by removing inactive volumes. + // + // Example: 12345678 + Reclaimable int64 `json:"Reclaimable,omitempty"` + + // Count of all volumes. + // + // Example: 4 + TotalCount int64 `json:"TotalCount,omitempty"` + + // Disk space in use by volumes. 
+ // + // Example: 98765432 + TotalSize int64 `json:"TotalSize,omitempty"` +} diff --git a/vendor/github.com/moby/moby/api/types/volume/list_response.go b/vendor/github.com/moby/moby/api/types/volume/list_response.go new file mode 100644 index 00000000000..f257762f09d --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/volume/list_response.go @@ -0,0 +1,22 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package volume + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ListResponse VolumeListResponse +// +// # Volume list response +// +// swagger:model ListResponse +type ListResponse struct { + + // List of volumes + Volumes []Volume `json:"Volumes"` + + // Warnings that occurred when fetching the list of volumes. + // + // Example: [] + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/moby/moby/api/types/volume/prune_report.go b/vendor/github.com/moby/moby/api/types/volume/prune_report.go new file mode 100644 index 00000000000..7f501d01a7c --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/volume/prune_report.go @@ -0,0 +1,8 @@ +package volume + +// PruneReport contains the response for Engine API: +// POST "/volumes/prune" +type PruneReport struct { + VolumesDeleted []string + SpaceReclaimed uint64 +} diff --git a/vendor/github.com/moby/moby/api/types/volume/volume.go b/vendor/github.com/moby/moby/api/types/volume/volume.go new file mode 100644 index 00000000000..524ebfb8ac6 --- /dev/null +++ b/vendor/github.com/moby/moby/api/types/volume/volume.go @@ -0,0 +1,87 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package volume + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// Volume volume +// +// swagger:model Volume +type Volume struct { + + // cluster volume + ClusterVolume *ClusterVolume `json:"ClusterVolume,omitempty"` + + // Date/Time the volume was created. + // Example: 2016-06-07T20:31:11.853781916Z + CreatedAt string `json:"CreatedAt,omitempty"` + + // Name of the volume driver used by the volume. + // Example: custom + // Required: true + Driver string `json:"Driver"` + + // User-defined key/value metadata. + // Example: {"com.example.some-label":"some-value","com.example.some-other-label":"some-other-value"} + // Required: true + Labels map[string]string `json:"Labels"` + + // Mount path of the volume on the host. + // Example: /var/lib/docker/volumes/tardis + // Required: true + Mountpoint string `json:"Mountpoint"` + + // Name of the volume. + // Example: tardis + // Required: true + Name string `json:"Name"` + + // The driver specific options used when creating the volume. + // + // Example: {"device":"tmpfs","o":"size=100m,uid=1000","type":"tmpfs"} + // Required: true + Options map[string]string `json:"Options"` + + // The level at which the volume exists. Either `global` for cluster-wide, + // or `local` for machine level. + // + // Example: local + // Required: true + // Enum: ["local","global"] + Scope string `json:"Scope"` + + // Low-level details about the volume, provided by the volume driver. + // Details are returned as a map with key/value pairs: + // `{"key":"value","key2":"value2"}`. + // + // The `Status` field is optional, and is omitted if the volume driver + // does not support this feature. + // + // Example: {"hello":"world"} + Status map[string]any `json:"Status,omitempty"` + + // usage data + UsageData *UsageData `json:"UsageData,omitempty"` +} + +// UsageData Usage details about the volume. This information is used by the +// `GET /system/df` endpoint, and omitted in other endpoints. 
+// +// swagger:model UsageData +type UsageData struct { + + // The number of containers referencing this volume. This field + // is set to `-1` if the reference-count is not available. + // + // Required: true + RefCount int64 `json:"RefCount"` + + // Amount of disk space used by the volume (in bytes). This information + // is only available for volumes created with the `"local"` volume + // driver. For volumes created with other volume drivers, this field + // is set to `-1` ("not available") + // + // Required: true + Size int64 `json:"Size"` +} diff --git a/vendor/go.mongodb.org/mongo-driver/LICENSE b/vendor/github.com/moby/moby/client/LICENSE similarity index 99% rename from vendor/go.mongodb.org/mongo-driver/LICENSE rename to vendor/github.com/moby/moby/client/LICENSE index 261eeb9e9f8..d6456956733 100644 --- a/vendor/go.mongodb.org/mongo-driver/LICENSE +++ b/vendor/github.com/moby/moby/client/LICENSE @@ -1,3 +1,4 @@ + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ diff --git a/vendor/github.com/moby/moby/client/README.md b/vendor/github.com/moby/moby/client/README.md new file mode 100644 index 00000000000..115e604dbc6 --- /dev/null +++ b/vendor/github.com/moby/moby/client/README.md @@ -0,0 +1,52 @@ +# Go client for the Docker Engine API + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/moby/moby/client)](https://pkg.go.dev/github.com/moby/moby/client) +![GitHub License](https://img.shields.io/github/license/moby/moby) +[![Go Report Card](https://goreportcard.com/badge/github.com/moby/moby/client)](https://goreportcard.com/report/github.com/moby/moby/client) +[![OpenSSF Scorecard](https://api.scorecard.dev/projects/github.com/moby/moby/badge)](https://scorecard.dev/viewer/?uri=github.com/moby/moby) +[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/10989/badge)](https://www.bestpractices.dev/projects/10989) + +The `docker` command uses this package to communicate with the daemon. 
It can +also be used by your own Go applications to do anything the command-line +interface does; running containers, pulling or pushing images, etc. + +For example, to list all containers (the equivalent of `docker ps --all`): + +```go +package main + +import ( + "context" + "fmt" + + "github.com/moby/moby/client" +) + +func main() { + // Create a new client that handles common environment variables + // for configuration (DOCKER_HOST, DOCKER_API_VERSION), and does + // API-version negotiation to allow downgrading the API version + // when connecting with an older daemon version. + apiClient, err := client.New(client.FromEnv) + if err != nil { + panic(err) + } + defer apiClient.Close() + + // List all containers (both stopped and running). + result, err := apiClient.ContainerList(context.Background(), client.ContainerListOptions{ + All: true, + }) + if err != nil { + panic(err) + } + + // Print each container's ID, status and the image it was created from. + fmt.Printf("%s %-22s %s\n", "ID", "STATUS", "IMAGE") + for _, ctr := range result.Items { + fmt.Printf("%s %-22s %s\n", ctr.ID, ctr.Status, ctr.Image) + } +} +``` + +[Full documentation is available on pkg.go.dev.](https://pkg.go.dev/github.com/moby/moby/client) diff --git a/vendor/github.com/moby/moby/client/auth.go b/vendor/github.com/moby/moby/client/auth.go new file mode 100644 index 00000000000..8baf39d2cfa --- /dev/null +++ b/vendor/github.com/moby/moby/client/auth.go @@ -0,0 +1,14 @@ +package client + +import ( + "context" + + "github.com/moby/moby/api/types/registry" +) + +// staticAuth creates a privilegeFn from the given registryAuth. 
+func staticAuth(registryAuth string) registry.RequestAuthConfig { + return func(ctx context.Context) (string, error) { + return registryAuth, nil + } +} diff --git a/vendor/github.com/moby/moby/client/build_cancel.go b/vendor/github.com/moby/moby/client/build_cancel.go new file mode 100644 index 00000000000..f6cfc6bc915 --- /dev/null +++ b/vendor/github.com/moby/moby/client/build_cancel.go @@ -0,0 +1,21 @@ +package client + +import ( + "context" + "net/url" +) + +type BuildCancelOptions struct{} + +type BuildCancelResult struct{} + +// BuildCancel requests the daemon to cancel the ongoing build request +// with the given id. +func (cli *Client) BuildCancel(ctx context.Context, id string, _ BuildCancelOptions) (BuildCancelResult, error) { + query := url.Values{} + query.Set("id", id) + + resp, err := cli.post(ctx, "/build/cancel", query, nil, nil) + defer ensureReaderClosed(resp) + return BuildCancelResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/build_prune.go b/vendor/github.com/moby/moby/client/build_prune.go new file mode 100644 index 00000000000..a22e9685e2f --- /dev/null +++ b/vendor/github.com/moby/moby/client/build_prune.go @@ -0,0 +1,67 @@ +package client + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + "strconv" + + "github.com/moby/moby/api/types/build" + "github.com/moby/moby/client/pkg/versions" +) + +// BuildCachePruneOptions hold parameters to prune the build cache. +type BuildCachePruneOptions struct { + All bool + ReservedSpace int64 + MaxUsedSpace int64 + MinFreeSpace int64 + Filters Filters +} + +// BuildCachePruneResult holds the result from the BuildCachePrune method. +type BuildCachePruneResult struct { + Report build.CachePruneReport +} + +// BuildCachePrune requests the daemon to delete unused cache data. 
+func (cli *Client) BuildCachePrune(ctx context.Context, opts BuildCachePruneOptions) (BuildCachePruneResult, error) { + var out BuildCachePruneResult + query := url.Values{} + if opts.All { + query.Set("all", "1") + } + + if opts.ReservedSpace != 0 { + // Prior to API v1.48, 'keep-storage' was used to set the reserved space for the build cache. + // TODO(austinvazquez): remove once API v1.47 is no longer supported. See https://github.com/moby/moby/issues/50902 + if versions.LessThanOrEqualTo(cli.version, "1.47") { + query.Set("keep-storage", strconv.Itoa(int(opts.ReservedSpace))) + } else { + query.Set("reserved-space", strconv.Itoa(int(opts.ReservedSpace))) + } + } + if opts.MaxUsedSpace != 0 { + query.Set("max-used-space", strconv.Itoa(int(opts.MaxUsedSpace))) + } + if opts.MinFreeSpace != 0 { + query.Set("min-free-space", strconv.Itoa(int(opts.MinFreeSpace))) + } + opts.Filters.updateURLValues(query) + + resp, err := cli.post(ctx, "/build/prune", query, nil, nil) + defer ensureReaderClosed(resp) + + if err != nil { + return BuildCachePruneResult{}, err + } + + report := build.CachePruneReport{} + if err := json.NewDecoder(resp.Body).Decode(&report); err != nil { + return BuildCachePruneResult{}, fmt.Errorf("error retrieving disk usage: %w", err) + } + + out.Report = report + return out, nil +} diff --git a/vendor/github.com/moby/moby/client/checkpoint_create.go b/vendor/github.com/moby/moby/client/checkpoint_create.go new file mode 100644 index 00000000000..b3ba5459d00 --- /dev/null +++ b/vendor/github.com/moby/moby/client/checkpoint_create.go @@ -0,0 +1,36 @@ +package client + +import ( + "context" + + "github.com/moby/moby/api/types/checkpoint" +) + +// CheckpointCreateOptions holds parameters to create a checkpoint from a container. +type CheckpointCreateOptions struct { + CheckpointID string + CheckpointDir string + Exit bool +} + +// CheckpointCreateResult holds the result from [client.CheckpointCreate]. 
+type CheckpointCreateResult struct { + // Add future fields here +} + +// CheckpointCreate creates a checkpoint from the given container. +func (cli *Client) CheckpointCreate(ctx context.Context, containerID string, options CheckpointCreateOptions) (CheckpointCreateResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return CheckpointCreateResult{}, err + } + requestBody := checkpoint.CreateRequest{ + CheckpointID: options.CheckpointID, + CheckpointDir: options.CheckpointDir, + Exit: options.Exit, + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/checkpoints", nil, requestBody, nil) + defer ensureReaderClosed(resp) + return CheckpointCreateResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/checkpoint_list.go b/vendor/github.com/moby/moby/client/checkpoint_list.go new file mode 100644 index 00000000000..5815f836a16 --- /dev/null +++ b/vendor/github.com/moby/moby/client/checkpoint_list.go @@ -0,0 +1,38 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/checkpoint" +) + +// CheckpointListOptions holds parameters to list checkpoints for a container. +type CheckpointListOptions struct { + CheckpointDir string +} + +// CheckpointListResult holds the result from the CheckpointList method. +type CheckpointListResult struct { + Items []checkpoint.Summary +} + +// CheckpointList returns the checkpoints of the given container in the docker host. 
+func (cli *Client) CheckpointList(ctx context.Context, container string, options CheckpointListOptions) (CheckpointListResult, error) { + var out CheckpointListResult + + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return out, err + } + + err = json.NewDecoder(resp.Body).Decode(&out.Items) + return out, err +} diff --git a/vendor/github.com/moby/moby/client/checkpoint_remove.go b/vendor/github.com/moby/moby/client/checkpoint_remove.go new file mode 100644 index 00000000000..8042c508822 --- /dev/null +++ b/vendor/github.com/moby/moby/client/checkpoint_remove.go @@ -0,0 +1,34 @@ +package client + +import ( + "context" + "net/url" +) + +// CheckpointRemoveOptions holds parameters to delete a checkpoint from a container. +type CheckpointRemoveOptions struct { + CheckpointID string + CheckpointDir string +} + +// CheckpointRemoveResult represents the result of [Client.CheckpointRemove]. +type CheckpointRemoveResult struct { + // No fields currently; placeholder for future use. +} + +// CheckpointRemove deletes the checkpoint with the given name from the given container. 
+func (cli *Client) CheckpointRemove(ctx context.Context, containerID string, options CheckpointRemoveOptions) (CheckpointRemoveResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return CheckpointRemoveResult{}, err + } + + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil) + defer ensureReaderClosed(resp) + return CheckpointRemoveResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/client.go b/vendor/github.com/moby/moby/client/client.go new file mode 100644 index 00000000000..2d1e0db79a8 --- /dev/null +++ b/vendor/github.com/moby/moby/client/client.go @@ -0,0 +1,433 @@ +/* +Package client is a Go client for the Docker Engine API. + +For more information about the Engine API, see the documentation: +https://docs.docker.com/reference/api/engine/ + +# Usage + +You use the library by constructing a client object using [New] +and calling methods on it. The client can be configured from environment +variables by passing the [FromEnv] option. Other options can be configured +manually by passing any of the available [Opt] options. + +For example, to list running containers (the equivalent of "docker ps"): + + package main + + import ( + "context" + "fmt" + "log" + + "github.com/moby/moby/client" + ) + + func main() { + // Create a new client that handles common environment variables + // for configuration (DOCKER_HOST, DOCKER_API_VERSION), and does + // API-version negotiation to allow downgrading the API version + // when connecting with an older daemon version. + apiClient, err := client.New(client.FromEnv) + if err != nil { + log.Fatal(err) + } + + // List all containers (both stopped and running). 
+ result, err := apiClient.ContainerList(context.Background(), client.ContainerListOptions{ + All: true, + }) + if err != nil { + log.Fatal(err) + } + + // Print each container's ID, status and the image it was created from. + fmt.Printf("%s %-22s %s\n", "ID", "STATUS", "IMAGE") + for _, ctr := range result.Items { + fmt.Printf("%s %-22s %s\n", ctr.ID, ctr.Status, ctr.Image) + } + } +*/ +package client + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "net" + "net/http" + "net/url" + "path" + "slices" + "strings" + "sync" + "sync/atomic" + "time" + + cerrdefs "github.com/containerd/errdefs" + "github.com/docker/go-connections/sockets" + "github.com/moby/moby/client/pkg/versions" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" +) + +// DummyHost is a hostname used for local communication. +// +// It acts as a valid formatted hostname for local connections (such as "unix://" +// or "npipe://") which do not require a hostname. It should never be resolved, +// but uses the special-purpose ".localhost" TLD (as defined in [RFC 2606, Section 2] +// and [RFC 6761, Section 6.3]). +// +// [RFC 7230, Section 5.4] defines that an empty header must be used for such +// cases: +// +// If the authority component is missing or undefined for the target URI, +// then a client MUST send a Host header field with an empty field-value. +// +// However, [Go stdlib] enforces the semantics of HTTP(S) over TCP, does not +// allow an empty header to be used, and requires req.URL.Scheme to be either +// "http" or "https". 
+// +// For further details, refer to: +// +// - https://github.com/docker/engine-api/issues/189 +// - https://github.com/golang/go/issues/13624 +// - https://github.com/golang/go/issues/61076 +// - https://github.com/moby/moby/issues/45935 +// +// [RFC 2606, Section 2]: https://www.rfc-editor.org/rfc/rfc2606.html#section-2 +// [RFC 6761, Section 6.3]: https://www.rfc-editor.org/rfc/rfc6761#section-6.3 +// [RFC 7230, Section 5.4]: https://datatracker.ietf.org/doc/html/rfc7230#section-5.4 +// [Go stdlib]: https://github.com/golang/go/blob/6244b1946bc2101b01955468f1be502dbadd6807/src/net/http/transport.go#L558-L569 +const DummyHost = "api.moby.localhost" + +// MaxAPIVersion is the highest REST API version supported by the client. +// If API-version negotiation is enabled, the client may downgrade its API version. +// Similarly, the [WithAPIVersion] and [WithAPIVersionFromEnv] options allow +// overriding the version and disable API-version negotiation. +// +// This version may be lower than the version of the api library module used. +const MaxAPIVersion = "1.54" + +// MinAPIVersion is the minimum API version supported by the client. API versions +// below this version are not considered when performing API-version negotiation. +const MinAPIVersion = "1.40" + +// Ensure that Client always implements APIClient. +var _ APIClient = &Client{} + +// Client is the API client that performs all operations +// against a docker server. +type Client struct { + clientConfig + + // negotiated indicates that API version negotiation took place + negotiated atomic.Bool + + // negotiateLock is used to single-flight the version negotiation process + negotiateLock sync.Mutex + + // When the client transport is an *http.Transport (default) we need to do some extra things (like closing idle connections). + // Store the original transport as the http.Client transport will be wrapped with tracing libs. 
+ baseTransport *http.Transport +} + +// ErrRedirect is the error returned by checkRedirect when the request is non-GET. +var ErrRedirect = errors.New("unexpected redirect in response") + +// CheckRedirect specifies the policy for dealing with redirect responses. It +// can be set on [http.Client.CheckRedirect] to prevent HTTP redirects for +// non-GET requests. It returns an [ErrRedirect] for non-GET request, otherwise +// returns a [http.ErrUseLastResponse], which is special-cased by http.Client +// to use the last response. +// +// Go 1.8 changed behavior for HTTP redirects (specifically 301, 307, and 308) +// in the client. The client (and by extension API client) can be made to send +// a request like "POST /containers//start" where what would normally be in the +// name section of the URL is empty. This triggers an HTTP 301 from the daemon. +// +// In go 1.8 this 301 is converted to a GET request, and ends up getting +// a 404 from the daemon. This behavior change manifests in the client in that +// before, the 301 was not followed and the client did not generate an error, +// but now results in a message like "Error response from daemon: page not found". +func CheckRedirect(_ *http.Request, via []*http.Request) error { + if via[0].Method == http.MethodGet { + return http.ErrUseLastResponse + } + return ErrRedirect +} + +// NewClientWithOpts initializes a new API client. +// +// Deprecated: use New. This function will be removed in the next release. +func NewClientWithOpts(ops ...Opt) (*Client, error) { + return New(ops...) +} + +// New initializes a new API client with a default HTTPClient, and +// default API host and version. It also initializes the custom HTTP headers to +// add to each request. +// +// It takes an optional list of [Opt] functional arguments, which are applied in +// the order they're provided, which allows modifying the defaults when creating +// the client. 
For example, the following initializes a client that configures +// itself with values from environment variables ([FromEnv]). +// +// By default, the client automatically negotiates the API version to use when +// making requests. API version negotiation is performed on the first request; +// subsequent requests do not re-negotiate. Use [WithAPIVersion] or +// [WithAPIVersionFromEnv] to configure the client with a fixed API version +// and disable API version negotiation. +// +// cli, err := client.New(client.FromEnv) +func New(ops ...Opt) (*Client, error) { + hostURL, err := ParseHostURL(DefaultDockerHost) + if err != nil { + return nil, err + } + + client, err := defaultHTTPClient(hostURL) + if err != nil { + return nil, err + } + c := &Client{ + clientConfig: clientConfig{ + host: DefaultDockerHost, + version: MaxAPIVersion, + client: client, + proto: hostURL.Scheme, + addr: hostURL.Host, + traceOpts: []otelhttp.Option{ + otelhttp.WithSpanNameFormatter(func(_ string, req *http.Request) string { + return req.Method + " " + req.URL.Path + }), + }, + }, + } + cfg := &c.clientConfig + + for _, op := range ops { + if err := op(cfg); err != nil { + return nil, err + } + } + + if cfg.envAPIVersion != "" { + c.setAPIVersion(cfg.envAPIVersion) + } else if cfg.manualAPIVersion != "" { + c.setAPIVersion(cfg.manualAPIVersion) + } + + if tr, ok := c.client.Transport.(*http.Transport); ok { + // Store the base transport before we wrap it in tracing libs below + // This is used, as an example, to close idle connections when the client is closed + c.baseTransport = tr + } + + if c.scheme == "" { + // TODO(stevvooe): This isn't really the right way to write clients in Go. + // `NewClient` should probably only take an `*http.Client` and work from there. + // Unfortunately, the model of having a host-ish/url-thingy as the connection + // string has us confusing protocol and transport layers. 
We continue doing + // this to avoid breaking existing clients but this should be addressed. + if c.tlsConfig() != nil { + c.scheme = "https" + } else { + c.scheme = "http" + } + } + + c.client.Transport = otelhttp.NewTransport(c.client.Transport, c.traceOpts...) + + if len(cfg.responseHooks) > 0 { + c.client.Transport = &responseHookTransport{ + base: c.client.Transport, + hooks: slices.Clone(cfg.responseHooks), + } + } + + return c, nil +} + +func (cli *Client) tlsConfig() *tls.Config { + if cli.baseTransport == nil { + return nil + } + return cli.baseTransport.TLSClientConfig +} + +func defaultHTTPClient(hostURL *url.URL) (*http.Client, error) { + transport := &http.Transport{} + // Necessary to prevent long-lived processes using the + // client from leaking connections due to idle connections + // not being released. + // TODO: see if we can also address this from the server side, + // or in go-connections. + // see: https://github.com/moby/moby/issues/45539 + transport.MaxIdleConns = 6 + transport.IdleConnTimeout = 30 * time.Second + err := sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host) + if err != nil { + return nil, err + } + return &http.Client{ + Transport: transport, + CheckRedirect: CheckRedirect, + }, nil +} + +// Close the transport used by the client +func (cli *Client) Close() error { + if cli.baseTransport != nil { + cli.baseTransport.CloseIdleConnections() + return nil + } + return nil +} + +// checkVersion manually triggers API version negotiation (if configured). +// This allows for version-dependent code to use the same version as will +// be negotiated when making the actual requests, and for which cases +// we cannot do the negotiation lazily. +func (cli *Client) checkVersion(ctx context.Context) error { + if cli.negotiated.Load() { + return nil + } + _, err := cli.Ping(ctx, PingOptions{ + NegotiateAPIVersion: true, + }) + return err +} + +// getAPIPath returns the versioned request path to call the API. 
+// It appends the query parameters to the path if they are not empty. +func (cli *Client) getAPIPath(ctx context.Context, p string, query url.Values) string { + var apiPath string + _ = cli.checkVersion(ctx) + if cli.version != "" { + apiPath = path.Join(cli.basePath, "/v"+strings.TrimPrefix(cli.version, "v"), p) + } else { + apiPath = path.Join(cli.basePath, p) + } + return (&url.URL{Path: apiPath, RawQuery: query.Encode()}).String() +} + +// ClientVersion returns the API version used by this client. +func (cli *Client) ClientVersion() string { + return cli.version +} + +// negotiateAPIVersion updates the version to match the API version from +// the ping response. +// +// It returns an error if version is invalid, or lower than the minimum +// supported API version in which case the client's API version is not +// updated, and negotiation is not marked as completed. +func (cli *Client) negotiateAPIVersion(pingVersion string) error { + var err error + pingVersion, err = parseAPIVersion(pingVersion) + if err != nil { + return err + } + + if versions.LessThan(pingVersion, MinAPIVersion) { + return cerrdefs.ErrInvalidArgument.WithMessage(fmt.Sprintf("API version %s is not supported by this client: the minimum supported API version is %s", pingVersion, MinAPIVersion)) + } + + // if the client is not initialized with a version, start with the latest supported version + negotiatedVersion := cli.version + if negotiatedVersion == "" { + negotiatedVersion = MaxAPIVersion + } + + // if server version is lower than the client version, downgrade + if versions.LessThan(pingVersion, negotiatedVersion) { + negotiatedVersion = pingVersion + } + + // Store the results, so that automatic API version negotiation (if enabled) + // won't be performed on the next request. 
+ cli.setAPIVersion(negotiatedVersion) + return nil +} + +// setAPIVersion sets the client's API version and marks API version negotiation +// as completed, so that automatic API version negotiation (if enabled) won't +// be performed on the next request. +func (cli *Client) setAPIVersion(version string) { + cli.version = version + cli.negotiated.Store(true) +} + +// DaemonHost returns the host address used by the client +func (cli *Client) DaemonHost() string { + return cli.host +} + +// ParseHostURL parses a url string, validates the string is a host url, and +// returns the parsed URL +func ParseHostURL(host string) (*url.URL, error) { + proto, addr, ok := strings.Cut(host, "://") + if !ok || addr == "" { + return nil, fmt.Errorf("unable to parse docker host `%s`", host) + } + + var basePath string + if proto == "tcp" { + parsed, err := url.Parse("tcp://" + addr) + if err != nil { + return nil, err + } + addr = parsed.Host + basePath = parsed.Path + } + return &url.URL{ + Scheme: proto, + Host: addr, + Path: basePath, + }, nil +} + +func (cli *Client) dialerFromTransport() func(context.Context, string, string) (net.Conn, error) { + if cli.baseTransport == nil || cli.baseTransport.DialContext == nil { + return nil + } + + if cli.baseTransport.TLSClientConfig != nil { + // When using a tls config we don't use the configured dialer but instead a fallback dialer... + // Note: It seems like this should use the normal dialer and wrap the returned net.Conn in a tls.Conn + // I honestly don't know why it doesn't do that, but it doesn't and such a change is entirely unrelated to the change in this commit. + return nil + } + return cli.baseTransport.DialContext +} + +// Dialer returns a dialer for a raw stream connection, with an HTTP/1.1 header, +// that can be used for proxying the daemon connection. It is used by +// ["docker dial-stdio"]. 
+// +// ["docker dial-stdio"]: https://github.com/docker/cli/pull/1014 +func (cli *Client) Dialer() func(context.Context) (net.Conn, error) { + return cli.dialer() +} + +func (cli *Client) dialer() func(context.Context) (net.Conn, error) { + return func(ctx context.Context) (net.Conn, error) { + if dialFn := cli.dialerFromTransport(); dialFn != nil { + return dialFn(ctx, cli.proto, cli.addr) + } + switch cli.proto { + case "unix": + return net.Dial(cli.proto, cli.addr) + case "npipe": + ctx, cancel := context.WithTimeout(ctx, 32*time.Second) + defer cancel() + return dialPipeContext(ctx, cli.addr) + default: + if tlsConfig := cli.tlsConfig(); tlsConfig != nil { + return tls.Dial(cli.proto, cli.addr, tlsConfig) + } + return net.Dial(cli.proto, cli.addr) + } + } +} diff --git a/vendor/github.com/moby/moby/client/client_interfaces.go b/vendor/github.com/moby/moby/client/client_interfaces.go new file mode 100644 index 00000000000..4bbd45a6e59 --- /dev/null +++ b/vendor/github.com/moby/moby/client/client_interfaces.go @@ -0,0 +1,242 @@ +package client + +import ( + "context" + "io" + "net" +) + +// APIClient is an interface that clients that talk with a docker server must implement. +type APIClient interface { + stableAPIClient + CheckpointAPIClient // CheckpointAPIClient is still experimental. +} + +type stableAPIClient interface { + ConfigAPIClient + ContainerAPIClient + DistributionAPIClient + RegistrySearchClient + ExecAPIClient + ImageBuildAPIClient + ImageAPIClient + NetworkAPIClient + PluginAPIClient + SystemAPIClient + VolumeAPIClient + ClientVersion() string + DaemonHost() string + ServerVersion(ctx context.Context, options ServerVersionOptions) (ServerVersionResult, error) + HijackDialer + Dialer() func(context.Context) (net.Conn, error) + Close() error + SwarmManagementAPIClient +} + +// SwarmManagementAPIClient defines all methods for managing Swarm-specific +// objects. 
+type SwarmManagementAPIClient interface { + SwarmAPIClient + NodeAPIClient + ServiceAPIClient + TaskAPIClient + SecretAPIClient + ConfigAPIClient +} + +// HijackDialer defines methods for a hijack dialer. +type HijackDialer interface { + DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) +} + +// CheckpointAPIClient defines API client methods for the checkpoints. +// +// Experimental: checkpoint and restore is still an experimental feature, +// and only available if the daemon is running with experimental features +// enabled. +type CheckpointAPIClient interface { + CheckpointCreate(ctx context.Context, container string, options CheckpointCreateOptions) (CheckpointCreateResult, error) + CheckpointRemove(ctx context.Context, container string, options CheckpointRemoveOptions) (CheckpointRemoveResult, error) + CheckpointList(ctx context.Context, container string, options CheckpointListOptions) (CheckpointListResult, error) +} + +// ContainerAPIClient defines API client methods for the containers +type ContainerAPIClient interface { + ContainerCreate(ctx context.Context, options ContainerCreateOptions) (ContainerCreateResult, error) + ContainerInspect(ctx context.Context, container string, options ContainerInspectOptions) (ContainerInspectResult, error) + ContainerList(ctx context.Context, options ContainerListOptions) (ContainerListResult, error) + ContainerUpdate(ctx context.Context, container string, updateConfig ContainerUpdateOptions) (ContainerUpdateResult, error) + ContainerRemove(ctx context.Context, container string, options ContainerRemoveOptions) (ContainerRemoveResult, error) + ContainerPrune(ctx context.Context, opts ContainerPruneOptions) (ContainerPruneResult, error) + + ContainerLogs(ctx context.Context, container string, options ContainerLogsOptions) (ContainerLogsResult, error) + + ContainerStart(ctx context.Context, container string, options ContainerStartOptions) (ContainerStartResult, error) + 
ContainerStop(ctx context.Context, container string, options ContainerStopOptions) (ContainerStopResult, error) + ContainerRestart(ctx context.Context, container string, options ContainerRestartOptions) (ContainerRestartResult, error) + ContainerPause(ctx context.Context, container string, options ContainerPauseOptions) (ContainerPauseResult, error) + ContainerUnpause(ctx context.Context, container string, options ContainerUnpauseOptions) (ContainerUnpauseResult, error) + ContainerWait(ctx context.Context, container string, options ContainerWaitOptions) ContainerWaitResult + ContainerKill(ctx context.Context, container string, options ContainerKillOptions) (ContainerKillResult, error) + + ContainerRename(ctx context.Context, container string, options ContainerRenameOptions) (ContainerRenameResult, error) + ContainerResize(ctx context.Context, container string, options ContainerResizeOptions) (ContainerResizeResult, error) + ContainerAttach(ctx context.Context, container string, options ContainerAttachOptions) (ContainerAttachResult, error) + ContainerCommit(ctx context.Context, container string, options ContainerCommitOptions) (ContainerCommitResult, error) + ContainerDiff(ctx context.Context, container string, options ContainerDiffOptions) (ContainerDiffResult, error) + ContainerExport(ctx context.Context, container string, options ContainerExportOptions) (ContainerExportResult, error) + + ContainerStats(ctx context.Context, container string, options ContainerStatsOptions) (ContainerStatsResult, error) + ContainerTop(ctx context.Context, container string, options ContainerTopOptions) (ContainerTopResult, error) + + ContainerStatPath(ctx context.Context, container string, options ContainerStatPathOptions) (ContainerStatPathResult, error) + CopyFromContainer(ctx context.Context, container string, options CopyFromContainerOptions) (CopyFromContainerResult, error) + CopyToContainer(ctx context.Context, container string, options CopyToContainerOptions) 
(CopyToContainerResult, error) +} + +type ExecAPIClient interface { + ExecCreate(ctx context.Context, container string, options ExecCreateOptions) (ExecCreateResult, error) + ExecInspect(ctx context.Context, execID string, options ExecInspectOptions) (ExecInspectResult, error) + ExecResize(ctx context.Context, execID string, options ExecResizeOptions) (ExecResizeResult, error) + + ExecStart(ctx context.Context, execID string, options ExecStartOptions) (ExecStartResult, error) + ExecAttach(ctx context.Context, execID string, options ExecAttachOptions) (ExecAttachResult, error) +} + +// DistributionAPIClient defines API client methods for the registry +type DistributionAPIClient interface { + DistributionInspect(ctx context.Context, image string, options DistributionInspectOptions) (DistributionInspectResult, error) +} + +type RegistrySearchClient interface { + ImageSearch(ctx context.Context, term string, options ImageSearchOptions) (ImageSearchResult, error) +} + +// ImageBuildAPIClient defines API client methods for building images +// using the REST API. 
+type ImageBuildAPIClient interface { + ImageBuild(ctx context.Context, context io.Reader, options ImageBuildOptions) (ImageBuildResult, error) + BuildCachePrune(ctx context.Context, opts BuildCachePruneOptions) (BuildCachePruneResult, error) + BuildCancel(ctx context.Context, id string, opts BuildCancelOptions) (BuildCancelResult, error) +} + +// ImageAPIClient defines API client methods for the images +type ImageAPIClient interface { + ImageImport(ctx context.Context, source ImageImportSource, ref string, options ImageImportOptions) (ImageImportResult, error) + + ImageList(ctx context.Context, options ImageListOptions) (ImageListResult, error) + ImagePull(ctx context.Context, ref string, options ImagePullOptions) (ImagePullResponse, error) + ImagePush(ctx context.Context, ref string, options ImagePushOptions) (ImagePushResponse, error) + ImageRemove(ctx context.Context, image string, options ImageRemoveOptions) (ImageRemoveResult, error) + ImageTag(ctx context.Context, options ImageTagOptions) (ImageTagResult, error) + ImagePrune(ctx context.Context, opts ImagePruneOptions) (ImagePruneResult, error) + + ImageInspect(ctx context.Context, image string, _ ...ImageInspectOption) (ImageInspectResult, error) + ImageHistory(ctx context.Context, image string, _ ...ImageHistoryOption) (ImageHistoryResult, error) + + ImageLoad(ctx context.Context, input io.Reader, _ ...ImageLoadOption) (ImageLoadResult, error) + ImageSave(ctx context.Context, images []string, _ ...ImageSaveOption) (ImageSaveResult, error) +} + +// NetworkAPIClient defines API client methods for the networks +type NetworkAPIClient interface { + NetworkCreate(ctx context.Context, name string, options NetworkCreateOptions) (NetworkCreateResult, error) + NetworkInspect(ctx context.Context, network string, options NetworkInspectOptions) (NetworkInspectResult, error) + NetworkList(ctx context.Context, options NetworkListOptions) (NetworkListResult, error) + NetworkRemove(ctx context.Context, network string, 
options NetworkRemoveOptions) (NetworkRemoveResult, error) + NetworkPrune(ctx context.Context, opts NetworkPruneOptions) (NetworkPruneResult, error) + + NetworkConnect(ctx context.Context, network string, options NetworkConnectOptions) (NetworkConnectResult, error) + NetworkDisconnect(ctx context.Context, network string, options NetworkDisconnectOptions) (NetworkDisconnectResult, error) +} + +// NodeAPIClient defines API client methods for the nodes +type NodeAPIClient interface { + NodeInspect(ctx context.Context, nodeID string, options NodeInspectOptions) (NodeInspectResult, error) + NodeList(ctx context.Context, options NodeListOptions) (NodeListResult, error) + NodeUpdate(ctx context.Context, nodeID string, options NodeUpdateOptions) (NodeUpdateResult, error) + NodeRemove(ctx context.Context, nodeID string, options NodeRemoveOptions) (NodeRemoveResult, error) +} + +// PluginAPIClient defines API client methods for the plugins +type PluginAPIClient interface { + PluginCreate(ctx context.Context, createContext io.Reader, options PluginCreateOptions) (PluginCreateResult, error) + PluginInstall(ctx context.Context, name string, options PluginInstallOptions) (PluginInstallResult, error) + PluginInspect(ctx context.Context, name string, options PluginInspectOptions) (PluginInspectResult, error) + PluginList(ctx context.Context, options PluginListOptions) (PluginListResult, error) + PluginRemove(ctx context.Context, name string, options PluginRemoveOptions) (PluginRemoveResult, error) + + PluginEnable(ctx context.Context, name string, options PluginEnableOptions) (PluginEnableResult, error) + PluginDisable(ctx context.Context, name string, options PluginDisableOptions) (PluginDisableResult, error) + PluginUpgrade(ctx context.Context, name string, options PluginUpgradeOptions) (PluginUpgradeResult, error) + PluginPush(ctx context.Context, name string, options PluginPushOptions) (PluginPushResult, error) + PluginSet(ctx context.Context, name string, options 
PluginSetOptions) (PluginSetResult, error) +} + +// ServiceAPIClient defines API client methods for the services +type ServiceAPIClient interface { + ServiceCreate(ctx context.Context, options ServiceCreateOptions) (ServiceCreateResult, error) + ServiceInspect(ctx context.Context, serviceID string, options ServiceInspectOptions) (ServiceInspectResult, error) + ServiceList(ctx context.Context, options ServiceListOptions) (ServiceListResult, error) + ServiceUpdate(ctx context.Context, serviceID string, options ServiceUpdateOptions) (ServiceUpdateResult, error) + ServiceRemove(ctx context.Context, serviceID string, options ServiceRemoveOptions) (ServiceRemoveResult, error) + + ServiceLogs(ctx context.Context, serviceID string, options ServiceLogsOptions) (ServiceLogsResult, error) +} + +// TaskAPIClient defines API client methods to manage swarm tasks. +type TaskAPIClient interface { + TaskInspect(ctx context.Context, taskID string, options TaskInspectOptions) (TaskInspectResult, error) + TaskList(ctx context.Context, options TaskListOptions) (TaskListResult, error) + + TaskLogs(ctx context.Context, taskID string, options TaskLogsOptions) (TaskLogsResult, error) +} + +// SwarmAPIClient defines API client methods for the swarm +type SwarmAPIClient interface { + SwarmInit(ctx context.Context, options SwarmInitOptions) (SwarmInitResult, error) + SwarmJoin(ctx context.Context, options SwarmJoinOptions) (SwarmJoinResult, error) + SwarmInspect(ctx context.Context, options SwarmInspectOptions) (SwarmInspectResult, error) + SwarmUpdate(ctx context.Context, options SwarmUpdateOptions) (SwarmUpdateResult, error) + SwarmLeave(ctx context.Context, options SwarmLeaveOptions) (SwarmLeaveResult, error) + + SwarmGetUnlockKey(ctx context.Context) (SwarmGetUnlockKeyResult, error) + SwarmUnlock(ctx context.Context, options SwarmUnlockOptions) (SwarmUnlockResult, error) +} + +// SystemAPIClient defines API client methods for the system +type SystemAPIClient interface { + Events(ctx 
context.Context, options EventsListOptions) EventsResult + Info(ctx context.Context, options InfoOptions) (SystemInfoResult, error) + RegistryLogin(ctx context.Context, auth RegistryLoginOptions) (RegistryLoginResult, error) + DiskUsage(ctx context.Context, options DiskUsageOptions) (DiskUsageResult, error) + Ping(ctx context.Context, options PingOptions) (PingResult, error) +} + +// VolumeAPIClient defines API client methods for the volumes +type VolumeAPIClient interface { + VolumeCreate(ctx context.Context, options VolumeCreateOptions) (VolumeCreateResult, error) + VolumeInspect(ctx context.Context, volumeID string, options VolumeInspectOptions) (VolumeInspectResult, error) + VolumeList(ctx context.Context, options VolumeListOptions) (VolumeListResult, error) + VolumeUpdate(ctx context.Context, volumeID string, options VolumeUpdateOptions) (VolumeUpdateResult, error) + VolumeRemove(ctx context.Context, volumeID string, options VolumeRemoveOptions) (VolumeRemoveResult, error) + VolumePrune(ctx context.Context, options VolumePruneOptions) (VolumePruneResult, error) +} + +// SecretAPIClient defines API client methods for secrets +type SecretAPIClient interface { + SecretCreate(ctx context.Context, options SecretCreateOptions) (SecretCreateResult, error) + SecretInspect(ctx context.Context, id string, options SecretInspectOptions) (SecretInspectResult, error) + SecretList(ctx context.Context, options SecretListOptions) (SecretListResult, error) + SecretUpdate(ctx context.Context, id string, options SecretUpdateOptions) (SecretUpdateResult, error) + SecretRemove(ctx context.Context, id string, options SecretRemoveOptions) (SecretRemoveResult, error) +} + +// ConfigAPIClient defines API client methods for configs +type ConfigAPIClient interface { + ConfigCreate(ctx context.Context, options ConfigCreateOptions) (ConfigCreateResult, error) + ConfigInspect(ctx context.Context, id string, options ConfigInspectOptions) (ConfigInspectResult, error) + ConfigList(ctx 
context.Context, options ConfigListOptions) (ConfigListResult, error) + ConfigUpdate(ctx context.Context, id string, options ConfigUpdateOptions) (ConfigUpdateResult, error) + ConfigRemove(ctx context.Context, id string, options ConfigRemoveOptions) (ConfigRemoveResult, error) +} diff --git a/vendor/github.com/moby/moby/client/client_options.go b/vendor/github.com/moby/moby/client/client_options.go new file mode 100644 index 00000000000..d92a16a455f --- /dev/null +++ b/vendor/github.com/moby/moby/client/client_options.go @@ -0,0 +1,375 @@ +package client + +import ( + "context" + "errors" + "fmt" + "net" + "net/http" + "os" + "path/filepath" + "strings" + "time" + + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + "go.opentelemetry.io/otel/trace" +) + +type clientConfig struct { + // scheme sets the scheme for the client + scheme string + // host holds the server address to connect to + host string + // proto holds the client protocol i.e. unix. + proto string + // addr holds the client address. + addr string + // basePath holds the path to prepend to the requests. + basePath string + // client used to send and receive http requests. + client *http.Client + // version of the server to talk to. + version string + // userAgent is the User-Agent header to use for HTTP requests. It takes + // precedence over User-Agent headers set in customHTTPHeaders, and other + // header variables. When set to an empty string, the User-Agent header + // is removed, and no header is sent. + userAgent *string + // custom HTTP headers configured by users. + customHTTPHeaders map[string]string + + // manualAPIVersion contains the API version set by users. This field + // will only be non-empty if a valid-formed version was set through + // [WithAPIVersion]. + // + // If both manualAPIVersion and envAPIVersion are set, manualAPIVersion + // takes precedence. 
Either field disables API-version negotiation. + manualAPIVersion string + + // envAPIVersion contains the API version set by users. This field + // will only be non-empty if a valid-formed version was set through + // [WithAPIVersionFromEnv]. + // + // If both manualAPIVersion and envAPIVersion are set, manualAPIVersion + // takes precedence. Either field disables API-version negotiation. + envAPIVersion string + + // responseHooks is a list of custom response hooks to call on responses. + responseHooks []ResponseHook + + // traceOpts is a list of options to configure the tracing span. + traceOpts []otelhttp.Option +} + +// ResponseHook is called for each HTTP response returned by the daemon. +// Hooks are invoked in the order they were added. +// +// Hooks must not read or close resp.Body. +type ResponseHook func(*http.Response) + +// Opt is a configuration option to initialize a [Client]. +type Opt func(*clientConfig) error + +// FromEnv configures the client with values from environment variables. It +// is the equivalent of using the [WithTLSClientConfigFromEnv], [WithHostFromEnv], +// and [WithAPIVersionFromEnv] options. +// +// FromEnv uses the following environment variables: +// +// - DOCKER_HOST ([EnvOverrideHost]) to set the URL to the docker server. +// - DOCKER_API_VERSION ([EnvOverrideAPIVersion]) to set the version of the +// API to use, leave empty for latest. +// - DOCKER_CERT_PATH ([EnvOverrideCertPath]) to specify the directory from +// which to load the TLS certificates ("ca.pem", "cert.pem", "key.pem'). +// - DOCKER_TLS_VERIFY ([EnvTLSVerify]) to enable or disable TLS verification +// (off by default). +func FromEnv(c *clientConfig) error { + ops := []Opt{ + WithTLSClientConfigFromEnv(), + WithHostFromEnv(), + WithAPIVersionFromEnv(), + } + for _, op := range ops { + if err := op(c); err != nil { + return err + } + } + return nil +} + +// WithDialContext applies the dialer to the client transport. 
This can be +// used to set the Timeout and KeepAlive settings of the client. It returns +// an error if the client does not have a [http.Transport] configured. +func WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) Opt { + return func(c *clientConfig) error { + if transport, ok := c.client.Transport.(*http.Transport); ok { + transport.DialContext = dialContext + return nil + } + return fmt.Errorf("cannot apply dialer to transport: %T", c.client.Transport) + } +} + +// WithHost overrides the client host with the specified one. +func WithHost(host string) Opt { + return func(c *clientConfig) error { + hostURL, err := ParseHostURL(host) + if err != nil { + return err + } + c.host = host + c.proto = hostURL.Scheme + c.addr = hostURL.Host + c.basePath = hostURL.Path + if transport, ok := c.client.Transport.(*http.Transport); ok { + return sockets.ConfigureTransport(transport, c.proto, c.addr) + } + // For test transports, we skip transport configuration but still + // set the host fields so that the client can use them for headers + if _, ok := c.client.Transport.(testRoundTripper); ok { + return nil + } + return fmt.Errorf("cannot apply host to transport: %T", c.client.Transport) + } +} + +// testRoundTripper allows us to inject a mock-transport for testing. We define it +// here so we can detect the tlsconfig and return nil for only this type. +type testRoundTripper func(*http.Request) (*http.Response, error) + +func (tf testRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + return tf(req) +} + +// WithHostFromEnv overrides the client host with the host specified in the +// DOCKER_HOST ([EnvOverrideHost]) environment variable. If DOCKER_HOST is not set, +// or set to an empty value, the host is not modified. 
+func WithHostFromEnv() Opt { + return func(c *clientConfig) error { + if host := os.Getenv(EnvOverrideHost); host != "" { + return WithHost(host)(c) + } + return nil + } +} + +// WithHTTPClient overrides the client's HTTP client with the specified one. +func WithHTTPClient(client *http.Client) Opt { + return func(c *clientConfig) error { + if client != nil { + // Make a clone of client so modifications do not affect + // the caller's client. Clone here instead of in New() + // as other options (WithHost) also mutate c.client. + // Cloned clients share the same CookieJar as the + // original. + hc := *client + if ht, ok := hc.Transport.(*http.Transport); ok { + hc.Transport = ht.Clone() + } + c.client = &hc + } + return nil + } +} + +// WithTimeout configures the time limit for requests made by the HTTP client. +func WithTimeout(timeout time.Duration) Opt { + return func(c *clientConfig) error { + c.client.Timeout = timeout + return nil + } +} + +// WithUserAgent configures the User-Agent header to use for HTTP requests. +// It overrides any User-Agent set in headers. When set to an empty string, +// the User-Agent header is removed, and no header is sent. +func WithUserAgent(ua string) Opt { + return func(c *clientConfig) error { + c.userAgent = &ua + return nil + } +} + +// WithHTTPHeaders appends custom HTTP headers to the client's default headers. +// It does not allow for built-in headers (such as "User-Agent", if set) to +// be overridden. Also see [WithUserAgent]. +func WithHTTPHeaders(headers map[string]string) Opt { + return func(c *clientConfig) error { + c.customHTTPHeaders = headers + return nil + } +} + +// WithScheme overrides the client scheme with the specified one. +func WithScheme(scheme string) Opt { + return func(c *clientConfig) error { + c.scheme = scheme + return nil + } +} + +// WithTLSClientConfig applies a TLS config to the client transport. 
+func WithTLSClientConfig(cacertPath, certPath, keyPath string) Opt { + return func(c *clientConfig) error { + transport, ok := c.client.Transport.(*http.Transport) + if !ok { + return fmt.Errorf("cannot apply tls config to transport: %T", c.client.Transport) + } + config, err := tlsconfig.Client(tlsconfig.Options{ + CAFile: cacertPath, + CertFile: certPath, + KeyFile: keyPath, + ExclusiveRootPools: true, + }) + if err != nil { + return fmt.Errorf("failed to create tls config: %w", err) + } + transport.TLSClientConfig = config + return nil + } +} + +// WithTLSClientConfigFromEnv configures the client's TLS settings with the +// settings in the DOCKER_CERT_PATH ([EnvOverrideCertPath]) and DOCKER_TLS_VERIFY +// ([EnvTLSVerify]) environment variables. If DOCKER_CERT_PATH is not set or empty, +// TLS configuration is not modified. +// +// WithTLSClientConfigFromEnv uses the following environment variables: +// +// - DOCKER_CERT_PATH ([EnvOverrideCertPath]) to specify the directory from +// which to load the TLS certificates ("ca.pem", "cert.pem", "key.pem"). +// - DOCKER_TLS_VERIFY ([EnvTLSVerify]) to enable or disable TLS verification +// (off by default). +func WithTLSClientConfigFromEnv() Opt { + return func(c *clientConfig) error { + dockerCertPath := os.Getenv(EnvOverrideCertPath) + if dockerCertPath == "" { + return nil + } + tlsc, err := tlsconfig.Client(tlsconfig.Options{ + CAFile: filepath.Join(dockerCertPath, "ca.pem"), + CertFile: filepath.Join(dockerCertPath, "cert.pem"), + KeyFile: filepath.Join(dockerCertPath, "key.pem"), + InsecureSkipVerify: os.Getenv(EnvTLSVerify) == "", + }) + if err != nil { + return err + } + + c.client = &http.Client{ + Transport: &http.Transport{TLSClientConfig: tlsc}, + CheckRedirect: CheckRedirect, + } + return nil + } +} + +// WithAPIVersion overrides the client's API version with the specified one, +// and disables API version negotiation. 
If an empty version is provided, +// this option is ignored to allow version negotiation. The given version +// should be formatted "." (for example, "1.52"). It returns +// an error if the given value not in the correct format. +// +// WithAPIVersion does not validate if the client supports the given version, +// and callers should verify if the version lower than the maximum supported +// version as defined by [MaxAPIVersion]. +// +// [WithAPIVersionFromEnv] takes precedence if [WithAPIVersion] and +// [WithAPIVersionFromEnv] are both set. +func WithAPIVersion(version string) Opt { + return func(c *clientConfig) error { + version = strings.TrimSpace(version) + if val := strings.TrimPrefix(version, "v"); val != "" { + ver, err := parseAPIVersion(val) + if err != nil { + return fmt.Errorf("invalid API version (%s): %w", version, err) + } + c.manualAPIVersion = ver + } + return nil + } +} + +// WithVersion overrides the client version with the specified one. +// +// Deprecated: use [WithAPIVersion] instead. +func WithVersion(version string) Opt { + return WithAPIVersion(version) +} + +// WithAPIVersionFromEnv overrides the client version with the version specified in +// the DOCKER_API_VERSION ([EnvOverrideAPIVersion]) environment variable. +// If DOCKER_API_VERSION is not set, or set to an empty value, the version +// is not modified. +// +// WithAPIVersion does not validate if the client supports the given version, +// and callers should verify if the version lower than the maximum supported +// version as defined by [MaxAPIVersion]. +// +// [WithAPIVersionFromEnv] takes precedence if [WithAPIVersion] and +// [WithAPIVersionFromEnv] are both set. 
+func WithAPIVersionFromEnv() Opt { + return func(c *clientConfig) error { + version := strings.TrimSpace(os.Getenv(EnvOverrideAPIVersion)) + if val := strings.TrimPrefix(version, "v"); val != "" { + ver, err := parseAPIVersion(val) + if err != nil { + return fmt.Errorf("invalid API version (%s): %w", version, err) + } + c.envAPIVersion = ver + } + return nil + } +} + +// WithVersionFromEnv overrides the client version with the version specified in +// the DOCKER_API_VERSION ([EnvOverrideAPIVersion]) environment variable. +// +// Deprecated: use [WithAPIVersionFromEnv] instead. +func WithVersionFromEnv() Opt { + return WithAPIVersionFromEnv() +} + +// WithAPIVersionNegotiation enables automatic API version negotiation for the client. +// With this option enabled, the client automatically negotiates the API version +// to use when making requests. API version negotiation is performed on the first +// request; subsequent requests do not re-negotiate. +// +// Deprecated: API-version negotiation is now enabled by default. Use [WithAPIVersion] +// or [WithAPIVersionFromEnv] to disable API version negotiation. +func WithAPIVersionNegotiation() Opt { + return func(c *clientConfig) error { + return nil + } +} + +// WithTraceProvider sets the trace provider for the client. +// If this is not set then the global trace provider is used. +func WithTraceProvider(provider trace.TracerProvider) Opt { + return WithTraceOptions(otelhttp.WithTracerProvider(provider)) +} + +// WithTraceOptions sets tracing span options for the client. +func WithTraceOptions(opts ...otelhttp.Option) Opt { + return func(c *clientConfig) error { + c.traceOpts = append(c.traceOpts, opts...) + return nil + } +} + +// WithResponseHook adds a ResponseHook to the client. ResponseHooks are called +// for each HTTP response returned by the daemon. Hooks are invoked in the order +// they were added. +// +// Hooks must not read or close resp.Body. 
+func WithResponseHook(h ResponseHook) Opt { + return func(c *clientConfig) error { + if h == nil { + return errors.New("invalid response hook: hook is nil") + } + c.responseHooks = append(c.responseHooks, h) + return nil + } +} diff --git a/vendor/github.com/moby/moby/client/client_responsehook.go b/vendor/github.com/moby/moby/client/client_responsehook.go new file mode 100644 index 00000000000..7c93f111c78 --- /dev/null +++ b/vendor/github.com/moby/moby/client/client_responsehook.go @@ -0,0 +1,23 @@ +package client + +import ( + "net/http" +) + +type responseHookTransport struct { + base http.RoundTripper + hooks []ResponseHook +} + +func (t *responseHookTransport) RoundTrip(req *http.Request) (*http.Response, error) { + resp, err := t.base.RoundTrip(req) + if err != nil { + return resp, err + } + + for _, h := range t.hooks { + h(resp) + } + + return resp, nil +} diff --git a/vendor/github.com/moby/moby/client/client_unix.go b/vendor/github.com/moby/moby/client/client_unix.go new file mode 100644 index 00000000000..1fb9fbfb9e5 --- /dev/null +++ b/vendor/github.com/moby/moby/client/client_unix.go @@ -0,0 +1,18 @@ +//go:build !windows + +package client + +import ( + "context" + "net" + "syscall" +) + +// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST +// (EnvOverrideHost) environment variable is unset or empty. +const DefaultDockerHost = "unix:///var/run/docker.sock" + +// dialPipeContext connects to a Windows named pipe. It is not supported on non-Windows. 
+func dialPipeContext(_ context.Context, _ string) (net.Conn, error) { + return nil, syscall.EAFNOSUPPORT +} diff --git a/vendor/github.com/moby/moby/client/client_windows.go b/vendor/github.com/moby/moby/client/client_windows.go new file mode 100644 index 00000000000..b471c061240 --- /dev/null +++ b/vendor/github.com/moby/moby/client/client_windows.go @@ -0,0 +1,17 @@ +package client + +import ( + "context" + "net" + + "github.com/Microsoft/go-winio" +) + +// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST +// (EnvOverrideHost) environment variable is unset or empty. +const DefaultDockerHost = "npipe:////./pipe/docker_engine" + +// dialPipeContext connects to a Windows named pipe. It is not supported on non-Windows. +func dialPipeContext(ctx context.Context, addr string) (net.Conn, error) { + return winio.DialPipeContext(ctx, addr) +} diff --git a/vendor/github.com/moby/moby/client/config_create.go b/vendor/github.com/moby/moby/client/config_create.go new file mode 100644 index 00000000000..874e2c947c5 --- /dev/null +++ b/vendor/github.com/moby/moby/client/config_create.go @@ -0,0 +1,34 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/swarm" +) + +// ConfigCreateOptions holds options for creating a config. +type ConfigCreateOptions struct { + Spec swarm.ConfigSpec +} + +// ConfigCreateResult holds the result from the ConfigCreate method. +type ConfigCreateResult struct { + ID string +} + +// ConfigCreate creates a new config. 
+func (cli *Client) ConfigCreate(ctx context.Context, options ConfigCreateOptions) (ConfigCreateResult, error) { + resp, err := cli.post(ctx, "/configs/create", nil, options.Spec, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ConfigCreateResult{}, err + } + + var out swarm.ConfigCreateResponse + err = json.NewDecoder(resp.Body).Decode(&out) + if err != nil { + return ConfigCreateResult{}, err + } + return ConfigCreateResult{ID: out.ID}, nil +} diff --git a/vendor/github.com/moby/moby/client/config_inspect.go b/vendor/github.com/moby/moby/client/config_inspect.go new file mode 100644 index 00000000000..0bf0ff79167 --- /dev/null +++ b/vendor/github.com/moby/moby/client/config_inspect.go @@ -0,0 +1,35 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/swarm" +) + +// ConfigInspectOptions holds options for inspecting a config. +type ConfigInspectOptions struct { + // Add future optional parameters here +} + +// ConfigInspectResult holds the result from the ConfigInspect method. 
+type ConfigInspectResult struct { + Config swarm.Config + Raw json.RawMessage +} + +// ConfigInspect returns the config information with raw data +func (cli *Client) ConfigInspect(ctx context.Context, id string, options ConfigInspectOptions) (ConfigInspectResult, error) { + id, err := trimID("config", id) + if err != nil { + return ConfigInspectResult{}, err + } + resp, err := cli.get(ctx, "/configs/"+id, nil, nil) + if err != nil { + return ConfigInspectResult{}, err + } + + var out ConfigInspectResult + out.Raw, err = decodeWithRaw(resp, &out.Config) + return out, err +} diff --git a/vendor/github.com/moby/moby/client/config_list.go b/vendor/github.com/moby/moby/client/config_list.go new file mode 100644 index 00000000000..ee5e7fee7ac --- /dev/null +++ b/vendor/github.com/moby/moby/client/config_list.go @@ -0,0 +1,38 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/swarm" +) + +// ConfigListOptions holds parameters to list configs +type ConfigListOptions struct { + Filters Filters +} + +// ConfigListResult holds the result from the [client.ConfigList] method. +type ConfigListResult struct { + Items []swarm.Config +} + +// ConfigList returns the list of configs. 
+func (cli *Client) ConfigList(ctx context.Context, options ConfigListOptions) (ConfigListResult, error) { + query := url.Values{} + options.Filters.updateURLValues(query) + + resp, err := cli.get(ctx, "/configs", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ConfigListResult{}, err + } + + var out ConfigListResult + err = json.NewDecoder(resp.Body).Decode(&out.Items) + if err != nil { + return ConfigListResult{}, err + } + return out, nil +} diff --git a/vendor/github.com/moby/moby/client/config_remove.go b/vendor/github.com/moby/moby/client/config_remove.go new file mode 100644 index 00000000000..c77a4c37862 --- /dev/null +++ b/vendor/github.com/moby/moby/client/config_remove.go @@ -0,0 +1,25 @@ +package client + +import "context" + +type ConfigRemoveOptions struct { + // Add future optional parameters here +} + +type ConfigRemoveResult struct { + // Add future fields here +} + +// ConfigRemove removes a config. +func (cli *Client) ConfigRemove(ctx context.Context, id string, options ConfigRemoveOptions) (ConfigRemoveResult, error) { + id, err := trimID("config", id) + if err != nil { + return ConfigRemoveResult{}, err + } + resp, err := cli.delete(ctx, "/configs/"+id, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ConfigRemoveResult{}, err + } + return ConfigRemoveResult{}, nil +} diff --git a/vendor/github.com/moby/moby/client/config_update.go b/vendor/github.com/moby/moby/client/config_update.go new file mode 100644 index 00000000000..2651f4b2f89 --- /dev/null +++ b/vendor/github.com/moby/moby/client/config_update.go @@ -0,0 +1,32 @@ +package client + +import ( + "context" + "net/url" + + "github.com/moby/moby/api/types/swarm" +) + +// ConfigUpdateOptions holds options for updating a config. 
+type ConfigUpdateOptions struct { + Version swarm.Version + Spec swarm.ConfigSpec +} + +type ConfigUpdateResult struct{} + +// ConfigUpdate attempts to update a config +func (cli *Client) ConfigUpdate(ctx context.Context, id string, options ConfigUpdateOptions) (ConfigUpdateResult, error) { + id, err := trimID("config", id) + if err != nil { + return ConfigUpdateResult{}, err + } + query := url.Values{} + query.Set("version", options.Version.String()) + resp, err := cli.post(ctx, "/configs/"+id+"/update", query, options.Spec, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ConfigUpdateResult{}, err + } + return ConfigUpdateResult{}, nil +} diff --git a/vendor/github.com/moby/moby/client/container_attach.go b/vendor/github.com/moby/moby/client/container_attach.go new file mode 100644 index 00000000000..ce84122d32f --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_attach.go @@ -0,0 +1,86 @@ +package client + +import ( + "context" + "net/http" + "net/url" +) + +// ContainerAttachOptions holds parameters to attach to a container. +type ContainerAttachOptions struct { + Stream bool + Stdin bool + Stdout bool + Stderr bool + DetachKeys string + Logs bool +} + +// ContainerAttachResult is the result from attaching to a container. +type ContainerAttachResult struct { + HijackedResponse +} + +// ContainerAttach attaches a connection to a container in the server. +// It returns a [HijackedResponse] with the hijacked connection +// and a reader to get output. It's up to the caller to close +// the hijacked connection by calling [HijackedResponse.Close]. +// +// The stream format on the response uses one of two formats: +// +// - If the container is using a TTY, there is only a single stream (stdout) +// and data is copied directly from the container output stream, no extra +// multiplexing or headers. +// - If the container is *not* using a TTY, streams for stdout and stderr are +// multiplexed. 
+// +// The format of the multiplexed stream is defined in the [stdcopy] package, +// and as follows: +// +// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} +// +// STREAM_TYPE can be 1 for [Stdout] and 2 for [Stderr]. Refer to [stdcopy.StdType] +// for details. SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded +// as big endian, this is the size of OUTPUT. You can use [stdcopy.StdCopy] +// to demultiplex this stream. +// +// [stdcopy]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy +// [stdcopy.StdCopy]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy#StdCopy +// [stdcopy.StdType]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy#StdType +// [Stdout]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy#Stdout +// [Stderr]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy#Stderr +func (cli *Client) ContainerAttach(ctx context.Context, containerID string, options ContainerAttachOptions) (ContainerAttachResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerAttachResult{}, err + } + + query := url.Values{} + if options.Stream { + query.Set("stream", "1") + } + if options.Stdin { + query.Set("stdin", "1") + } + if options.Stdout { + query.Set("stdout", "1") + } + if options.Stderr { + query.Set("stderr", "1") + } + if options.DetachKeys != "" { + query.Set("detachKeys", options.DetachKeys) + } + if options.Logs { + query.Set("logs", "1") + } + + hijacked, err := cli.postHijacked(ctx, "/containers/"+containerID+"/attach", query, nil, http.Header{ + "Content-Type": {"text/plain"}, + }) + if err != nil { + return ContainerAttachResult{}, err + } + + return ContainerAttachResult{HijackedResponse: hijacked}, nil +} diff --git a/vendor/github.com/moby/moby/client/container_commit.go b/vendor/github.com/moby/moby/client/container_commit.go new file mode 100644 index 00000000000..79da44a54f8 --- /dev/null +++ 
b/vendor/github.com/moby/moby/client/container_commit.go @@ -0,0 +1,75 @@ +package client + +import ( + "context" + "encoding/json" + "errors" + "net/url" + + "github.com/distribution/reference" + "github.com/moby/moby/api/types/container" +) + +// ContainerCommitOptions holds parameters to commit changes into a container. +type ContainerCommitOptions struct { + Reference string + Comment string + Author string + Changes []string + NoPause bool // NoPause disables pausing the container during commit. + Config *container.Config +} + +// ContainerCommitResult is the result from committing a container. +type ContainerCommitResult struct { + ID string +} + +// ContainerCommit applies changes to a container and creates a new tagged image. +func (cli *Client) ContainerCommit(ctx context.Context, containerID string, options ContainerCommitOptions) (ContainerCommitResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerCommitResult{}, err + } + + var repository, tag string + if options.Reference != "" { + ref, err := reference.ParseNormalizedNamed(options.Reference) + if err != nil { + return ContainerCommitResult{}, err + } + + if _, ok := ref.(reference.Digested); ok { + return ContainerCommitResult{}, errors.New("refusing to create a tag with a digest reference") + } + ref = reference.TagNameOnly(ref) + + if tagged, ok := ref.(reference.Tagged); ok { + tag = tagged.Tag() + } + repository = ref.Name() + } + + query := url.Values{} + query.Set("container", containerID) + query.Set("repo", repository) + query.Set("tag", tag) + query.Set("comment", options.Comment) + query.Set("author", options.Author) + for _, change := range options.Changes { + query.Add("changes", change) + } + if options.NoPause { + query.Set("pause", "0") + } + + var response container.CommitResponse + resp, err := cli.post(ctx, "/commit", query, options.Config, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerCommitResult{}, 
err + } + + err = json.NewDecoder(resp.Body).Decode(&response) + return ContainerCommitResult{ID: response.ID}, err +} diff --git a/vendor/github.com/moby/moby/client/container_copy.go b/vendor/github.com/moby/moby/client/container_copy.go new file mode 100644 index 00000000000..f76511246ca --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_copy.go @@ -0,0 +1,137 @@ +package client + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "path/filepath" + "strings" + + "github.com/moby/moby/api/types/container" +) + +type ContainerStatPathOptions struct { + Path string +} + +type ContainerStatPathResult struct { + Stat container.PathStat +} + +// ContainerStatPath returns stat information about a path inside the container filesystem. +func (cli *Client) ContainerStatPath(ctx context.Context, containerID string, options ContainerStatPathOptions) (ContainerStatPathResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerStatPathResult{}, err + } + + query := url.Values{} + query.Set("path", filepath.ToSlash(options.Path)) // Normalize the paths used in the API. + + resp, err := cli.head(ctx, "/containers/"+containerID+"/archive", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerStatPathResult{}, err + } + stat, err := getContainerPathStatFromHeader(resp.Header) + if err != nil { + return ContainerStatPathResult{}, err + } + return ContainerStatPathResult{Stat: stat}, nil +} + +// CopyToContainerOptions holds information +// about files to copy into a container +type CopyToContainerOptions struct { + DestinationPath string + Content io.Reader + AllowOverwriteDirWithFile bool + CopyUIDGID bool +} + +type CopyToContainerResult struct{} + +// CopyToContainer copies content into the container filesystem. 
+// Note that `content` must be a Reader for a TAR archive +func (cli *Client) CopyToContainer(ctx context.Context, containerID string, options CopyToContainerOptions) (CopyToContainerResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return CopyToContainerResult{}, err + } + + query := url.Values{} + query.Set("path", filepath.ToSlash(options.DestinationPath)) // Normalize the paths used in the API. + // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. + if !options.AllowOverwriteDirWithFile { + query.Set("noOverwriteDirNonDir", "true") + } + + if options.CopyUIDGID { + query.Set("copyUIDGID", "true") + } + + response, err := cli.putRaw(ctx, "/containers/"+containerID+"/archive", query, options.Content, nil) + defer ensureReaderClosed(response) + if err != nil { + return CopyToContainerResult{}, err + } + + return CopyToContainerResult{}, nil +} + +type CopyFromContainerOptions struct { + SourcePath string +} + +type CopyFromContainerResult struct { + Content io.ReadCloser + Stat container.PathStat +} + +// CopyFromContainer gets the content from the container and returns it as a Reader +// for a TAR archive to manipulate it in the host. It's up to the caller to close the reader. +func (cli *Client) CopyFromContainer(ctx context.Context, containerID string, options CopyFromContainerOptions) (CopyFromContainerResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return CopyFromContainerResult{}, err + } + + query := make(url.Values, 1) + query.Set("path", filepath.ToSlash(options.SourcePath)) // Normalize the paths used in the API. + + resp, err := cli.get(ctx, "/containers/"+containerID+"/archive", query, nil) + if err != nil { + return CopyFromContainerResult{}, err + } + + // In order to get the copy behavior right, we need to know information + // about both the source and the destination. 
The response headers include + // stat info about the source that we can use in deciding exactly how to + // copy it locally. Along with the stat info about the local destination, + // we have everything we need to handle the multiple possibilities there + // can be when copying a file/dir from one location to another file/dir. + stat, err := getContainerPathStatFromHeader(resp.Header) + if err != nil { + ensureReaderClosed(resp) + return CopyFromContainerResult{Stat: stat}, fmt.Errorf("unable to get resource stat from response: %s", err) + } + return CopyFromContainerResult{Content: resp.Body, Stat: stat}, nil +} + +func getContainerPathStatFromHeader(header http.Header) (container.PathStat, error) { + var stat container.PathStat + + encodedStat := header.Get("X-Docker-Container-Path-Stat") + statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat)) + + err := json.NewDecoder(statDecoder).Decode(&stat) + if err != nil { + err = fmt.Errorf("unable to decode container path stat header: %s", err) + } + + return stat, err +} diff --git a/vendor/github.com/moby/moby/client/container_create.go b/vendor/github.com/moby/moby/client/container_create.go new file mode 100644 index 00000000000..d941a37207c --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_create.go @@ -0,0 +1,125 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + "path" + "sort" + "strings" + + cerrdefs "github.com/containerd/errdefs" + "github.com/moby/moby/api/types/container" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ContainerCreate creates a new container based on the given configuration. +// It can be associated with a name, but it's not mandatory. 
+func (cli *Client) ContainerCreate(ctx context.Context, options ContainerCreateOptions) (ContainerCreateResult, error) { + cfg := options.Config + + if cfg == nil { + cfg = &container.Config{} + } + + if options.Image != "" { + if cfg.Image != "" { + return ContainerCreateResult{}, cerrdefs.ErrInvalidArgument.WithMessage("either Image or config.Image should be set") + } + newCfg := *cfg + newCfg.Image = options.Image + cfg = &newCfg + } + + if cfg.Image == "" { + return ContainerCreateResult{}, cerrdefs.ErrInvalidArgument.WithMessage("config.Image or Image is required") + } + + var response container.CreateResponse + + if options.HostConfig != nil { + options.HostConfig.CapAdd = normalizeCapabilities(options.HostConfig.CapAdd) + options.HostConfig.CapDrop = normalizeCapabilities(options.HostConfig.CapDrop) + } + + query := url.Values{} + if options.Platform != nil { + if p := formatPlatform(*options.Platform); p != "unknown" { + query.Set("platform", p) + } + } + + if options.Name != "" { + query.Set("name", options.Name) + } + + body := container.CreateRequest{ + Config: cfg, + HostConfig: options.HostConfig, + NetworkingConfig: options.NetworkingConfig, + } + + resp, err := cli.post(ctx, "/containers/create", query, body, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerCreateResult{}, err + } + + err = json.NewDecoder(resp.Body).Decode(&response) + return ContainerCreateResult{ID: response.ID, Warnings: response.Warnings}, err +} + +// formatPlatform returns a formatted string representing platform (e.g., "linux/arm/v7"). +// +// It is a fork of [platforms.Format], and does not yet support "os.version", +// as [platforms.FormatAll] does. 
+// +// [platforms.Format]: https://github.com/containerd/platforms/blob/v1.0.0-rc.1/platforms.go#L309-L316 +// [platforms.FormatAll]: https://github.com/containerd/platforms/blob/v1.0.0-rc.1/platforms.go#L318-L330 +func formatPlatform(platform ocispec.Platform) string { + if platform.OS == "" { + return "unknown" + } + return path.Join(platform.OS, platform.Architecture, platform.Variant) +} + +// allCapabilities is a magic value for "all capabilities" +const allCapabilities = "ALL" + +// normalizeCapabilities normalizes capabilities to their canonical form, +// removes duplicates, and sorts the results. +// +// It is similar to [caps.NormalizeLegacyCapabilities], +// but performs no validation based on supported capabilities. +// +// [caps.NormalizeLegacyCapabilities]: https://github.com/moby/moby/blob/v28.3.2/oci/caps/utils.go#L56 +func normalizeCapabilities(caps []string) []string { + var normalized []string + + unique := make(map[string]struct{}) + for _, c := range caps { + c = normalizeCap(c) + if _, ok := unique[c]; ok { + continue + } + unique[c] = struct{}{} + normalized = append(normalized, c) + } + + sort.Strings(normalized) + return normalized +} + +// normalizeCap normalizes a capability to its canonical format by upper-casing +// and adding a "CAP_" prefix (if not yet present). It also accepts the "ALL" +// magic-value. 
+func normalizeCap(capability string) string { + capability = strings.ToUpper(capability) + if capability == allCapabilities { + return capability + } + if !strings.HasPrefix(capability, "CAP_") { + capability = "CAP_" + capability + } + return capability +} diff --git a/vendor/github.com/moby/moby/client/container_create_opts.go b/vendor/github.com/moby/moby/client/container_create_opts.go new file mode 100644 index 00000000000..8580e20d3a4 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_create_opts.go @@ -0,0 +1,25 @@ +package client + +import ( + "github.com/moby/moby/api/types/container" + "github.com/moby/moby/api/types/network" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ContainerCreateOptions holds parameters to create a container. +type ContainerCreateOptions struct { + Config *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig + Platform *ocispec.Platform + Name string + + // Image is a shortcut for Config.Image - only one of Image or Config.Image should be set. + Image string +} + +// ContainerCreateResult is the result from creating a container. +type ContainerCreateResult struct { + ID string + Warnings []string +} diff --git a/vendor/github.com/moby/moby/client/container_diff.go b/vendor/github.com/moby/moby/client/container_diff.go new file mode 100644 index 00000000000..ec904337e03 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_diff.go @@ -0,0 +1,30 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/container" +) + +// ContainerDiff shows differences in a container filesystem since it was started. 
+func (cli *Client) ContainerDiff(ctx context.Context, containerID string, options ContainerDiffOptions) (ContainerDiffResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerDiffResult{}, err + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerDiffResult{}, err + } + + var changes []container.FilesystemChange + err = json.NewDecoder(resp.Body).Decode(&changes) + if err != nil { + return ContainerDiffResult{}, err + } + return ContainerDiffResult{Changes: changes}, err +} diff --git a/vendor/github.com/moby/moby/client/container_diff_opts.go b/vendor/github.com/moby/moby/client/container_diff_opts.go new file mode 100644 index 00000000000..5e3c37ab4e8 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_diff_opts.go @@ -0,0 +1,13 @@ +package client + +import "github.com/moby/moby/api/types/container" + +// ContainerDiffOptions holds parameters to show differences in a container filesystem. +type ContainerDiffOptions struct { + // Currently no options, but this allows for future extensibility +} + +// ContainerDiffResult is the result from showing differences in a container filesystem. +type ContainerDiffResult struct { + Changes []container.FilesystemChange +} diff --git a/vendor/github.com/moby/moby/client/container_exec.go b/vendor/github.com/moby/moby/client/container_exec.go new file mode 100644 index 00000000000..30ed00ea52d --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_exec.go @@ -0,0 +1,203 @@ +package client + +import ( + "context" + "encoding/json" + "net/http" + + cerrdefs "github.com/containerd/errdefs" + "github.com/moby/moby/api/types/container" +) + +// ExecCreateOptions is a small subset of the Config struct that holds the configuration +// for the exec feature of docker. 
+type ExecCreateOptions struct { + User string // User that will run the command + Privileged bool // Is the container in privileged mode + TTY bool // Attach standard streams to a tty. + ConsoleSize ConsoleSize // Initial terminal size [height, width], unused if TTY == false + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStderr bool // Attach the standard error + AttachStdout bool // Attach the standard output + DetachKeys string // Escape keys for detach + Env []string // Environment variables + WorkingDir string // Working directory + Cmd []string // Execution commands and args +} + +// ExecCreateResult holds the result of creating a container exec. +type ExecCreateResult struct { + ID string +} + +// ExecCreate creates a new exec configuration to run an exec process. +func (cli *Client) ExecCreate(ctx context.Context, containerID string, options ExecCreateOptions) (ExecCreateResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ExecCreateResult{}, err + } + + consoleSize, err := getConsoleSize(options.TTY, options.ConsoleSize) + if err != nil { + return ExecCreateResult{}, err + } + + req := container.ExecCreateRequest{ + User: options.User, + Privileged: options.Privileged, + Tty: options.TTY, + ConsoleSize: consoleSize, + AttachStdin: options.AttachStdin, + AttachStderr: options.AttachStderr, + AttachStdout: options.AttachStdout, + DetachKeys: options.DetachKeys, + Env: options.Env, + WorkingDir: options.WorkingDir, + Cmd: options.Cmd, + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/exec", nil, req, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ExecCreateResult{}, err + } + + var response container.ExecCreateResponse + err = json.NewDecoder(resp.Body).Decode(&response) + return ExecCreateResult{ID: response.ID}, err +} + +type ConsoleSize struct { + Height, Width uint +} + +// ExecStartOptions holds options for starting a container exec. 
+type ExecStartOptions struct { + // ExecStart will first check if it's detached + Detach bool + // Check if there's a tty + TTY bool + // Terminal size [height, width], unused if TTY == false + ConsoleSize ConsoleSize +} + +// ExecStartResult holds the result of starting a container exec. +type ExecStartResult struct{} + +// ExecStart starts an exec process already created in the docker host. +func (cli *Client) ExecStart(ctx context.Context, execID string, options ExecStartOptions) (ExecStartResult, error) { + consoleSize, err := getConsoleSize(options.TTY, options.ConsoleSize) + if err != nil { + return ExecStartResult{}, err + } + + req := container.ExecStartRequest{ + Detach: options.Detach, + Tty: options.TTY, + ConsoleSize: consoleSize, + } + resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, req, nil) + defer ensureReaderClosed(resp) + return ExecStartResult{}, err +} + +// ExecAttachOptions holds options for attaching to a container exec. +type ExecAttachOptions struct { + // Check if there's a tty + TTY bool + // Terminal size [height, width], unused if TTY == false + ConsoleSize ConsoleSize `json:",omitzero"` +} + +// ExecAttachResult holds the result of attaching to a container exec. +type ExecAttachResult struct { + HijackedResponse +} + +// ExecAttach attaches a connection to an exec process in the server. +// +// It returns a [HijackedResponse] with the hijacked connection +// and a reader to get output. It's up to the caller to close +// the hijacked connection by calling [HijackedResponse.Close]. +// +// The stream format on the response uses one of two formats: +// +// - If the container is using a TTY, there is only a single stream (stdout) +// and data is copied directly from the container output stream, no extra +// multiplexing or headers. +// - If the container is *not* using a TTY, streams for stdout and stderr are +// multiplexed. +// +// You can use [stdcopy.StdCopy] to demultiplex this stream. 
Refer to +// [Client.ContainerAttach] for details about the multiplexed stream. +// +// [stdcopy.StdCopy]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy#StdCopy +func (cli *Client) ExecAttach(ctx context.Context, execID string, options ExecAttachOptions) (ExecAttachResult, error) { + consoleSize, err := getConsoleSize(options.TTY, options.ConsoleSize) + if err != nil { + return ExecAttachResult{}, err + } + req := container.ExecStartRequest{ + Detach: false, + Tty: options.TTY, + ConsoleSize: consoleSize, + } + response, err := cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, req, http.Header{ + "Content-Type": {"application/json"}, + }) + return ExecAttachResult{HijackedResponse: response}, err +} + +func getConsoleSize(hasTTY bool, consoleSize ConsoleSize) (*[2]uint, error) { + if consoleSize.Height != 0 || consoleSize.Width != 0 { + if !hasTTY { + return nil, cerrdefs.ErrInvalidArgument.WithMessage("console size is only supported when TTY is enabled") + } + return &[2]uint{consoleSize.Height, consoleSize.Width}, nil + } + return nil, nil +} + +// ExecInspectOptions holds options for inspecting a container exec. +type ExecInspectOptions struct{} + +// ExecInspectResult holds the result of inspecting a container exec. +// +// It provides a subset of the information included in [container.ExecInspectResponse]. +// +// TODO(thaJeztah): include all fields of [container.ExecInspectResponse] ? +type ExecInspectResult struct { + ID string + ContainerID string + Running bool + ExitCode int + PID int +} + +// ExecInspect returns information about a specific exec process on the docker host. 
+func (cli *Client) ExecInspect(ctx context.Context, execID string, options ExecInspectOptions) (ExecInspectResult, error) { + resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ExecInspectResult{}, err + } + + var response container.ExecInspectResponse + err = json.NewDecoder(resp.Body).Decode(&response) + if err != nil { + return ExecInspectResult{}, err + } + + var ec int + if response.ExitCode != nil { + ec = *response.ExitCode + } + + return ExecInspectResult{ + ID: response.ID, + ContainerID: response.ContainerID, + Running: response.Running, + ExitCode: ec, + PID: response.Pid, + }, nil +} diff --git a/vendor/github.com/moby/moby/client/container_export.go b/vendor/github.com/moby/moby/client/container_export.go new file mode 100644 index 00000000000..2d33efb7d89 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_export.go @@ -0,0 +1,47 @@ +package client + +import ( + "context" + "io" + "net/url" +) + +// ContainerExportOptions specifies options for container export operations. +type ContainerExportOptions struct { + // Currently no options are defined for ContainerExport +} + +// ContainerExportResult represents the result of a container export operation. +type ContainerExportResult interface { + io.ReadCloser +} + +// ContainerExport retrieves the raw contents of a container +// and returns them as an [io.ReadCloser]. It's up to the caller +// to close the stream. 
+// +// The underlying [io.ReadCloser] is automatically closed if the context is canceled, +func (cli *Client) ContainerExport(ctx context.Context, containerID string, options ContainerExportOptions) (ContainerExportResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return nil, err + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) + if err != nil { + return nil, err + } + + return &containerExportResult{ + ReadCloser: newCancelReadCloser(ctx, resp.Body), + }, nil +} + +type containerExportResult struct { + io.ReadCloser +} + +var ( + _ io.ReadCloser = (*containerExportResult)(nil) + _ ContainerExportResult = (*containerExportResult)(nil) +) diff --git a/vendor/github.com/moby/moby/client/container_inspect.go b/vendor/github.com/moby/moby/client/container_inspect.go new file mode 100644 index 00000000000..4f12c46577f --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_inspect.go @@ -0,0 +1,47 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/container" +) + +// ContainerInspectOptions holds options for inspecting a container using +// the [Client.ConfigInspect] method. +type ContainerInspectOptions struct { + // Size controls whether the container's filesystem size should be calculated. + // When set, the [container.InspectResponse.SizeRw] and [container.InspectResponse.SizeRootFs] + // fields in [ContainerInspectResult.Container] are populated with the result. + // + // Calculating the size can be a costly operation, and should not be used + // unless needed. + Size bool +} + +// ContainerInspectResult holds the result from the [Client.ConfigInspect] method. +type ContainerInspectResult struct { + Container container.InspectResponse + Raw json.RawMessage +} + +// ContainerInspect returns the container information. 
+func (cli *Client) ContainerInspect(ctx context.Context, containerID string, options ContainerInspectOptions) (ContainerInspectResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerInspectResult{}, err + } + + query := url.Values{} + if options.Size { + query.Set("size", "1") + } + resp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) + if err != nil { + return ContainerInspectResult{}, err + } + var out ContainerInspectResult + out.Raw, err = decodeWithRaw(resp, &out.Container) + return out, err +} diff --git a/vendor/github.com/moby/moby/client/container_kill.go b/vendor/github.com/moby/moby/client/container_kill.go new file mode 100644 index 00000000000..ae7a4ebd8bd --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_kill.go @@ -0,0 +1,39 @@ +package client + +import ( + "context" + "net/url" +) + +// ContainerKillOptions holds options for [Client.ContainerKill]. +type ContainerKillOptions struct { + // Signal (optional) is the signal to send to the container to (gracefully) + // stop it before forcibly terminating the container with SIGKILL after a + // timeout. If no value is set, the default (SIGKILL) is used. + Signal string `json:",omitempty"` +} + +// ContainerKillResult holds the result of [Client.ContainerKill], +type ContainerKillResult struct { + // Add future fields here. +} + +// ContainerKill terminates the container process but does not remove the container from the docker host. 
+func (cli *Client) ContainerKill(ctx context.Context, containerID string, options ContainerKillOptions) (ContainerKillResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerKillResult{}, err + } + + query := url.Values{} + if options.Signal != "" { + query.Set("signal", options.Signal) + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerKillResult{}, err + } + return ContainerKillResult{}, nil +} diff --git a/vendor/github.com/moby/moby/client/container_list.go b/vendor/github.com/moby/moby/client/container_list.go new file mode 100644 index 00000000000..d9334c544e6 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_list.go @@ -0,0 +1,66 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + + "github.com/moby/moby/api/types/container" +) + +// ContainerListOptions holds parameters to list containers with. +type ContainerListOptions struct { + Size bool + All bool + Limit int + Filters Filters + + // Latest is non-functional and should not be used. Use Limit: 1 instead. + // + // Deprecated: the Latest option is non-functional and should not be used. Use Limit: 1 instead. + Latest bool + + // Since is no longer supported. Use the "since" filter instead. + // + // Deprecated: the Since option is no longer supported since docker 1.12 (API 1.24). Use the "since" filter instead. + Since string + + // Before is no longer supported. Use the "since" filter instead. + // + // Deprecated: the Before option is no longer supported since docker 1.12 (API 1.24). Use the "before" filter instead. + Before string +} + +type ContainerListResult struct { + Items []container.Summary +} + +// ContainerList returns the list of containers in the docker host. 
+func (cli *Client) ContainerList(ctx context.Context, options ContainerListOptions) (ContainerListResult, error) { + query := url.Values{} + + if options.All { + query.Set("all", "1") + } + + if options.Limit > 0 { + query.Set("limit", strconv.Itoa(options.Limit)) + } + + if options.Size { + query.Set("size", "1") + } + + options.Filters.updateURLValues(query) + + resp, err := cli.get(ctx, "/containers/json", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerListResult{}, err + } + + var containers []container.Summary + err = json.NewDecoder(resp.Body).Decode(&containers) + return ContainerListResult{Items: containers}, err +} diff --git a/vendor/github.com/moby/moby/client/container_logs.go b/vendor/github.com/moby/moby/client/container_logs.go new file mode 100644 index 00000000000..b26a3568abc --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_logs.go @@ -0,0 +1,134 @@ +package client + +import ( + "context" + "fmt" + "io" + "net/url" + "time" + + "github.com/moby/moby/client/internal/timestamp" +) + +// ContainerLogsOptions holds parameters to filter logs with. +type ContainerLogsOptions struct { + ShowStdout bool + ShowStderr bool + Since string + Until string + Timestamps bool + Follow bool + Tail string + Details bool +} + +// ContainerLogsResult is the result of a container logs operation. +type ContainerLogsResult interface { + io.ReadCloser +} + +// ContainerLogs returns the logs generated by a container in an [io.ReadCloser]. +// It's up to the caller to close the stream. +// +// The underlying [io.ReadCloser] is automatically closed if the context is canceled, +// +// The stream format on the response uses one of two formats: +// +// - If the container is using a TTY, there is only a single stream (stdout) +// and data is copied directly from the container output stream, no extra +// multiplexing or headers. +// - If the container is *not* using a TTY, streams for stdout and stderr are +// multiplexed. 
+// +// The format of the multiplexed stream is defined in the [stdcopy] package, +// and as follows: +// +// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} +// +// STREAM_TYPE can be 1 for [Stdout] and 2 for [Stderr]. Refer to [stdcopy.StdType] +// for details. SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded +// as big endian, this is the size of OUTPUT. You can use [stdcopy.StdCopy] +// to demultiplex this stream. +// +// [stdcopy]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy +// [stdcopy.StdCopy]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy#StdCopy +// [stdcopy.StdType]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy#StdType +// [Stdout]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy#Stdout +// [Stderr]: https://pkg.go.dev/github.com/moby/moby/api/pkg/stdcopy#Stderr +func (cli *Client) ContainerLogs(ctx context.Context, containerID string, options ContainerLogsOptions) (ContainerLogsResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return nil, err + } + + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timestamp.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, fmt.Errorf(`invalid value for "since": %w`, err) + } + query.Set("since", ts) + } + + if options.Until != "" { + ts, err := timestamp.GetTimestamp(options.Until, time.Now()) + if err != nil { + return nil, fmt.Errorf(`invalid value for "until": %w`, err) + } + query.Set("until", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + + switch options.Tail { + case "", "all": + // don't send option; default is to show all logs. 
+ // + // The default on the daemon-side is to show all logs; account for + // some special values. The CLI may set a magic "all" value that's + // used as default. + // + // Given that the default is to show all logs, we can ignore these + // values, and don't send "tail". + // + // see https://github.com/moby/moby/blob/0df791cb72b568eeadba2267fe9a5040d12b0487/daemon/logs.go#L75-L78 + // see https://github.com/moby/moby/blob/4d20b6fe56dfb2b06f4a5dd1f32913215a9c317b/daemon/cluster/services.go#L425-L449 + default: + query.Set("tail", options.Tail) + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/logs", query, nil) + if err != nil { + return nil, err + } + return &containerLogsResult{ + ReadCloser: newCancelReadCloser(ctx, resp.Body), + }, nil +} + +type containerLogsResult struct { + io.ReadCloser +} + +var ( + _ io.ReadCloser = (*containerLogsResult)(nil) + _ ContainerLogsResult = (*containerLogsResult)(nil) +) diff --git a/vendor/github.com/moby/moby/client/container_pause.go b/vendor/github.com/moby/moby/client/container_pause.go new file mode 100644 index 00000000000..07669c89707 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_pause.go @@ -0,0 +1,28 @@ +package client + +import "context" + +// ContainerPauseOptions holds options for [Client.ContainerPause]. +type ContainerPauseOptions struct { + // Add future optional parameters here. +} + +// ContainerPauseResult holds the result of [Client.ContainerPause], +type ContainerPauseResult struct { + // Add future fields here. +} + +// ContainerPause pauses the main process of a given container without terminating it. 
+func (cli *Client) ContainerPause(ctx context.Context, containerID string, options ContainerPauseOptions) (ContainerPauseResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerPauseResult{}, err + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerPauseResult{}, err + } + return ContainerPauseResult{}, nil +} diff --git a/vendor/github.com/moby/moby/client/container_prune.go b/vendor/github.com/moby/moby/client/container_prune.go new file mode 100644 index 00000000000..f826f8b6fb8 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_prune.go @@ -0,0 +1,39 @@ +package client + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + "github.com/moby/moby/api/types/container" +) + +// ContainerPruneOptions holds parameters to prune containers. +type ContainerPruneOptions struct { + Filters Filters +} + +// ContainerPruneResult holds the result from the [Client.ContainerPrune] method. 
+type ContainerPruneResult struct { + Report container.PruneReport +} + +// ContainerPrune requests the daemon to delete unused data +func (cli *Client) ContainerPrune(ctx context.Context, opts ContainerPruneOptions) (ContainerPruneResult, error) { + query := url.Values{} + opts.Filters.updateURLValues(query) + + resp, err := cli.post(ctx, "/containers/prune", query, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerPruneResult{}, err + } + + var report container.PruneReport + if err := json.NewDecoder(resp.Body).Decode(&report); err != nil { + return ContainerPruneResult{}, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return ContainerPruneResult{Report: report}, nil +} diff --git a/vendor/github.com/moby/moby/client/container_remove.go b/vendor/github.com/moby/moby/client/container_remove.go new file mode 100644 index 00000000000..0fbfa05fa10 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_remove.go @@ -0,0 +1,45 @@ +package client + +import ( + "context" + "net/url" +) + +// ContainerRemoveOptions holds parameters to remove containers. +type ContainerRemoveOptions struct { + RemoveVolumes bool + RemoveLinks bool + Force bool +} + +// ContainerRemoveResult holds the result of [Client.ContainerRemove], +type ContainerRemoveResult struct { + // Add future fields here. +} + +// ContainerRemove kills and removes a container from the docker host. 
+func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options ContainerRemoveOptions) (ContainerRemoveResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerRemoveResult{}, err + } + + query := url.Values{} + if options.RemoveVolumes { + query.Set("v", "1") + } + if options.RemoveLinks { + query.Set("link", "1") + } + + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerRemoveResult{}, err + } + return ContainerRemoveResult{}, nil +} diff --git a/vendor/github.com/moby/moby/client/container_rename.go b/vendor/github.com/moby/moby/client/container_rename.go new file mode 100644 index 00000000000..4fd28a49864 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_rename.go @@ -0,0 +1,39 @@ +package client + +import ( + "context" + "net/url" + "strings" + + cerrdefs "github.com/containerd/errdefs" +) + +// ContainerRenameOptions represents the options for renaming a container. +type ContainerRenameOptions struct { + NewName string +} + +// ContainerRenameResult represents the result of a container rename operation. +type ContainerRenameResult struct { + // This struct can be expanded in the future if needed +} + +// ContainerRename changes the name of a given container. 
+func (cli *Client) ContainerRename(ctx context.Context, containerID string, options ContainerRenameOptions) (ContainerRenameResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerRenameResult{}, err + } + options.NewName = strings.TrimSpace(options.NewName) + if options.NewName == "" || strings.TrimPrefix(options.NewName, "/") == "" { + // daemons before v29.0 did not handle the canonical name ("/") well + // let's be nice and validate it here before sending + return ContainerRenameResult{}, cerrdefs.ErrInvalidArgument.WithMessage("new name cannot be blank") + } + + query := url.Values{} + query.Set("name", options.NewName) + resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil) + defer ensureReaderClosed(resp) + return ContainerRenameResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/container_resize.go b/vendor/github.com/moby/moby/client/container_resize.go new file mode 100644 index 00000000000..8ce26fb5855 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_resize.go @@ -0,0 +1,64 @@ +package client + +import ( + "context" + "net/url" + "strconv" +) + +// ContainerResizeOptions holds parameters to resize a TTY. +// It can be used to resize container TTYs and +// exec process TTYs too. +type ContainerResizeOptions struct { + Height uint + Width uint +} + +// ContainerResizeResult holds the result of [Client.ContainerResize], +type ContainerResizeResult struct { + // Add future fields here. +} + +// ContainerResize changes the size of the pseudo-TTY for a container. +func (cli *Client) ContainerResize(ctx context.Context, containerID string, options ContainerResizeOptions) (ContainerResizeResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerResizeResult{}, err + } + // FIXME(thaJeztah): the API / backend accepts uint32, but container.ResizeOptions uses uint. 
+ query := url.Values{} + query.Set("h", strconv.FormatUint(uint64(options.Height), 10)) + query.Set("w", strconv.FormatUint(uint64(options.Width), 10)) + + resp, err := cli.post(ctx, "/containers/"+containerID+"/resize", query, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerResizeResult{}, err + } + return ContainerResizeResult{}, nil +} + +// ExecResizeOptions holds options for resizing a container exec TTY. +type ExecResizeOptions ContainerResizeOptions + +// ExecResizeResult holds the result of resizing a container exec TTY. +type ExecResizeResult struct{} + +// ExecResize changes the size of the tty for an exec process running inside a container. +func (cli *Client) ExecResize(ctx context.Context, execID string, options ExecResizeOptions) (ExecResizeResult, error) { + execID, err := trimID("exec", execID) + if err != nil { + return ExecResizeResult{}, err + } + // FIXME(thaJeztah): the API / backend accepts uint32, but container.ResizeOptions uses uint. + query := url.Values{} + query.Set("h", strconv.FormatUint(uint64(options.Height), 10)) + query.Set("w", strconv.FormatUint(uint64(options.Width), 10)) + + resp, err := cli.post(ctx, "/exec/"+execID+"/resize", query, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ExecResizeResult{}, err + } + return ExecResizeResult{}, nil +} diff --git a/vendor/github.com/moby/moby/client/container_restart.go b/vendor/github.com/moby/moby/client/container_restart.go new file mode 100644 index 00000000000..e883f75891b --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_restart.go @@ -0,0 +1,54 @@ +package client + +import ( + "context" + "net/url" + "strconv" +) + +// ContainerRestartOptions holds options for [Client.ContainerRestart]. +type ContainerRestartOptions struct { + // Signal (optional) is the signal to send to the container to (gracefully) + // stop it before forcibly terminating the container with SIGKILL after the + // timeout expires. 
If no value is set, the default (SIGTERM) is used. + Signal string `json:",omitempty"` + + // Timeout (optional) is the timeout (in seconds) to wait for the container + // to stop gracefully before forcibly terminating it with SIGKILL. + // + // - Use nil to use the default timeout (10 seconds). + // - Use '-1' to wait indefinitely. + // - Use '0' to not wait for the container to exit gracefully, and + // immediately proceeds to forcibly terminating the container. + // - Other positive values are used as timeout (in seconds). + Timeout *int `json:",omitempty"` +} + +// ContainerRestartResult holds the result of [Client.ContainerRestart], +type ContainerRestartResult struct { + // Add future fields here. +} + +// ContainerRestart stops, and starts a container again. +// It makes the daemon wait for the container to be up again for +// a specific amount of time, given the timeout. +func (cli *Client) ContainerRestart(ctx context.Context, containerID string, options ContainerRestartOptions) (ContainerRestartResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerRestartResult{}, err + } + + query := url.Values{} + if options.Timeout != nil { + query.Set("t", strconv.Itoa(*options.Timeout)) + } + if options.Signal != "" { + query.Set("signal", options.Signal) + } + resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerRestartResult{}, err + } + return ContainerRestartResult{}, nil +} diff --git a/vendor/github.com/moby/moby/client/container_start.go b/vendor/github.com/moby/moby/client/container_start.go new file mode 100644 index 00000000000..dfb821d1d1d --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_start.go @@ -0,0 +1,40 @@ +package client + +import ( + "context" + "net/url" +) + +// ContainerStartOptions holds options for [Client.ContainerStart]. 
+type ContainerStartOptions struct { + CheckpointID string + CheckpointDir string +} + +// ContainerStartResult holds the result of [Client.ContainerStart], +type ContainerStartResult struct { + // Add future fields here. +} + +// ContainerStart sends a request to the docker daemon to start a container. +func (cli *Client) ContainerStart(ctx context.Context, containerID string, options ContainerStartOptions) (ContainerStartResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerStartResult{}, err + } + + query := url.Values{} + if options.CheckpointID != "" { + query.Set("checkpoint", options.CheckpointID) + } + if options.CheckpointDir != "" { + query.Set("checkpoint-dir", options.CheckpointDir) + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerStartResult{}, err + } + return ContainerStartResult{}, nil +} diff --git a/vendor/github.com/moby/moby/client/container_stats.go b/vendor/github.com/moby/moby/client/container_stats.go new file mode 100644 index 00000000000..277769dbffa --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_stats.go @@ -0,0 +1,75 @@ +package client + +import ( + "context" + "io" + "net/url" +) + +// ContainerStatsOptions holds parameters to retrieve container statistics +// using the [Client.ContainerStats] method. +type ContainerStatsOptions struct { + // Stream enables streaming [container.StatsResponse] results instead + // of collecting a single sample. If enabled, the client remains attached + // until the [ContainerStatsResult.Body] is closed or the context is + // cancelled. + Stream bool + + // IncludePreviousSample asks the daemon to collect a prior sample to populate the + // [container.StatsResponse.PreRead] and [container.StatsResponse.PreCPUStats] + // fields. 
+ // + // It set, the daemon collects two samples at a one-second interval before + // returning the result. The first sample populates the PreCPUStats (“previous + // CPU”) field, allowing delta calculations for CPU usage. If false, only + // a single sample is taken and returned immediately, leaving PreRead and + // PreCPUStats empty. + // + // This option has no effect if Stream is enabled. If Stream is enabled, + // [container.StatsResponse.PreCPUStats] is never populated for the first + // record. + IncludePreviousSample bool +} + +// ContainerStatsResult holds the result from [Client.ContainerStats]. +// +// It wraps an [io.ReadCloser] that provides one or more [container.StatsResponse] +// objects for a container, as produced by the "GET /containers/{id}/stats" endpoint. +// If streaming is disabled, the stream contains a single record. +type ContainerStatsResult struct { + Body io.ReadCloser +} + +// ContainerStats retrieves live resource usage statistics for the specified +// container. The caller must close the [io.ReadCloser] in the returned result +// to release associated resources. +// +// The underlying [io.ReadCloser] is automatically closed if the context is canceled, +func (cli *Client) ContainerStats(ctx context.Context, containerID string, options ContainerStatsOptions) (ContainerStatsResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerStatsResult{}, err + } + + query := url.Values{} + if options.Stream { + query.Set("stream", "true") + } else { + // Note: daemons before v29.0 return an error if both set: "cannot have stream=true and one-shot=true" + // + // TODO(thaJeztah): consider making "stream=false" the default for the API as well, or using Accept Header to switch. 
+ query.Set("stream", "false") + if !options.IncludePreviousSample { + query.Set("one-shot", "true") + } + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) + if err != nil { + return ContainerStatsResult{}, err + } + + return ContainerStatsResult{ + Body: newCancelReadCloser(ctx, resp.Body), + }, nil +} diff --git a/vendor/github.com/moby/moby/client/container_stop.go b/vendor/github.com/moby/moby/client/container_stop.go new file mode 100644 index 00000000000..d4d47d8fd40 --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_stop.go @@ -0,0 +1,58 @@ +package client + +import ( + "context" + "net/url" + "strconv" +) + +// ContainerStopOptions holds the options for [Client.ContainerStop]. +type ContainerStopOptions struct { + // Signal (optional) is the signal to send to the container to (gracefully) + // stop it before forcibly terminating the container with SIGKILL after the + // timeout expires. If no value is set, the default (SIGTERM) is used. + Signal string `json:",omitempty"` + + // Timeout (optional) is the timeout (in seconds) to wait for the container + // to stop gracefully before forcibly terminating it with SIGKILL. + // + // - Use nil to use the default timeout (10 seconds). + // - Use '-1' to wait indefinitely. + // - Use '0' to not wait for the container to exit gracefully, and + // immediately proceeds to forcibly terminating the container. + // - Other positive values are used as timeout (in seconds). + Timeout *int `json:",omitempty"` +} + +// ContainerStopResult holds the result of [Client.ContainerStop], +type ContainerStopResult struct { + // Add future fields here. +} + +// ContainerStop stops a container. In case the container fails to stop +// gracefully within a time frame specified by the timeout argument, +// it is forcefully terminated (killed). +// +// If the timeout is nil, the container's StopTimeout value is used, if set, +// otherwise the engine default. 
A negative timeout value can be specified, +// meaning no timeout, i.e. no forceful termination is performed. +func (cli *Client) ContainerStop(ctx context.Context, containerID string, options ContainerStopOptions) (ContainerStopResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerStopResult{}, err + } + + query := url.Values{} + if options.Timeout != nil { + query.Set("t", strconv.Itoa(*options.Timeout)) + } + if options.Signal != "" { + query.Set("signal", options.Signal) + } + resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerStopResult{}, err + } + return ContainerStopResult{}, nil +} diff --git a/vendor/github.com/moby/moby/client/container_top.go b/vendor/github.com/moby/moby/client/container_top.go new file mode 100644 index 00000000000..dc0af8ae48b --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_top.go @@ -0,0 +1,44 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + "strings" + + "github.com/moby/moby/api/types/container" +) + +// ContainerTopOptions defines options for container top operations. +type ContainerTopOptions struct { + Arguments []string +} + +// ContainerTopResult represents the result of a ContainerTop operation. +type ContainerTopResult struct { + Processes [][]string + Titles []string +} + +// ContainerTop shows process information from within a container. 
+func (cli *Client) ContainerTop(ctx context.Context, containerID string, options ContainerTopOptions) (ContainerTopResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerTopResult{}, err + } + + query := url.Values{} + if len(options.Arguments) > 0 { + query.Set("ps_args", strings.Join(options.Arguments, " ")) + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerTopResult{}, err + } + + var response container.TopResponse + err = json.NewDecoder(resp.Body).Decode(&response) + return ContainerTopResult{Processes: response.Processes, Titles: response.Titles}, err +} diff --git a/vendor/github.com/moby/moby/client/container_unpause.go b/vendor/github.com/moby/moby/client/container_unpause.go new file mode 100644 index 00000000000..627d60c960d --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_unpause.go @@ -0,0 +1,28 @@ +package client + +import "context" + +// ContainerUnpauseOptions holds options for [Client.ContainerUnpause]. +type ContainerUnpauseOptions struct { + // Add future optional parameters here. +} + +// ContainerUnpauseResult holds the result of [Client.ContainerUnpause], +type ContainerUnpauseResult struct { + // Add future fields here. +} + +// ContainerUnpause resumes the process execution within a container. 
+func (cli *Client) ContainerUnpause(ctx context.Context, containerID string, options ContainerUnpauseOptions) (ContainerUnpauseResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerUnpauseResult{}, err + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerUnpauseResult{}, err + } + return ContainerUnpauseResult{}, nil +} diff --git a/vendor/github.com/moby/moby/client/container_update.go b/vendor/github.com/moby/moby/client/container_update.go new file mode 100644 index 00000000000..a1d4d249a9f --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_update.go @@ -0,0 +1,46 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/container" +) + +// ContainerUpdateOptions holds options for [Client.ContainerUpdate]. +type ContainerUpdateOptions struct { + Resources *container.Resources + RestartPolicy *container.RestartPolicy +} + +// ContainerUpdateResult is the result from updating a container. +type ContainerUpdateResult struct { + // Warnings encountered when updating the container. + Warnings []string +} + +// ContainerUpdate updates the resources of a container. 
+func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, options ContainerUpdateOptions) (ContainerUpdateResult, error) { + containerID, err := trimID("container", containerID) + if err != nil { + return ContainerUpdateResult{}, err + } + + updateConfig := container.UpdateConfig{} + if options.Resources != nil { + updateConfig.Resources = *options.Resources + } + if options.RestartPolicy != nil { + updateConfig.RestartPolicy = *options.RestartPolicy + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ContainerUpdateResult{}, err + } + + var response container.UpdateResponse + err = json.NewDecoder(resp.Body).Decode(&response) + return ContainerUpdateResult{Warnings: response.Warnings}, err +} diff --git a/vendor/github.com/moby/moby/client/container_wait.go b/vendor/github.com/moby/moby/client/container_wait.go new file mode 100644 index 00000000000..6f71ed0518d --- /dev/null +++ b/vendor/github.com/moby/moby/client/container_wait.go @@ -0,0 +1,92 @@ +package client + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "io" + "net/url" + + "github.com/moby/moby/api/types/container" +) + +const containerWaitErrorMsgLimit = 2 * 1024 /* Max: 2KiB */ + +// ContainerWaitOptions holds options for [Client.ContainerWait]. +type ContainerWaitOptions struct { + Condition container.WaitCondition +} + +// ContainerWaitResult defines the result from the [Client.ContainerWait] method. 
+type ContainerWaitResult struct { + Result <-chan container.WaitResponse + Error <-chan error +} + +// ContainerWait waits until the specified container is in a certain state +// indicated by the given condition, either; +// +// - "not-running" ([container.WaitConditionNotRunning]) (default) +// - "next-exit" ([container.WaitConditionNextExit]) +// - "removed" ([container.WaitConditionRemoved]) +// +// ContainerWait blocks until the request has been acknowledged by the server +// (with a response header), then returns two channels on which the caller can +// wait for the exit status of the container or an error if there was a problem +// either beginning the wait request or in getting the response. This allows the +// caller to synchronize ContainerWait with other calls, such as specifying a +// "next-exit" condition ([container.WaitConditionNextExit]) before issuing a +// [Client.ContainerStart] request. +func (cli *Client) ContainerWait(ctx context.Context, containerID string, options ContainerWaitOptions) ContainerWaitResult { + resultC := make(chan container.WaitResponse) + errC := make(chan error, 1) + + containerID, err := trimID("container", containerID) + if err != nil { + errC <- err + return ContainerWaitResult{Result: resultC, Error: errC} + } + + query := url.Values{} + if options.Condition != "" { + query.Set("condition", string(options.Condition)) + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil) + if err != nil { + defer ensureReaderClosed(resp) + errC <- err + return ContainerWaitResult{Result: resultC, Error: errC} + } + + go func() { + defer ensureReaderClosed(resp) + + responseText := bytes.NewBuffer(nil) + stream := io.TeeReader(resp.Body, responseText) + + var res container.WaitResponse + if err := json.NewDecoder(stream).Decode(&res); err != nil { + // NOTE(nicks): The /wait API does not work well with HTTP proxies. + // At any time, the proxy could cut off the response stream. 
+ // + // But because the HTTP status has already been written, the proxy's + // only option is to write a plaintext error message. + // + // If there's a JSON parsing error, read the real error message + // off the body and send it to the client. + if errors.As(err, new(*json.SyntaxError)) { + _, _ = io.ReadAll(io.LimitReader(stream, containerWaitErrorMsgLimit)) + errC <- errors.New(responseText.String()) + } else { + errC <- err + } + return + } + + resultC <- res + }() + + return ContainerWaitResult{Result: resultC, Error: errC} +} diff --git a/vendor/github.com/moby/moby/client/distribution_inspect.go b/vendor/github.com/moby/moby/client/distribution_inspect.go new file mode 100644 index 00000000000..ffbf869d3c9 --- /dev/null +++ b/vendor/github.com/moby/moby/client/distribution_inspect.go @@ -0,0 +1,45 @@ +package client + +import ( + "context" + "encoding/json" + "net/http" + "net/url" + + "github.com/moby/moby/api/types/registry" +) + +// DistributionInspectResult holds the result of the DistributionInspect operation. +type DistributionInspectResult struct { + registry.DistributionInspect +} + +// DistributionInspectOptions holds options for the DistributionInspect operation. +type DistributionInspectOptions struct { + EncodedRegistryAuth string +} + +// DistributionInspect returns the image digest with the full manifest. 
+func (cli *Client) DistributionInspect(ctx context.Context, imageRef string, options DistributionInspectOptions) (DistributionInspectResult, error) { + if imageRef == "" { + return DistributionInspectResult{}, objectNotFoundError{object: "distribution", id: imageRef} + } + + var headers http.Header + if options.EncodedRegistryAuth != "" { + headers = http.Header{ + registry.AuthHeader: {options.EncodedRegistryAuth}, + } + } + + // Contact the registry to retrieve digest and platform information + resp, err := cli.get(ctx, "/distribution/"+imageRef+"/json", url.Values{}, headers) + defer ensureReaderClosed(resp) + if err != nil { + return DistributionInspectResult{}, err + } + + var distributionInspect registry.DistributionInspect + err = json.NewDecoder(resp.Body).Decode(&distributionInspect) + return DistributionInspectResult{DistributionInspect: distributionInspect}, err +} diff --git a/vendor/github.com/moby/moby/client/envvars.go b/vendor/github.com/moby/moby/client/envvars.go new file mode 100644 index 00000000000..a02295d1625 --- /dev/null +++ b/vendor/github.com/moby/moby/client/envvars.go @@ -0,0 +1,95 @@ +package client + +const ( + // EnvOverrideHost is the name of the environment variable that can be used + // to override the default host to connect to (DefaultDockerHost). + // + // This env-var is read by [FromEnv] and [WithHostFromEnv] and when set to a + // non-empty value, takes precedence over the default host (which is platform + // specific), or any host already set. + EnvOverrideHost = "DOCKER_HOST" + + // EnvOverrideAPIVersion is the name of the environment variable that can + // be used to override the API version to use. Value must be + // formatted as MAJOR.MINOR, for example, "1.19". + // + // This env-var is read by [FromEnv] and [WithAPIVersionFromEnv] and when set to a + // non-empty value, takes precedence over API version negotiation. 
+ // + // This environment variable should be used for debugging purposes only, as + // it can set the client to use an incompatible (or invalid) API version. + EnvOverrideAPIVersion = "DOCKER_API_VERSION" + + // EnvOverrideCertPath is the name of the environment variable that can be + // used to specify the directory from which to load the TLS certificates + // (ca.pem, cert.pem, key.pem) from. These certificates are used to configure + // the [Client] for a TCP connection protected by TLS client authentication. + // + // TLS certificate verification is enabled by default if the Client is configured + // to use a TLS connection. Refer to [EnvTLSVerify] below to learn how to + // disable verification for testing purposes. + // + // WARNING: Access to the remote API is equivalent to root access to the + // host where the daemon runs. Do not expose the API without protection, + // and only if needed. Make sure you are familiar with the ["daemon attack surface"]. + // + // For local access to the API, it is recommended to connect with the daemon + // using the default local socket connection (on Linux), or the named pipe + // (on Windows). + // + // If you need to access the API of a remote daemon, consider using an SSH + // (ssh://) connection, which is easier to set up, and requires no additional + // configuration if the host is accessible using ssh. + // + // If you cannot use the alternatives above, and you must expose the API over + // a TCP connection. Refer to [Protect the Docker daemon socket] + // to learn how to configure the daemon and client to use a TCP connection + // with TLS client authentication. Make sure you know the differences between + // a regular TLS connection and a TLS connection protected by TLS client + // authentication, and verify that the API cannot be accessed by other clients. 
+ // + // ["daemon attack surface"]: https://docs.docker.com/go/attack-surface/ + // [Protect the Docker daemon socket]: https://docs.docker.com/engine/security/protect-access/ + EnvOverrideCertPath = "DOCKER_CERT_PATH" + + // EnvTLSVerify is the name of the environment variable that can be used to + // enable or disable TLS certificate verification. When set to a non-empty + // value, TLS certificate verification is enabled, and the client is configured + // to use a TLS connection, using certificates from the default directories + // (within `~/.docker`); refer to EnvOverrideCertPath above for additional + // details. + // + // WARNING: Access to the remote API is equivalent to root access to the + // host where the daemon runs. Do not expose the API without protection, + // and only if needed. Make sure you are familiar with the ["daemon attack surface"]. + // + // Before setting up your client and daemon to use a TCP connection with TLS + // client authentication, consider using one of the alternatives mentioned + // in [EnvOverrideCertPath]. + // + // Disabling TLS certificate verification (for testing purposes) + // + // TLS certificate verification is enabled by default if the Client is configured + // to use a TLS connection, and it is highly recommended to keep verification + // enabled to prevent machine-in-the-middle attacks. Refer to [Protect the Docker daemon socket] + // in the documentation and pages linked from that page to learn how to + // configure the daemon and client to use a TCP connection with TLS client + // authentication enabled. + // + // Set the "DOCKER_TLS_VERIFY" environment to an empty string ("") to + // disable TLS certificate verification. Disabling verification is insecure, + // so should only be done for testing purposes. + // + // From the[crypto/tls.Config] documentation: + // + // InsecureSkipVerify controls whether a client verifies the server's + // certificate chain and host name. 
If InsecureSkipVerify is true, crypto/tls + // accepts any certificate presented by the server and any host name in that + // certificate. In this mode, TLS is susceptible to machine-in-the-middle + // attacks unless custom verification is used. This should be used only for + // testing or in combination with VerifyConnection or VerifyPeerCertificate. + // + // ["daemon attack surface"]: https://docs.docker.com/go/attack-surface/ + // [Protect the Docker daemon socket]: https://docs.docker.com/engine/security/protect-access/ + EnvTLSVerify = "DOCKER_TLS_VERIFY" +) diff --git a/vendor/github.com/moby/moby/client/errors.go b/vendor/github.com/moby/moby/client/errors.go new file mode 100644 index 00000000000..9fbfa7666ed --- /dev/null +++ b/vendor/github.com/moby/moby/client/errors.go @@ -0,0 +1,114 @@ +package client + +import ( + "context" + "errors" + "fmt" + "net/http" + + cerrdefs "github.com/containerd/errdefs" + "github.com/containerd/errdefs/pkg/errhttp" + "github.com/moby/moby/client/pkg/versions" +) + +// errConnectionFailed implements an error returned when connection failed. +type errConnectionFailed struct { + error +} + +// Error returns a string representation of an errConnectionFailed +func (e errConnectionFailed) Error() string { + return e.error.Error() +} + +func (e errConnectionFailed) Unwrap() error { + return e.error +} + +// IsErrConnectionFailed returns true if the error is caused by connection failed. +func IsErrConnectionFailed(err error) bool { + return errors.As(err, &errConnectionFailed{}) +} + +// connectionFailed returns an error with host in the error message when connection +// to docker daemon failed. +func connectionFailed(host string) error { + var err error + if host == "" { + err = errors.New("Cannot connect to the Docker daemon. Is the docker daemon running on this host?") + } else { + err = fmt.Errorf("Cannot connect to the Docker daemon at %s. 
Is the docker daemon running?", host) + } + return errConnectionFailed{error: err} +} + +type objectNotFoundError struct { + object string + id string +} + +func (e objectNotFoundError) NotFound() {} + +func (e objectNotFoundError) Error() string { + return fmt.Sprintf("Error: No such %s: %s", e.object, e.id) +} + +// requiresVersion returns an error if the APIVersion required is less than the +// current supported version. +// +// It performs API-version negotiation if the Client is configured with this +// option, otherwise it assumes the latest API version is used. +func (cli *Client) requiresVersion(ctx context.Context, apiRequired, feature string) error { + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. + if err := cli.checkVersion(ctx); err != nil { + return err + } + if cli.version != "" && versions.LessThan(cli.version, apiRequired) { + return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, apiRequired, cli.version) + } + return nil +} + +type httpError struct { + err error + errdef error +} + +func (e *httpError) Error() string { + return e.err.Error() +} + +func (e *httpError) Unwrap() error { + return e.err +} + +func (e *httpError) Is(target error) bool { + return errors.Is(e.errdef, target) +} + +// httpErrorFromStatusCode creates an errdef error, based on the provided HTTP status-code +func httpErrorFromStatusCode(err error, statusCode int) error { + if err == nil { + return nil + } + base := errhttp.ToNative(statusCode) + if base != nil { + return &httpError{err: err, errdef: base} + } + + switch { + case statusCode >= http.StatusOK && statusCode < http.StatusBadRequest: + // it's a client error + return err + case statusCode >= http.StatusBadRequest && statusCode < http.StatusInternalServerError: + return 
&httpError{err: err, errdef: cerrdefs.ErrInvalidArgument} + case statusCode >= http.StatusInternalServerError && statusCode < 600: + return &httpError{err: err, errdef: cerrdefs.ErrInternal} + default: + return &httpError{err: err, errdef: cerrdefs.ErrUnknown} + } +} diff --git a/vendor/github.com/moby/moby/client/filters.go b/vendor/github.com/moby/moby/client/filters.go new file mode 100644 index 00000000000..347ad5c689a --- /dev/null +++ b/vendor/github.com/moby/moby/client/filters.go @@ -0,0 +1,59 @@ +package client + +import ( + "encoding/json" + "net/url" +) + +// Filters describes a predicate for an API request. +// +// Each entry in the map is a filter term. +// Each term is evaluated against the set of values. +// A filter term is satisfied if any one of the values in the set is a match. +// An item matches the filters when all terms are satisfied. +// +// Like all other map types in Go, the zero value is empty and read-only. +type Filters map[string]map[string]bool + +// Add appends values to the value-set of term. +// +// The receiver f is returned for chaining. +// +// f := make(Filters).Add("name", "foo", "bar").Add("status", "exited") +func (f Filters) Add(term string, values ...string) Filters { + if _, ok := f[term]; !ok { + f[term] = make(map[string]bool) + } + for _, v := range values { + f[term][v] = true + } + return f +} + +// Clone returns a deep copy of f. +func (f Filters) Clone() Filters { + out := make(Filters, len(f)) + for term, values := range f { + inner := make(map[string]bool, len(values)) + for v, ok := range values { + inner[v] = ok + } + out[term] = inner + } + return out +} + +// updateURLValues sets the "filters" key in values to the marshalled value of +// f, replacing any existing values. When f is empty, any existing "filters" key +// is removed. 
+func (f Filters) updateURLValues(values url.Values) { + if len(f) > 0 { + b, err := json.Marshal(f) + if err != nil { + panic(err) // Marshaling builtin types should never fail + } + values.Set("filters", string(b)) + } else { + values.Del("filters") + } +} diff --git a/vendor/github.com/moby/moby/client/hijack.go b/vendor/github.com/moby/moby/client/hijack.go new file mode 100644 index 00000000000..31c44e59887 --- /dev/null +++ b/vendor/github.com/moby/moby/client/hijack.go @@ -0,0 +1,172 @@ +package client + +import ( + "bufio" + "context" + "fmt" + "net" + "net/http" + "net/url" + "time" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" +) + +// postHijacked sends a POST request and hijacks the connection. +func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body any, headers map[string][]string) (HijackedResponse, error) { + jsonBody, err := jsonEncode(body) + if err != nil { + return HijackedResponse{}, err + } + req, err := cli.buildRequest(ctx, http.MethodPost, cli.getAPIPath(ctx, path, query), jsonBody, headers) + if err != nil { + return HijackedResponse{}, err + } + conn, mediaType, err := setupHijackConn(cli.dialer(), req, "tcp") + if err != nil { + return HijackedResponse{}, err + } + + return NewHijackedResponse(conn, mediaType), nil +} + +// DialHijack returns a hijacked connection with negotiated protocol proto. 
+func (cli *Client) DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, http.NoBody) + if err != nil { + return nil, err + } + req = cli.addHeaders(req, meta) + + conn, _, err := setupHijackConn(cli.Dialer(), req, proto) + return conn, err +} + +func setupHijackConn(dialer func(context.Context) (net.Conn, error), req *http.Request, proto string) (_ net.Conn, _ string, retErr error) { + ctx := req.Context() + req.Header.Set("Connection", "Upgrade") + req.Header.Set("Upgrade", proto) + + conn, err := dialer(ctx) + if err != nil { + return nil, "", fmt.Errorf("cannot connect to the Docker daemon. Is 'docker daemon' running on this host?: %w", err) + } + defer func() { + if retErr != nil { + _ = conn.Close() + } + }() + + // When we set up a TCP connection for hijack, there could be long periods + // of inactivity (a long running command with no output) that in certain + // network setups may cause ECONNTIMEOUT, leaving the client in an unknown + // state. Setting TCP KeepAlive on the socket connection prohibits + // ECONNTIMEOUT unless the socket connection truly is broken + if tcpConn, ok := conn.(*net.TCPConn); ok { + _ = tcpConn.SetKeepAlive(true) + _ = tcpConn.SetKeepAlivePeriod(30 * time.Second) + } + + hc := &hijackedConn{conn, bufio.NewReader(conn)} + + // Server hijacks the connection, error 'connection closed' expected + resp, err := otelhttp.NewTransport(hc).RoundTrip(req) + if err != nil { + return nil, "", err + } + if resp.StatusCode != http.StatusSwitchingProtocols { + _ = resp.Body.Close() + return nil, "", fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode) + } + + if hc.r.Buffered() > 0 { + // If there is buffered content, wrap the connection. We return an + // object that implements CloseWrite if the underlying connection + // implements it. 
+ if _, ok := hc.Conn.(CloseWriter); ok { + conn = &hijackedConnCloseWriter{hc} + } else { + conn = hc + } + } else { + hc.r.Reset(nil) + } + + return conn, resp.Header.Get("Content-Type"), nil +} + +// hijackedConn wraps a net.Conn and is returned by setupHijackConn in the case +// that a) there was already buffered data in the http layer when Hijack() was +// called, and b) the underlying net.Conn does *not* implement CloseWrite(). +// hijackedConn does not implement CloseWrite() either. +type hijackedConn struct { + net.Conn + r *bufio.Reader +} + +func (c *hijackedConn) RoundTrip(req *http.Request) (*http.Response, error) { + if err := req.Write(c.Conn); err != nil { + return nil, err + } + return http.ReadResponse(c.r, req) +} + +func (c *hijackedConn) Read(b []byte) (int, error) { + return c.r.Read(b) +} + +// hijackedConnCloseWriter is a hijackedConn which additionally implements +// CloseWrite(). It is returned by setupHijackConn in the case that a) there +// was already buffered data in the http layer when Hijack() was called, and b) +// the underlying net.Conn *does* implement CloseWrite(). +type hijackedConnCloseWriter struct { + *hijackedConn +} + +var _ CloseWriter = &hijackedConnCloseWriter{} + +func (c *hijackedConnCloseWriter) CloseWrite() error { + conn := c.Conn.(CloseWriter) + return conn.CloseWrite() +} + +// NewHijackedResponse initializes a [HijackedResponse] type. +func NewHijackedResponse(conn net.Conn, mediaType string) HijackedResponse { + return HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn), mediaType: mediaType} +} + +// HijackedResponse holds connection information for a hijacked request. +type HijackedResponse struct { + mediaType string + Conn net.Conn + Reader *bufio.Reader +} + +// Close closes the hijacked connection and reader. +func (h *HijackedResponse) Close() { + h.Conn.Close() +} + +// MediaType let client know if HijackedResponse hold a raw or multiplexed stream. 
+// returns false if HTTP Content-Type is not relevant, and the container must be +// inspected. +func (h *HijackedResponse) MediaType() (string, bool) { + if h.mediaType == "" { + return "", false + } + return h.mediaType, true +} + +// CloseWriter is an interface that implements structs +// that close input streams to prevent from writing. +type CloseWriter interface { + CloseWrite() error +} + +// CloseWrite closes a readWriter for writing. +func (h *HijackedResponse) CloseWrite() error { + if conn, ok := h.Conn.(CloseWriter); ok { + return conn.CloseWrite() + } + return nil +} diff --git a/vendor/github.com/moby/moby/client/image_build.go b/vendor/github.com/moby/moby/client/image_build.go new file mode 100644 index 00000000000..5062ec5de12 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_build.go @@ -0,0 +1,179 @@ +package client + +import ( + "context" + "encoding/base64" + "encoding/json" + "io" + "net/http" + "net/url" + "strconv" + + cerrdefs "github.com/containerd/errdefs" + "github.com/moby/moby/api/types/container" + "github.com/moby/moby/api/types/network" +) + +// ImageBuild sends a request to the daemon to build images. +// The Body in the response implements an [io.ReadCloser] and it's up to the caller to +// close it. 
+func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options ImageBuildOptions) (ImageBuildResult, error) { + query, err := cli.imageBuildOptionsToQuery(ctx, options) + if err != nil { + return ImageBuildResult{}, err + } + + buf, err := json.Marshal(options.AuthConfigs) + if err != nil { + return ImageBuildResult{}, err + } + + headers := http.Header{} + headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) + headers.Set("Content-Type", "application/x-tar") + + resp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) + if err != nil { + return ImageBuildResult{}, err + } + + return ImageBuildResult{ + Body: resp.Body, + }, nil +} + +func (cli *Client) imageBuildOptionsToQuery(_ context.Context, options ImageBuildOptions) (url.Values, error) { + query := url.Values{} + if len(options.Tags) > 0 { + query["t"] = options.Tags + } + if len(options.SecurityOpt) > 0 { + query["securityopt"] = options.SecurityOpt + } + if len(options.ExtraHosts) > 0 { + query["extrahosts"] = options.ExtraHosts + } + if options.SuppressOutput { + query.Set("q", "1") + } + if options.RemoteContext != "" { + query.Set("remote", options.RemoteContext) + } + if options.NoCache { + query.Set("nocache", "1") + } + if !options.Remove { + // only send value when opting out because the daemon's default is + // to remove intermediate containers after a successful build, + // + // TODO(thaJeztah): deprecate "Remove" option, and provide a "NoRemove" or "Keep" option instead. + query.Set("rm", "0") + } + + if options.ForceRemove { + query.Set("forcerm", "1") + } + + if options.PullParent { + query.Set("pull", "1") + } + + if options.Squash { + // TODO(thaJeztah): squash is experimental, and deprecated when using BuildKit? 
+ query.Set("squash", "1") + } + + if !container.Isolation.IsDefault(options.Isolation) { + query.Set("isolation", string(options.Isolation)) + } + + if options.CPUSetCPUs != "" { + query.Set("cpusetcpus", options.CPUSetCPUs) + } + if options.NetworkMode != "" && options.NetworkMode != network.NetworkDefault { + query.Set("networkmode", options.NetworkMode) + } + if options.CPUSetMems != "" { + query.Set("cpusetmems", options.CPUSetMems) + } + if options.CPUShares != 0 { + query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10)) + } + if options.CPUQuota != 0 { + query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10)) + } + if options.CPUPeriod != 0 { + query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10)) + } + if options.Memory != 0 { + query.Set("memory", strconv.FormatInt(options.Memory, 10)) + } + if options.MemorySwap != 0 { + query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10)) + } + if options.CgroupParent != "" { + query.Set("cgroupparent", options.CgroupParent) + } + if options.ShmSize != 0 { + query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) + } + if options.Dockerfile != "" { + query.Set("dockerfile", options.Dockerfile) + } + if options.Target != "" { + query.Set("target", options.Target) + } + if len(options.Ulimits) != 0 { + ulimitsJSON, err := json.Marshal(options.Ulimits) + if err != nil { + return query, err + } + query.Set("ulimits", string(ulimitsJSON)) + } + if len(options.BuildArgs) != 0 { + buildArgsJSON, err := json.Marshal(options.BuildArgs) + if err != nil { + return query, err + } + query.Set("buildargs", string(buildArgsJSON)) + } + if len(options.Labels) != 0 { + labelsJSON, err := json.Marshal(options.Labels) + if err != nil { + return query, err + } + query.Set("labels", string(labelsJSON)) + } + if len(options.CacheFrom) != 0 { + cacheFromJSON, err := json.Marshal(options.CacheFrom) + if err != nil { + return query, err + } + query.Set("cachefrom", string(cacheFromJSON)) + } + if 
options.SessionID != "" { + query.Set("session", options.SessionID) + } + if len(options.Platforms) > 0 { + if len(options.Platforms) > 1 { + // TODO(thaJeztah): update API spec and add equivalent check on the daemon. We need this still for older daemons, which would ignore it. + return query, cerrdefs.ErrInvalidArgument.WithMessage("specifying multiple platforms is not yet supported") + } + query.Set("platform", formatPlatform(options.Platforms[0])) + } + if options.BuildID != "" { + query.Set("buildid", options.BuildID) + } + if options.Version != "" { + query.Set("version", string(options.Version)) + } + + if options.Outputs != nil { + outputsJSON, err := json.Marshal(options.Outputs) + if err != nil { + return query, err + } + query.Set("outputs", string(outputsJSON)) + } + return query, nil +} diff --git a/vendor/github.com/moby/moby/client/image_build_opts.go b/vendor/github.com/moby/moby/client/image_build_opts.go new file mode 100644 index 00000000000..f65ad0f2bf7 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_build_opts.go @@ -0,0 +1,79 @@ +package client + +import ( + "io" + + "github.com/moby/moby/api/types/build" + "github.com/moby/moby/api/types/container" + "github.com/moby/moby/api/types/registry" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ImageBuildOptions holds the information +// necessary to build images. +type ImageBuildOptions struct { + Tags []string + SuppressOutput bool + RemoteContext string + NoCache bool + Remove bool + ForceRemove bool + PullParent bool + Isolation container.Isolation + CPUSetCPUs string + CPUSetMems string + CPUShares int64 + CPUQuota int64 + CPUPeriod int64 + Memory int64 + MemorySwap int64 + CgroupParent string + NetworkMode string + ShmSize int64 + Dockerfile string + Ulimits []*container.Ulimit + // BuildArgs needs to be a *string instead of just a string so that + // we can tell the difference between "" (empty string) and no value + // at all (nil). 
See the parsing of buildArgs in + // api/server/router/build/build_routes.go for even more info. + BuildArgs map[string]*string + AuthConfigs map[string]registry.AuthConfig + Context io.Reader + Labels map[string]string + // squash the resulting image's layers to the parent + // preserves the original image and creates a new one from the parent with all + // the changes applied to a single layer + Squash bool + // CacheFrom specifies images that are used for matching cache. Images + // specified here do not need to have a valid parent chain to match cache. + CacheFrom []string + SecurityOpt []string + ExtraHosts []string // List of extra hosts + Target string + SessionID string + // Platforms selects the platforms to build the image for. Multiple platforms + // can be provided if the daemon supports multi-platform builds. + Platforms []ocispec.Platform + // Version specifies the version of the underlying builder to use + Version build.BuilderVersion + // BuildID is an optional identifier that can be passed together with the + // build request. The same identifier can be used to gracefully cancel the + // build with the cancel request. + BuildID string + // Outputs defines configurations for exporting build results. Only supported + // in BuildKit mode + Outputs []ImageBuildOutput +} + +// ImageBuildOutput defines configuration for exporting a build result +type ImageBuildOutput struct { + Type string + Attrs map[string]string +} + +// ImageBuildResult holds information +// returned by a server after building +// an image. 
+type ImageBuildResult struct { + Body io.ReadCloser +} diff --git a/vendor/github.com/moby/moby/client/image_history.go b/vendor/github.com/moby/moby/client/image_history.go new file mode 100644 index 00000000000..8618f1553ee --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_history.go @@ -0,0 +1,55 @@ +package client + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ImageHistoryWithPlatform sets the platform for the image history operation. +func ImageHistoryWithPlatform(platform ocispec.Platform) ImageHistoryOption { + return imageHistoryOptionFunc(func(opt *imageHistoryOpts) error { + if opt.apiOptions.Platform != nil { + return fmt.Errorf("platform already set to %s", *opt.apiOptions.Platform) + } + opt.apiOptions.Platform = &platform + return nil + }) +} + +// ImageHistory returns the changes in an image in history format. +func (cli *Client) ImageHistory(ctx context.Context, imageID string, historyOpts ...ImageHistoryOption) (ImageHistoryResult, error) { + query := url.Values{} + + var opts imageHistoryOpts + for _, o := range historyOpts { + if err := o.Apply(&opts); err != nil { + return ImageHistoryResult{}, err + } + } + + if opts.apiOptions.Platform != nil { + if err := cli.requiresVersion(ctx, "1.48", "platform"); err != nil { + return ImageHistoryResult{}, err + } + + p, err := encodePlatform(opts.apiOptions.Platform) + if err != nil { + return ImageHistoryResult{}, err + } + query.Set("platform", p) + } + + resp, err := cli.get(ctx, "/images/"+imageID+"/history", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ImageHistoryResult{}, err + } + + var history ImageHistoryResult + err = json.NewDecoder(resp.Body).Decode(&history.Items) + return history, err +} diff --git a/vendor/github.com/moby/moby/client/image_history_opts.go b/vendor/github.com/moby/moby/client/image_history_opts.go new file mode 100644 index 00000000000..7fc57afd1cc 
--- /dev/null +++ b/vendor/github.com/moby/moby/client/image_history_opts.go @@ -0,0 +1,29 @@ +package client + +import ( + "github.com/moby/moby/api/types/image" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ImageHistoryOption is a type representing functional options for the image history operation. +type ImageHistoryOption interface { + Apply(*imageHistoryOpts) error +} +type imageHistoryOptionFunc func(opt *imageHistoryOpts) error + +func (f imageHistoryOptionFunc) Apply(o *imageHistoryOpts) error { + return f(o) +} + +type imageHistoryOpts struct { + apiOptions imageHistoryOptions +} + +type imageHistoryOptions struct { + // Platform from the manifest list to use for history. + Platform *ocispec.Platform +} + +type ImageHistoryResult struct { + Items []image.HistoryResponseItem +} diff --git a/vendor/github.com/moby/moby/client/image_import.go b/vendor/github.com/moby/moby/client/image_import.go new file mode 100644 index 00000000000..f383f76d49c --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_import.go @@ -0,0 +1,66 @@ +package client + +import ( + "context" + "io" + "net/url" + + "github.com/distribution/reference" +) + +// ImageImportResult holds the response body returned by the daemon for image import. +type ImageImportResult interface { + io.ReadCloser +} + +// ImageImport creates a new image based on the source options. It returns the +// JSON content in the [ImageImportResult]. 
+// +// The underlying [io.ReadCloser] is automatically closed if the context is canceled, +func (cli *Client) ImageImport(ctx context.Context, source ImageImportSource, ref string, options ImageImportOptions) (ImageImportResult, error) { + if ref != "" { + // Check if the given image name can be resolved + if _, err := reference.ParseNormalizedNamed(ref); err != nil { + return nil, err + } + } + + query := url.Values{} + if source.SourceName != "" { + query.Set("fromSrc", source.SourceName) + } + if ref != "" { + query.Set("repo", ref) + } + if options.Tag != "" { + query.Set("tag", options.Tag) + } + if options.Message != "" { + query.Set("message", options.Message) + } + if p := formatPlatform(options.Platform); p != "unknown" { + // TODO(thaJeztah): would we ever support mutiple platforms here? (would require multiple rootfs tars as well?) + query.Set("platform", p) + } + for _, change := range options.Changes { + query.Add("changes", change) + } + + resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil) + if err != nil { + return nil, err + } + return &imageImportResult{ + ReadCloser: newCancelReadCloser(ctx, resp.Body), + }, nil +} + +// ImageImportResult holds the response body returned by the daemon for image import. +type imageImportResult struct { + io.ReadCloser +} + +var ( + _ io.ReadCloser = (*imageImportResult)(nil) + _ ImageImportResult = (*imageImportResult)(nil) +) diff --git a/vendor/github.com/moby/moby/client/image_import_opts.go b/vendor/github.com/moby/moby/client/image_import_opts.go new file mode 100644 index 00000000000..c70473bdd54 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_import_opts.go @@ -0,0 +1,21 @@ +package client + +import ( + "io" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ImageImportSource holds source information for ImageImport +type ImageImportSource struct { + Source io.Reader // Source is the data to send to the server to create this image from. 
You must set SourceName to "-" to leverage this. + SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute. +} + +// ImageImportOptions holds information to import images from the client host. +type ImageImportOptions struct { + Tag string // Tag is the name to tag this image with. This attribute is deprecated. + Message string // Message is the message to tag the image with + Changes []string // Changes are the raw changes to apply to this image + Platform ocispec.Platform // Platform is the target platform of the image +} diff --git a/vendor/github.com/moby/moby/client/image_inspect.go b/vendor/github.com/moby/moby/client/image_inspect.go new file mode 100644 index 00000000000..635931fd030 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_inspect.go @@ -0,0 +1,62 @@ +package client + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/url" +) + +// ImageInspect returns the image information. +func (cli *Client) ImageInspect(ctx context.Context, imageID string, inspectOpts ...ImageInspectOption) (ImageInspectResult, error) { + if imageID == "" { + return ImageInspectResult{}, objectNotFoundError{object: "image", id: imageID} + } + + var opts imageInspectOpts + for _, opt := range inspectOpts { + if err := opt.Apply(&opts); err != nil { + return ImageInspectResult{}, fmt.Errorf("error applying image inspect option: %w", err) + } + } + + query := url.Values{} + if opts.apiOptions.Manifests { + if err := cli.requiresVersion(ctx, "1.48", "manifests"); err != nil { + return ImageInspectResult{}, err + } + query.Set("manifests", "1") + } + + if opts.apiOptions.Platform != nil { + if err := cli.requiresVersion(ctx, "1.49", "platform"); err != nil { + return ImageInspectResult{}, err + } + platform, err := encodePlatform(opts.apiOptions.Platform) + if err != nil { + return ImageInspectResult{}, err + } + query.Set("platform", platform) + } + + resp, err := cli.get(ctx, 
"/images/"+imageID+"/json", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ImageInspectResult{}, err + } + + buf := opts.raw + if buf == nil { + buf = &bytes.Buffer{} + } + + if _, err := io.Copy(buf, resp.Body); err != nil { + return ImageInspectResult{}, err + } + + var response ImageInspectResult + err = json.Unmarshal(buf.Bytes(), &response) + return response, err +} diff --git a/vendor/github.com/moby/moby/client/image_inspect_opts.go b/vendor/github.com/moby/moby/client/image_inspect_opts.go new file mode 100644 index 00000000000..266c1fe8151 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_inspect_opts.go @@ -0,0 +1,69 @@ +package client + +import ( + "bytes" + + "github.com/moby/moby/api/types/image" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ImageInspectOption is a type representing functional options for the image inspect operation. +type ImageInspectOption interface { + Apply(*imageInspectOpts) error +} +type imageInspectOptionFunc func(opt *imageInspectOpts) error + +func (f imageInspectOptionFunc) Apply(o *imageInspectOpts) error { + return f(o) +} + +// ImageInspectWithRawResponse instructs the client to additionally store the +// raw inspect response in the provided buffer. +func ImageInspectWithRawResponse(raw *bytes.Buffer) ImageInspectOption { + return imageInspectOptionFunc(func(opts *imageInspectOpts) error { + opts.raw = raw + return nil + }) +} + +// ImageInspectWithManifests sets manifests API option for the image inspect operation. +// This option is only available for API version 1.48 and up. +// With this option set, the image inspect operation response includes +// the [image.InspectResponse.Manifests] field if the server is multi-platform +// capable. 
+func ImageInspectWithManifests(manifests bool) ImageInspectOption { + return imageInspectOptionFunc(func(clientOpts *imageInspectOpts) error { + clientOpts.apiOptions.Manifests = manifests + return nil + }) +} + +// ImageInspectWithPlatform sets platform API option for the image inspect operation. +// This option is only available for API version 1.49 and up. +// With this option set, the image inspect operation returns information for the +// specified platform variant of the multi-platform image. +func ImageInspectWithPlatform(platform *ocispec.Platform) ImageInspectOption { + return imageInspectOptionFunc(func(clientOpts *imageInspectOpts) error { + clientOpts.apiOptions.Platform = platform + return nil + }) +} + +type imageInspectOpts struct { + raw *bytes.Buffer + apiOptions imageInspectOptions +} + +type imageInspectOptions struct { + // Manifests returns the image manifests. + Manifests bool + + // Platform selects the specific platform of a multi-platform image to inspect. + // + // This option is only available for API version 1.49 and up. + Platform *ocispec.Platform +} + +type ImageInspectResult struct { + image.InspectResponse +} diff --git a/vendor/github.com/moby/moby/client/image_list.go b/vendor/github.com/moby/moby/client/image_list.go new file mode 100644 index 00000000000..8570709a7f4 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_list.go @@ -0,0 +1,61 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/image" + "github.com/moby/moby/client/pkg/versions" +) + +// ImageList returns a list of images in the docker host. +// +// Experimental: Set the [image.ListOptions.Manifest] option +// to include [image.Summary.Manifests] with information about image manifests. +// This is experimental and might change in the future without any backward +// compatibility. 
+func (cli *Client) ImageList(ctx context.Context, options ImageListOptions) (ImageListResult, error) { + var images []image.Summary + + query := url.Values{} + + options.Filters.updateURLValues(query) + if options.All { + query.Set("all", "1") + } + if options.SharedSize { + query.Set("shared-size", "1") + } + if options.Manifests { + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. + if err := cli.checkVersion(ctx); err != nil { + return ImageListResult{}, err + } + + if versions.GreaterThanOrEqualTo(cli.version, "1.47") { + query.Set("manifests", "1") + } + } + if options.Identity { + if err := cli.requiresVersion(ctx, "1.54", "identity"); err != nil { + return ImageListResult{}, err + } + // Identity data in image list is scoped to manifests. + query.Set("manifests", "1") + query.Set("identity", "1") + } + + resp, err := cli.get(ctx, "/images/json", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ImageListResult{}, err + } + + err = json.NewDecoder(resp.Body).Decode(&images) + return ImageListResult{Items: images}, err +} diff --git a/vendor/github.com/moby/moby/client/image_list_opts.go b/vendor/github.com/moby/moby/client/image_list_opts.go new file mode 100644 index 00000000000..297ab960c5a --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_list_opts.go @@ -0,0 +1,27 @@ +package client + +import "github.com/moby/moby/api/types/image" + +// ImageListOptions holds parameters to list images with. +type ImageListOptions struct { + // All controls whether all images in the graph are filtered, or just + // the heads. + All bool + + // Filters is a JSON-encoded set of filter arguments. + Filters Filters + + // SharedSize indicates whether the shared size of images should be computed. 
+ SharedSize bool + + // Manifests indicates whether the image manifests should be returned. + Manifests bool + + // Identity indicates whether image identity information should be returned. + Identity bool +} + +// ImageListResult holds the result from ImageList. +type ImageListResult struct { + Items []image.Summary +} diff --git a/vendor/github.com/moby/moby/client/image_load.go b/vendor/github.com/moby/moby/client/image_load.go new file mode 100644 index 00000000000..ec5fcae6ebf --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_load.go @@ -0,0 +1,64 @@ +package client + +import ( + "context" + "io" + "net/http" + "net/url" +) + +// ImageLoadResult returns information to the client about a load process. +// It implements [io.ReadCloser] and must be closed to avoid a resource leak. +type ImageLoadResult interface { + io.ReadCloser +} + +// ImageLoad loads an image in the docker host from the client host. It's up +// to the caller to close the [ImageLoadResult] returned by this function. +// +// The underlying [io.ReadCloser] is automatically closed if the context is canceled, +func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, loadOpts ...ImageLoadOption) (ImageLoadResult, error) { + var opts imageLoadOpts + for _, opt := range loadOpts { + if err := opt.Apply(&opts); err != nil { + return nil, err + } + } + + query := url.Values{} + query.Set("quiet", "0") + if opts.apiOptions.Quiet { + query.Set("quiet", "1") + } + if len(opts.apiOptions.Platforms) > 0 { + if err := cli.requiresVersion(ctx, "1.48", "platform"); err != nil { + return nil, err + } + + p, err := encodePlatforms(opts.apiOptions.Platforms...) 
+ if err != nil { + return nil, err + } + query["platform"] = p + } + + resp, err := cli.postRaw(ctx, "/images/load", query, input, http.Header{ + "Content-Type": {"application/x-tar"}, + }) + if err != nil { + return nil, err + } + return &imageLoadResult{ + ReadCloser: newCancelReadCloser(ctx, resp.Body), + }, nil +} + +// imageLoadResult returns information to the client about a load process. +type imageLoadResult struct { + io.ReadCloser +} + +var ( + _ io.ReadCloser = (*imageLoadResult)(nil) + _ ImageLoadResult = (*imageLoadResult)(nil) +) diff --git a/vendor/github.com/moby/moby/client/image_load_opts.go b/vendor/github.com/moby/moby/client/image_load_opts.go new file mode 100644 index 00000000000..aeb4fcf8397 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_load_opts.go @@ -0,0 +1,53 @@ +package client + +import ( + "fmt" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ImageLoadOption is a type representing functional options for the image load operation. +type ImageLoadOption interface { + Apply(*imageLoadOpts) error +} +type imageLoadOptionFunc func(opt *imageLoadOpts) error + +func (f imageLoadOptionFunc) Apply(o *imageLoadOpts) error { + return f(o) +} + +type imageLoadOpts struct { + apiOptions imageLoadOptions +} + +type imageLoadOptions struct { + // Quiet suppresses progress output + Quiet bool + + // Platforms selects the platforms to load if the image is a + // multi-platform image and has multiple variants. + Platforms []ocispec.Platform +} + +// ImageLoadWithQuiet sets the quiet option for the image load operation. +func ImageLoadWithQuiet(quiet bool) ImageLoadOption { + return imageLoadOptionFunc(func(opt *imageLoadOpts) error { + opt.apiOptions.Quiet = quiet + return nil + }) +} + +// ImageLoadWithPlatforms sets the platforms to be loaded from the image. +// +// Platform is an optional parameter that specifies the platform to load from +// the provided multi-platform image. 
Passing a platform only has an effect +// if the input image is a multi-platform image. +func ImageLoadWithPlatforms(platforms ...ocispec.Platform) ImageLoadOption { + return imageLoadOptionFunc(func(opt *imageLoadOpts) error { + if opt.apiOptions.Platforms != nil { + return fmt.Errorf("platforms already set to %v", opt.apiOptions.Platforms) + } + opt.apiOptions.Platforms = platforms + return nil + }) +} diff --git a/vendor/github.com/moby/moby/client/image_prune.go b/vendor/github.com/moby/moby/client/image_prune.go new file mode 100644 index 00000000000..7f3a25b8984 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_prune.go @@ -0,0 +1,39 @@ +package client + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + "github.com/moby/moby/api/types/image" +) + +// ImagePruneOptions holds parameters to prune images. +type ImagePruneOptions struct { + Filters Filters +} + +// ImagePruneResult holds the result from the [Client.ImagePrune] method. +type ImagePruneResult struct { + Report image.PruneReport +} + +// ImagePrune requests the daemon to delete unused data +func (cli *Client) ImagePrune(ctx context.Context, opts ImagePruneOptions) (ImagePruneResult, error) { + query := url.Values{} + opts.Filters.updateURLValues(query) + + resp, err := cli.post(ctx, "/images/prune", query, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ImagePruneResult{}, err + } + + var report image.PruneReport + if err := json.NewDecoder(resp.Body).Decode(&report); err != nil { + return ImagePruneResult{}, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return ImagePruneResult{Report: report}, nil +} diff --git a/vendor/github.com/moby/moby/client/image_pull.go b/vendor/github.com/moby/moby/client/image_pull.go new file mode 100644 index 00000000000..11c0afa4182 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_pull.go @@ -0,0 +1,93 @@ +package client + +import ( + "context" + "io" + "iter" + "net/http" + "net/url" + + cerrdefs 
"github.com/containerd/errdefs" + "github.com/distribution/reference" + "github.com/moby/moby/api/types/jsonstream" + "github.com/moby/moby/api/types/registry" + "github.com/moby/moby/client/internal" +) + +type ImagePullResponse interface { + io.ReadCloser + JSONMessages(ctx context.Context) iter.Seq2[jsonstream.Message, error] + Wait(ctx context.Context) error +} + +// ImagePull requests the docker host to pull an image from a remote registry. +// It executes the privileged function if the operation is unauthorized +// and it tries one more time. +// Callers can: +// - use [ImagePullResponse.Wait] to wait for pull to complete +// - use [ImagePullResponse.JSONMessages] to monitor pull progress as a sequence +// of JSONMessages, [ImagePullResponse.Close] does not need to be called in this case. +// - use the [io.Reader] interface and call [ImagePullResponse.Close] after processing. +func (cli *Client) ImagePull(ctx context.Context, refStr string, options ImagePullOptions) (ImagePullResponse, error) { + // FIXME(vdemeester): there is currently used in a few way in docker/docker + // - if not in trusted content, ref is used to pass the whole reference, and tag is empty + // - if in trusted content, ref is used to pass the reference name, and tag for the digest + // + // ref; https://github.com/docker-archive-public/docker.engine-api/pull/162 + + ref, err := reference.ParseNormalizedNamed(refStr) + if err != nil { + return nil, err + } + + query := url.Values{} + query.Set("fromImage", ref.Name()) + if !options.All { + query.Set("tag", getAPITagFromNamedRef(ref)) + } + if len(options.Platforms) > 0 { + if len(options.Platforms) > 1 { + // TODO(thaJeztah): update API spec and add equivalent check on the daemon. We need this still for older daemons, which would ignore it. 
+ return nil, cerrdefs.ErrInvalidArgument.WithMessage("specifying multiple platforms is not yet supported") + } + query.Set("platform", formatPlatform(options.Platforms[0])) + } + resp, err := cli.tryImageCreate(ctx, query, staticAuth(options.RegistryAuth)) + if cerrdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + resp, err = cli.tryImageCreate(ctx, query, options.PrivilegeFunc) + } + if err != nil { + return nil, err + } + + return internal.NewJSONMessageStream(resp.Body), nil +} + +// getAPITagFromNamedRef returns a tag from the specified reference. +// This function is necessary as long as the docker "server" api expects +// digests to be sent as tags and makes a distinction between the name +// and tag/digest part of a reference. +func getAPITagFromNamedRef(ref reference.Named) string { + if digested, ok := ref.(reference.Digested); ok { + return digested.Digest().String() + } + ref = reference.TagNameOnly(ref) + if tagged, ok := ref.(reference.Tagged); ok { + return tagged.Tag() + } + return "" +} + +func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, resolveAuth registry.RequestAuthConfig) (*http.Response, error) { + hdr := http.Header{} + if resolveAuth != nil { + registryAuth, err := resolveAuth(ctx) + if err != nil { + return nil, err + } + if registryAuth != "" { + hdr.Set(registry.AuthHeader, registryAuth) + } + } + return cli.post(ctx, "/images/create", query, nil, hdr) +} diff --git a/vendor/github.com/moby/moby/client/image_pull_opts.go b/vendor/github.com/moby/moby/client/image_pull_opts.go new file mode 100644 index 00000000000..1b78185ddab --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_pull_opts.go @@ -0,0 +1,25 @@ +package client + +import ( + "context" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ImagePullOptions holds information to pull images. 
+type ImagePullOptions struct { + All bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + + // PrivilegeFunc is a function that clients can supply to retry operations + // after getting an authorization error. This function returns the registry + // authentication header value in base64 encoded format, or an error if the + // privilege request fails. + // + // For details, refer to [github.com/moby/moby/api/types/registry.RequestAuthConfig]. + PrivilegeFunc func(context.Context) (string, error) + + // Platforms selects the platforms to pull. Multiple platforms can be + // specified if the image ia a multi-platform image. + Platforms []ocispec.Platform +} diff --git a/vendor/github.com/moby/moby/client/image_push.go b/vendor/github.com/moby/moby/client/image_push.go new file mode 100644 index 00000000000..5dd8bc14075 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_push.go @@ -0,0 +1,98 @@ +package client + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "iter" + "net/http" + "net/url" + + cerrdefs "github.com/containerd/errdefs" + "github.com/distribution/reference" + "github.com/moby/moby/api/types/jsonstream" + "github.com/moby/moby/api/types/registry" + "github.com/moby/moby/client/internal" +) + +type ImagePushResponse interface { + io.ReadCloser + JSONMessages(ctx context.Context) iter.Seq2[jsonstream.Message, error] + Wait(ctx context.Context) error +} + +// ImagePush requests the docker host to push an image to a remote registry. +// It executes the privileged function if the operation is unauthorized +// and it tries one more time. +// Callers can +// - use [ImagePushResponse.Wait] to wait for push to complete +// - use [ImagePushResponse.JSONMessages] to monitor pull progress as a sequence +// of JSONMessages, [ImagePushResponse.Close] does not need to be called in this case. +// - use the [io.Reader] interface and call [ImagePushResponse.Close] after processing. 
+func (cli *Client) ImagePush(ctx context.Context, image string, options ImagePushOptions) (ImagePushResponse, error) { + ref, err := reference.ParseNormalizedNamed(image) + if err != nil { + return nil, err + } + + if _, ok := ref.(reference.Digested); ok { + return nil, errors.New("cannot push a digest reference") + } + + query := url.Values{} + if !options.All { + ref = reference.TagNameOnly(ref) + if tagged, ok := ref.(reference.Tagged); ok { + query.Set("tag", tagged.Tag()) + } + } + + if options.Platform != nil { + if err := cli.requiresVersion(ctx, "1.46", "platform"); err != nil { + return nil, err + } + + p := *options.Platform + pJson, err := json.Marshal(p) + if err != nil { + return nil, fmt.Errorf("invalid platform: %v", err) + } + + query.Set("platform", string(pJson)) + } + + resp, err := cli.tryImagePush(ctx, ref.Name(), query, staticAuth(options.RegistryAuth)) + if cerrdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + resp, err = cli.tryImagePush(ctx, ref.Name(), query, options.PrivilegeFunc) + } + if err != nil { + return nil, err + } + return internal.NewJSONMessageStream(resp.Body), nil +} + +func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, resolveAuth registry.RequestAuthConfig) (*http.Response, error) { + hdr := http.Header{} + if resolveAuth != nil { + registryAuth, err := resolveAuth(ctx) + if err != nil { + return nil, err + } + if registryAuth != "" { + hdr.Set(registry.AuthHeader, registryAuth) + } + } + + // Always send a body (which may be an empty JSON document ("{}")) to prevent + // EOF errors on older daemons which had faulty fallback code for handling + // authentication in the body when no auth-header was set, resulting in; + // + // Error response from daemon: bad parameters and missing X-Registry-Auth: invalid X-Registry-Auth header: EOF + // + // We use [http.NoBody], which gets marshaled to an empty JSON document. 
+ // + // see: https://github.com/moby/moby/commit/ea29dffaa541289591aa44fa85d2a596ce860e16 + return cli.post(ctx, "/images/"+imageID+"/push", query, http.NoBody, hdr) +} diff --git a/vendor/github.com/moby/moby/client/image_push_opts.go b/vendor/github.com/moby/moby/client/image_push_opts.go new file mode 100644 index 00000000000..591c6b60579 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_push_opts.go @@ -0,0 +1,26 @@ +package client + +import ( + "context" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ImagePushOptions holds information to push images. +type ImagePushOptions struct { + All bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + + // PrivilegeFunc is a function that clients can supply to retry operations + // after getting an authorization error. This function returns the registry + // authentication header value in base64 encoded format, or an error if the + // privilege request fails. + // + // For details, refer to [github.com/moby/moby/api/types/registry.RequestAuthConfig]. + PrivilegeFunc func(context.Context) (string, error) + + // Platform is an optional field that selects a specific platform to push + // when the image is a multi-platform image. + // Using this will only push a single platform-specific manifest. + Platform *ocispec.Platform `json:",omitempty"` +} diff --git a/vendor/github.com/moby/moby/client/image_remove.go b/vendor/github.com/moby/moby/client/image_remove.go new file mode 100644 index 00000000000..095b4f04c40 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_remove.go @@ -0,0 +1,39 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/image" +) + +// ImageRemove removes an image from the docker host. 
+func (cli *Client) ImageRemove(ctx context.Context, imageID string, options ImageRemoveOptions) (ImageRemoveResult, error) { + query := url.Values{} + + if options.Force { + query.Set("force", "1") + } + if !options.PruneChildren { + query.Set("noprune", "1") + } + + if len(options.Platforms) > 0 { + p, err := encodePlatforms(options.Platforms...) + if err != nil { + return ImageRemoveResult{}, err + } + query["platforms"] = p + } + + resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ImageRemoveResult{}, err + } + + var dels []image.DeleteResponse + err = json.NewDecoder(resp.Body).Decode(&dels) + return ImageRemoveResult{Items: dels}, err +} diff --git a/vendor/github.com/moby/moby/client/image_remove_opts.go b/vendor/github.com/moby/moby/client/image_remove_opts.go new file mode 100644 index 00000000000..3b5d8a77f7e --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_remove_opts.go @@ -0,0 +1,18 @@ +package client + +import ( + "github.com/moby/moby/api/types/image" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ImageRemoveOptions holds parameters to remove images. +type ImageRemoveOptions struct { + Platforms []ocispec.Platform + Force bool + PruneChildren bool +} + +// ImageRemoveResult holds the delete responses returned by the daemon. +type ImageRemoveResult struct { + Items []image.DeleteResponse +} diff --git a/vendor/github.com/moby/moby/client/image_save.go b/vendor/github.com/moby/moby/client/image_save.go new file mode 100644 index 00000000000..508f88b7d2e --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_save.go @@ -0,0 +1,59 @@ +package client + +import ( + "context" + "io" + "net/url" +) + +type ImageSaveResult interface { + io.ReadCloser +} + +// ImageSave retrieves one or more images from the docker host as an +// [ImageSaveResult]. 
Callers should close the reader, but the underlying +// [io.ReadCloser] is automatically closed if the context is canceled, +// +// Platforms is an optional parameter that specifies the platforms to save +// from the image. Passing a platform only has an effect if the input image +// is a multi-platform image. +func (cli *Client) ImageSave(ctx context.Context, imageIDs []string, saveOpts ...ImageSaveOption) (ImageSaveResult, error) { + var opts imageSaveOpts + for _, opt := range saveOpts { + if err := opt.Apply(&opts); err != nil { + return nil, err + } + } + + query := url.Values{ + "names": imageIDs, + } + + if len(opts.apiOptions.Platforms) > 0 { + if err := cli.requiresVersion(ctx, "1.48", "platform"); err != nil { + return nil, err + } + p, err := encodePlatforms(opts.apiOptions.Platforms...) + if err != nil { + return nil, err + } + query["platform"] = p + } + + resp, err := cli.get(ctx, "/images/get", query, nil) + if err != nil { + return nil, err + } + return &imageSaveResult{ + ReadCloser: newCancelReadCloser(ctx, resp.Body), + }, nil +} + +type imageSaveResult struct { + io.ReadCloser +} + +var ( + _ io.ReadCloser = (*imageSaveResult)(nil) + _ ImageSaveResult = (*imageSaveResult)(nil) +) diff --git a/vendor/github.com/moby/moby/client/image_save_opts.go b/vendor/github.com/moby/moby/client/image_save_opts.go new file mode 100644 index 00000000000..9c0b3b74a4e --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_save_opts.go @@ -0,0 +1,41 @@ +package client + +import ( + "fmt" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +type ImageSaveOption interface { + Apply(*imageSaveOpts) error +} + +type imageSaveOptionFunc func(opt *imageSaveOpts) error + +func (f imageSaveOptionFunc) Apply(o *imageSaveOpts) error { + return f(o) +} + +// ImageSaveWithPlatforms sets the platforms to be saved from the image. It +// produces an error if platforms are already set. 
This option only has an +// effect if the input image is a multi-platform image. +func ImageSaveWithPlatforms(platforms ...ocispec.Platform) ImageSaveOption { + // TODO(thaJeztah): verify the GoDoc; do we produce an error for a single-platform image without the given platform? + return imageSaveOptionFunc(func(opt *imageSaveOpts) error { + if opt.apiOptions.Platforms != nil { + return fmt.Errorf("platforms already set to %v", opt.apiOptions.Platforms) + } + opt.apiOptions.Platforms = platforms + return nil + }) +} + +type imageSaveOpts struct { + apiOptions imageSaveOptions +} + +type imageSaveOptions struct { + // Platforms selects the platforms to save if the image is a + // multi-platform image and has multiple variants. + Platforms []ocispec.Platform +} diff --git a/vendor/github.com/moby/moby/client/image_search.go b/vendor/github.com/moby/moby/client/image_search.go new file mode 100644 index 00000000000..6e280906a2f --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_search.go @@ -0,0 +1,47 @@ +package client + +import ( + "context" + "encoding/json" + "net/http" + "net/url" + "strconv" + + cerrdefs "github.com/containerd/errdefs" + "github.com/moby/moby/api/types/registry" +) + +// ImageSearch makes the docker host search by a term in a remote registry. +// The list of results is not sorted in any fashion. 
+func (cli *Client) ImageSearch(ctx context.Context, term string, options ImageSearchOptions) (ImageSearchResult, error) { + var results []registry.SearchResult + query := url.Values{} + query.Set("term", term) + if options.Limit > 0 { + query.Set("limit", strconv.Itoa(options.Limit)) + } + + options.Filters.updateURLValues(query) + + resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) + defer ensureReaderClosed(resp) + if cerrdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx) + if privilegeErr != nil { + return ImageSearchResult{}, privilegeErr + } + resp, err = cli.tryImageSearch(ctx, query, newAuthHeader) + } + if err != nil { + return ImageSearchResult{}, err + } + + err = json.NewDecoder(resp.Body).Decode(&results) + return ImageSearchResult{Items: results}, err +} + +func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (*http.Response, error) { + return cli.get(ctx, "/images/search", query, http.Header{ + registry.AuthHeader: {registryAuth}, + }) +} diff --git a/vendor/github.com/moby/moby/client/image_search_opts.go b/vendor/github.com/moby/moby/client/image_search_opts.go new file mode 100644 index 00000000000..95a7d41fa0e --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_search_opts.go @@ -0,0 +1,27 @@ +package client + +import ( + "context" + + "github.com/moby/moby/api/types/registry" +) + +// ImageSearchResult wraps results returned by ImageSearch. +type ImageSearchResult struct { + Items []registry.SearchResult +} + +// ImageSearchOptions holds parameters to search images with. +type ImageSearchOptions struct { + RegistryAuth string + + // PrivilegeFunc is a function that clients can supply to retry operations + // after getting an authorization error. This function returns the registry + // authentication header value in base64 encoded format, or an error if the + // privilege request fails. 
+ // + // For details, refer to [github.com/moby/moby/api/types/registry.RequestAuthConfig]. + PrivilegeFunc func(context.Context) (string, error) + Filters Filters + Limit int +} diff --git a/vendor/github.com/moby/moby/client/image_tag.go b/vendor/github.com/moby/moby/client/image_tag.go new file mode 100644 index 00000000000..5566f4624f4 --- /dev/null +++ b/vendor/github.com/moby/moby/client/image_tag.go @@ -0,0 +1,48 @@ +package client + +import ( + "context" + "errors" + "fmt" + "net/url" + + "github.com/distribution/reference" +) + +type ImageTagOptions struct { + Source string + Target string +} + +type ImageTagResult struct{} + +// ImageTag tags an image in the docker host +func (cli *Client) ImageTag(ctx context.Context, options ImageTagOptions) (ImageTagResult, error) { + source := options.Source + target := options.Target + + if _, err := reference.ParseAnyReference(source); err != nil { + return ImageTagResult{}, fmt.Errorf("error parsing reference: %q is not a valid repository/tag: %w", source, err) + } + + ref, err := reference.ParseNormalizedNamed(target) + if err != nil { + return ImageTagResult{}, fmt.Errorf("error parsing reference: %q is not a valid repository/tag: %w", target, err) + } + + if _, ok := ref.(reference.Digested); ok { + return ImageTagResult{}, errors.New("refusing to create a tag with a digest reference") + } + + ref = reference.TagNameOnly(ref) + + query := url.Values{} + query.Set("repo", ref.Name()) + if tagged, ok := ref.(reference.Tagged); ok { + query.Set("tag", tagged.Tag()) + } + + resp, err := cli.post(ctx, "/images/"+source+"/tag", query, nil, nil) + defer ensureReaderClosed(resp) + return ImageTagResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/internal/json-stream.go b/vendor/github.com/moby/moby/client/internal/json-stream.go new file mode 100644 index 00000000000..07d07bd7e35 --- /dev/null +++ b/vendor/github.com/moby/moby/client/internal/json-stream.go @@ -0,0 +1,50 @@ +package internal + +import ( + 
"encoding/json" + "io" + "slices" + + "github.com/moby/moby/api/types" +) + +const rs = 0x1E + +type DecoderFn func(v any) error + +// NewJSONStreamDecoder builds adequate DecoderFn to read json records formatted with specified content-type +func NewJSONStreamDecoder(r io.Reader, contentType string) DecoderFn { + switch contentType { + case types.MediaTypeJSONSequence: + return json.NewDecoder(NewRSFilterReader(r)).Decode + case types.MediaTypeJSON, types.MediaTypeNDJSON, types.MediaTypeJSONLines: + fallthrough + default: + return json.NewDecoder(r).Decode + } +} + +// RSFilterReader wraps an io.Reader and filters out ASCII RS characters +type RSFilterReader struct { + reader io.Reader + buffer []byte +} + +// NewRSFilterReader creates a new RSFilterReader that filters out RS characters +func NewRSFilterReader(r io.Reader) *RSFilterReader { + return &RSFilterReader{ + reader: r, + buffer: make([]byte, 4096), // Internal buffer for reading chunks + } +} + +// Read implements the io.Reader interface, filtering out RS characters +func (r *RSFilterReader) Read(p []byte) (n int, err error) { + if len(p) == 0 { + return 0, nil + } + + n, err = r.reader.Read(p) + filtered := slices.DeleteFunc(p[:n], func(b byte) bool { return b == rs }) + return len(filtered), err +} diff --git a/vendor/github.com/moby/moby/client/internal/jsonmessages.go b/vendor/github.com/moby/moby/client/internal/jsonmessages.go new file mode 100644 index 00000000000..31262fd8e28 --- /dev/null +++ b/vendor/github.com/moby/moby/client/internal/jsonmessages.go @@ -0,0 +1,84 @@ +package internal + +import ( + "context" + "encoding/json" + "errors" + "io" + "iter" + "sync" + + "github.com/moby/moby/api/types/jsonstream" +) + +func NewJSONMessageStream(rc io.ReadCloser) Stream { + if rc == nil { + panic("nil io.ReadCloser") + } + return Stream{ + rc: rc, + close: sync.OnceValue(rc.Close), + } +} + +type Stream struct { + rc io.ReadCloser + close func() error +} + +// Read implements io.ReadCloser +func (r 
Stream) Read(p []byte) (n int, err error) { + if r.rc == nil { + return 0, io.EOF + } + return r.rc.Read(p) +} + +// Close implements io.ReadCloser +func (r Stream) Close() error { + if r.close == nil { + return nil + } + return r.close() +} + +var _ io.ReadCloser = Stream{} + +// JSONMessages decodes the response stream as a sequence of JSONMessages. +// if stream ends or context is cancelled, the underlying [io.Reader] is closed. +func (r Stream) JSONMessages(ctx context.Context) iter.Seq2[jsonstream.Message, error] { + stop := context.AfterFunc(ctx, func() { + _ = r.Close() + }) + dec := json.NewDecoder(r) + return func(yield func(jsonstream.Message, error) bool) { + defer func() { + stop() // unregister AfterFunc + r.Close() + }() + for { + var jm jsonstream.Message + err := dec.Decode(&jm) + if errors.Is(err, io.EOF) { + break + } + if ctx.Err() != nil { + yield(jm, ctx.Err()) + return + } + if !yield(jm, err) { + return + } + } + } +} + +// Wait waits for operation to complete and detects errors reported as JSONMessage +func (r Stream) Wait(ctx context.Context) error { + for _, err := range r.JSONMessages(ctx) { + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/moby/moby/client/internal/timestamp/timestamp.go b/vendor/github.com/moby/moby/client/internal/timestamp/timestamp.go new file mode 100644 index 00000000000..7b175f0c93b --- /dev/null +++ b/vendor/github.com/moby/moby/client/internal/timestamp/timestamp.go @@ -0,0 +1,131 @@ +package timestamp + +import ( + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// These are additional predefined layouts for use in Time.Format and Time.Parse +// with --since and --until parameters for `docker logs` and `docker events` +const ( + rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone + rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone + dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00 + dateLocal = 
"2006-01-02" // RFC3339 with local timezone and time at 00:00:00 +) + +// GetTimestamp tries to parse given string as golang duration, +// then RFC3339 time and finally as a Unix timestamp. If +// any of these were successful, it returns a Unix timestamp +// as string otherwise returns the given value back. +// In case of duration input, the returned timestamp is computed +// as the given reference time minus the amount of the duration. +func GetTimestamp(value string, reference time.Time) (string, error) { + if d, err := time.ParseDuration(value); value != "0" && err == nil { + return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil + } + + var format string + // if the string has a Z or a + or three dashes use parse otherwise use parseinlocation + parseInLocation := !strings.ContainsAny(value, "zZ+") && strings.Count(value, "-") != 3 + + if strings.Contains(value, ".") { + if parseInLocation { + format = rFC3339NanoLocal + } else { + format = time.RFC3339Nano + } + } else if strings.Contains(value, "T") { + // we want the number of colons in the T portion of the timestamp + tcolons := strings.Count(value, ":") + // if parseInLocation is off and we have a +/- zone offset (not Z) then + // there will be an extra colon in the input for the tz offset subtract that + // colon from the tcolons count + if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 { + tcolons-- + } + if parseInLocation { + switch tcolons { + case 0: + format = "2006-01-02T15" + case 1: + format = "2006-01-02T15:04" + default: + format = rFC3339Local + } + } else { + switch tcolons { + case 0: + format = "2006-01-02T15Z07:00" + case 1: + format = "2006-01-02T15:04Z07:00" + default: + format = time.RFC3339 + } + } + } else if parseInLocation { + format = dateLocal + } else { + format = dateWithZone + } + + var t time.Time + var err error + + if parseInLocation { + t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone())) + } else { + t, err = 
time.Parse(format, value) + } + + if err != nil { + // if there is a `-` then it's an RFC3339 like timestamp + if strings.Contains(value, "-") { + return "", err // was probably an RFC3339 like timestamp but the parser failed with an error + } + if _, _, err := parseTimestamp(value); err != nil { + return "", fmt.Errorf("failed to parse value as time or duration: %q", value) + } + return value, nil // unix timestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server) + } + + return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil +} + +// ParseTimestamps returns seconds and nanoseconds from a timestamp that has +// the format ("%d.%09d", time.Unix(), int64(time.Nanosecond())). +// If the incoming nanosecond portion is longer than 9 digits it is truncated. +// The expectation is that the seconds and nanoseconds will be used to create a +// time variable. For example: +// +// seconds, nanoseconds, _ := ParseTimestamp("1136073600.000000001",0) +// since := time.Unix(seconds, nanoseconds) +// +// returns seconds as defaultSeconds if value == "" +func ParseTimestamps(value string, defaultSeconds int64) (seconds int64, nanoseconds int64, _ error) { + if value == "" { + return defaultSeconds, 0, nil + } + return parseTimestamp(value) +} + +func parseTimestamp(value string) (seconds int64, nanoseconds int64, _ error) { + s, n, ok := strings.Cut(value, ".") + sec, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return sec, 0, err + } + if !ok { + return sec, 0, nil + } + nsec, err := strconv.ParseInt(n, 10, 64) + if err != nil { + return sec, nsec, err + } + // should already be in nanoseconds but just in case convert n to nanoseconds + nsec = int64(float64(nsec) * math.Pow(float64(10), float64(9-len(n)))) + return sec, nsec, nil +} diff --git a/vendor/github.com/moby/moby/client/login.go b/vendor/github.com/moby/moby/client/login.go new file mode 100644 index 00000000000..b295080ab7d 
--- /dev/null +++ b/vendor/github.com/moby/moby/client/login.go @@ -0,0 +1,45 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/registry" +) + +type RegistryLoginOptions struct { + Username string + Password string + ServerAddress string + IdentityToken string + RegistryToken string +} + +// RegistryLoginResult holds the result of a RegistryLogin query. +type RegistryLoginResult struct { + Auth registry.AuthResponse +} + +// RegistryLogin authenticates the docker server with a given docker registry. +// It returns unauthorizedError when the authentication fails. +func (cli *Client) RegistryLogin(ctx context.Context, options RegistryLoginOptions) (RegistryLoginResult, error) { + auth := registry.AuthConfig{ + Username: options.Username, + Password: options.Password, + ServerAddress: options.ServerAddress, + IdentityToken: options.IdentityToken, + RegistryToken: options.RegistryToken, + } + + resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) + defer ensureReaderClosed(resp) + + if err != nil { + return RegistryLoginResult{}, err + } + + var response registry.AuthResponse + err = json.NewDecoder(resp.Body).Decode(&response) + return RegistryLoginResult{Auth: response}, err +} diff --git a/vendor/github.com/moby/moby/client/network_connect.go b/vendor/github.com/moby/moby/client/network_connect.go new file mode 100644 index 00000000000..40db955a90b --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_connect.go @@ -0,0 +1,40 @@ +package client + +import ( + "context" + + "github.com/moby/moby/api/types/network" +) + +// NetworkConnectOptions represents the data to be used to connect a container to the +// network. +type NetworkConnectOptions struct { + Container string + EndpointConfig *network.EndpointSettings +} + +// NetworkConnectResult represents the result of a NetworkConnect operation. +type NetworkConnectResult struct { + // Currently empty; placeholder for future fields. 
+} + +// NetworkConnect connects a container to an existent network in the docker host. +func (cli *Client) NetworkConnect(ctx context.Context, networkID string, options NetworkConnectOptions) (NetworkConnectResult, error) { + networkID, err := trimID("network", networkID) + if err != nil { + return NetworkConnectResult{}, err + } + + containerID, err := trimID("container", options.Container) + if err != nil { + return NetworkConnectResult{}, err + } + + nc := network.ConnectRequest{ + Container: containerID, + EndpointConfig: options.EndpointConfig, + } + resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil) + defer ensureReaderClosed(resp) + return NetworkConnectResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/network_create.go b/vendor/github.com/moby/moby/client/network_create.go new file mode 100644 index 00000000000..25ea32af451 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_create.go @@ -0,0 +1,69 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/network" +) + +// NetworkCreateOptions holds options to create a network. +type NetworkCreateOptions struct { + Driver string // Driver is the driver-name used to create the network (e.g. `bridge`, `overlay`) + Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level). + EnableIPv4 *bool // EnableIPv4 represents whether to enable IPv4. + EnableIPv6 *bool // EnableIPv6 represents whether to enable IPv6. + IPAM *network.IPAM // IPAM is the network's IP Address Management. + Internal bool // Internal represents if the network is used internal only. + Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. + Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster. + ConfigOnly bool // ConfigOnly creates a config-only network. 
Config-only networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services. + ConfigFrom string // ConfigFrom specifies the source which will provide the configuration for this network. The specified network must be a config-only network; see [CreateOptions.ConfigOnly]. + Options map[string]string // Options specifies the network-specific options to use for when creating the network. + Labels map[string]string // Labels holds metadata specific to the network being created. +} + +// NetworkCreateResult represents the result of a network create operation. +type NetworkCreateResult struct { + ID string + + Warning []string +} + +// NetworkCreate creates a new network in the docker host. +func (cli *Client) NetworkCreate(ctx context.Context, name string, options NetworkCreateOptions) (NetworkCreateResult, error) { + req := network.CreateRequest{ + Name: name, + Driver: options.Driver, + Scope: options.Scope, + EnableIPv4: options.EnableIPv4, + EnableIPv6: options.EnableIPv6, + IPAM: options.IPAM, + Internal: options.Internal, + Attachable: options.Attachable, + Ingress: options.Ingress, + ConfigOnly: options.ConfigOnly, + Options: options.Options, + Labels: options.Labels, + } + + if options.ConfigFrom != "" { + req.ConfigFrom = &network.ConfigReference{Network: options.ConfigFrom} + } + + resp, err := cli.post(ctx, "/networks/create", nil, req, nil) + defer ensureReaderClosed(resp) + if err != nil { + return NetworkCreateResult{}, err + } + + var response network.CreateResponse + err = json.NewDecoder(resp.Body).Decode(&response) + + var warnings []string + if response.Warning != "" { + warnings = []string{response.Warning} + } + + return NetworkCreateResult{ID: response.ID, Warning: warnings}, err +} diff --git a/vendor/github.com/moby/moby/client/network_disconnect.go b/vendor/github.com/moby/moby/client/network_disconnect.go new file mode 100644 index 
00000000000..64a1796b8de --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_disconnect.go @@ -0,0 +1,40 @@ +package client + +import ( + "context" + + "github.com/moby/moby/api/types/network" +) + +// NetworkDisconnectOptions represents the data to be used to disconnect a container +// from the network. +type NetworkDisconnectOptions struct { + Container string + Force bool +} + +// NetworkDisconnectResult represents the result of a NetworkDisconnect operation. +type NetworkDisconnectResult struct { + // Currently empty; placeholder for future fields. +} + +// NetworkDisconnect disconnects a container from an existent network in the docker host. +func (cli *Client) NetworkDisconnect(ctx context.Context, networkID string, options NetworkDisconnectOptions) (NetworkDisconnectResult, error) { + networkID, err := trimID("network", networkID) + if err != nil { + return NetworkDisconnectResult{}, err + } + + containerID, err := trimID("container", options.Container) + if err != nil { + return NetworkDisconnectResult{}, err + } + + req := network.DisconnectRequest{ + Container: containerID, + Force: options.Force, + } + resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, req, nil) + defer ensureReaderClosed(resp) + return NetworkDisconnectResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/network_inspect.go b/vendor/github.com/moby/moby/client/network_inspect.go new file mode 100644 index 00000000000..77578052768 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_inspect.go @@ -0,0 +1,39 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/network" +) + +// NetworkInspectResult contains the result of a network inspection. +type NetworkInspectResult struct { + Network network.Inspect + Raw json.RawMessage +} + +// NetworkInspect returns the information for a specific network configured in the docker host. 
+func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options NetworkInspectOptions) (NetworkInspectResult, error) { + networkID, err := trimID("network", networkID) + if err != nil { + return NetworkInspectResult{}, err + } + query := url.Values{} + if options.Verbose { + query.Set("verbose", "true") + } + if options.Scope != "" { + query.Set("scope", options.Scope) + } + + resp, err := cli.get(ctx, "/networks/"+networkID, query, nil) + if err != nil { + return NetworkInspectResult{}, err + } + + var out NetworkInspectResult + out.Raw, err = decodeWithRaw(resp, &out.Network) + return out, err +} diff --git a/vendor/github.com/moby/moby/client/network_inspect_opts.go b/vendor/github.com/moby/moby/client/network_inspect_opts.go new file mode 100644 index 00000000000..d83f113e17b --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_inspect_opts.go @@ -0,0 +1,7 @@ +package client + +// NetworkInspectOptions holds parameters to inspect network. +type NetworkInspectOptions struct { + Scope string + Verbose bool +} diff --git a/vendor/github.com/moby/moby/client/network_list.go b/vendor/github.com/moby/moby/client/network_list.go new file mode 100644 index 00000000000..d65f560974e --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_list.go @@ -0,0 +1,28 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/network" +) + +// NetworkListResult holds the result from the [Client.NetworkList] method. +type NetworkListResult struct { + Items []network.Summary +} + +// NetworkList returns the list of networks configured in the docker host. 
+func (cli *Client) NetworkList(ctx context.Context, options NetworkListOptions) (NetworkListResult, error) { + query := url.Values{} + options.Filters.updateURLValues(query) + resp, err := cli.get(ctx, "/networks", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return NetworkListResult{}, err + } + var res NetworkListResult + err = json.NewDecoder(resp.Body).Decode(&res.Items) + return res, err +} diff --git a/vendor/github.com/moby/moby/client/network_list_opts.go b/vendor/github.com/moby/moby/client/network_list_opts.go new file mode 100644 index 00000000000..0d21ab31382 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_list_opts.go @@ -0,0 +1,6 @@ +package client + +// NetworkListOptions holds parameters to filter the list of networks with. +type NetworkListOptions struct { + Filters Filters +} diff --git a/vendor/github.com/moby/moby/client/network_prune.go b/vendor/github.com/moby/moby/client/network_prune.go new file mode 100644 index 00000000000..55f7cac029b --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_prune.go @@ -0,0 +1,39 @@ +package client + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + "github.com/moby/moby/api/types/network" +) + +// NetworkPruneOptions holds parameters to prune networks. +type NetworkPruneOptions struct { + Filters Filters +} + +// NetworkPruneResult holds the result from the [Client.NetworkPrune] method. 
+type NetworkPruneResult struct { + Report network.PruneReport +} + +// NetworkPrune requests the daemon to delete unused networks +func (cli *Client) NetworkPrune(ctx context.Context, opts NetworkPruneOptions) (NetworkPruneResult, error) { + query := url.Values{} + opts.Filters.updateURLValues(query) + + resp, err := cli.post(ctx, "/networks/prune", query, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return NetworkPruneResult{}, err + } + + var report network.PruneReport + if err := json.NewDecoder(resp.Body).Decode(&report); err != nil { + return NetworkPruneResult{}, fmt.Errorf("Error retrieving network prune report: %v", err) + } + + return NetworkPruneResult{Report: report}, nil +} diff --git a/vendor/github.com/moby/moby/client/network_remove.go b/vendor/github.com/moby/moby/client/network_remove.go new file mode 100644 index 00000000000..2bceb0d9340 --- /dev/null +++ b/vendor/github.com/moby/moby/client/network_remove.go @@ -0,0 +1,26 @@ +package client + +import ( + "context" +) + +// NetworkRemoveOptions specifies options for removing a network. +type NetworkRemoveOptions struct { + // No options currently; placeholder for future use. +} + +// NetworkRemoveResult represents the result of a network removal operation. +type NetworkRemoveResult struct { + // No fields currently; placeholder for future use. +} + +// NetworkRemove removes an existent network from the docker host. 
+func (cli *Client) NetworkRemove(ctx context.Context, networkID string, options NetworkRemoveOptions) (NetworkRemoveResult, error) { + networkID, err := trimID("network", networkID) + if err != nil { + return NetworkRemoveResult{}, err + } + resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) + defer ensureReaderClosed(resp) + return NetworkRemoveResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/node_inspect.go b/vendor/github.com/moby/moby/client/node_inspect.go new file mode 100644 index 00000000000..cd4ce0119f3 --- /dev/null +++ b/vendor/github.com/moby/moby/client/node_inspect.go @@ -0,0 +1,41 @@ +package client + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/moby/moby/api/types/swarm" +) + +// NodeInspectOptions holds parameters to inspect nodes with. +type NodeInspectOptions struct{} + +type NodeInspectResult struct { + Node swarm.Node + Raw json.RawMessage +} + +// NodeInspect returns the node information. +func (cli *Client) NodeInspect(ctx context.Context, nodeID string, options NodeInspectOptions) (NodeInspectResult, error) { + nodeID, err := trimID("node", nodeID) + if err != nil { + return NodeInspectResult{}, err + } + resp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return NodeInspectResult{}, err + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return NodeInspectResult{}, err + } + + var response swarm.Node + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return NodeInspectResult{Node: response, Raw: body}, err +} diff --git a/vendor/github.com/moby/moby/client/node_list.go b/vendor/github.com/moby/moby/client/node_list.go new file mode 100644 index 00000000000..1a1b57922e7 --- /dev/null +++ b/vendor/github.com/moby/moby/client/node_list.go @@ -0,0 +1,33 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/swarm" +) + +// NodeListOptions 
holds parameters to list nodes with. +type NodeListOptions struct { + Filters Filters +} + +type NodeListResult struct { + Items []swarm.Node +} + +// NodeList returns the list of nodes. +func (cli *Client) NodeList(ctx context.Context, options NodeListOptions) (NodeListResult, error) { + query := url.Values{} + options.Filters.updateURLValues(query) + resp, err := cli.get(ctx, "/nodes", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return NodeListResult{}, err + } + + var nodes []swarm.Node + err = json.NewDecoder(resp.Body).Decode(&nodes) + return NodeListResult{Items: nodes}, err +} diff --git a/vendor/github.com/moby/moby/client/node_remove.go b/vendor/github.com/moby/moby/client/node_remove.go new file mode 100644 index 00000000000..56c39d67a61 --- /dev/null +++ b/vendor/github.com/moby/moby/client/node_remove.go @@ -0,0 +1,29 @@ +package client + +import ( + "context" + "net/url" +) + +// NodeRemoveOptions holds parameters to remove nodes with. +type NodeRemoveOptions struct { + Force bool +} +type NodeRemoveResult struct{} + +// NodeRemove removes a Node. +func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options NodeRemoveOptions) (NodeRemoveResult, error) { + nodeID, err := trimID("node", nodeID) + if err != nil { + return NodeRemoveResult{}, err + } + + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil) + defer ensureReaderClosed(resp) + return NodeRemoveResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/node_update.go b/vendor/github.com/moby/moby/client/node_update.go new file mode 100644 index 00000000000..4bc7c3b6982 --- /dev/null +++ b/vendor/github.com/moby/moby/client/node_update.go @@ -0,0 +1,30 @@ +package client + +import ( + "context" + "net/url" + + "github.com/moby/moby/api/types/swarm" +) + +// NodeUpdateOptions holds parameters to update nodes with. 
+type NodeUpdateOptions struct { + Version swarm.Version + Spec swarm.NodeSpec +} + +type NodeUpdateResult struct{} + +// NodeUpdate updates a Node. +func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, options NodeUpdateOptions) (NodeUpdateResult, error) { + nodeID, err := trimID("node", nodeID) + if err != nil { + return NodeUpdateResult{}, err + } + + query := url.Values{} + query.Set("version", options.Version.String()) + resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, options.Spec, nil) + defer ensureReaderClosed(resp) + return NodeUpdateResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/ping.go b/vendor/github.com/moby/moby/client/ping.go new file mode 100644 index 00000000000..d315e4b98be --- /dev/null +++ b/vendor/github.com/moby/moby/client/ping.go @@ -0,0 +1,166 @@ +package client + +import ( + "context" + "net/http" + "path" + "strings" + + "github.com/moby/moby/api/types/build" + "github.com/moby/moby/api/types/swarm" +) + +// PingOptions holds options for [client.Ping]. +type PingOptions struct { + // NegotiateAPIVersion queries the API and updates the version to match the API + // version. NegotiateAPIVersion downgrades the client's API version to match the + // APIVersion if the ping version is lower than the default version. If the API + // version reported by the server is higher than the maximum version supported + // by the client, it uses the client's maximum version. + // + // If a manual override is in place, either through the "DOCKER_API_VERSION" + // ([EnvOverrideAPIVersion]) environment variable, or if the client is initialized + // with a fixed version ([WithAPIVersion]), no negotiation is performed. 
+ // + // If the API server's ping response does not contain an API version, or if the + // client did not get a successful ping response, it assumes it is connected with + // an old daemon that does not support API version negotiation, in which case it + // downgrades to the lowest supported API version. + NegotiateAPIVersion bool + + // ForceNegotiate forces the client to re-negotiate the API version, even if + // API-version negotiation already happened or it the client is configured + // with a fixed version (using [WithAPIVersion] or [WithAPIVersionFromEnv]). + // + // This option has no effect if NegotiateAPIVersion is not set. + ForceNegotiate bool +} + +// PingResult holds the result of a [Client.Ping] API call. +type PingResult struct { + APIVersion string + OSType string + Experimental bool + BuilderVersion build.BuilderVersion + + // SwarmStatus provides information about the current swarm status of the + // engine, obtained from the "Swarm" header in the API response. + // + // It can be a nil struct if the API version does not provide this header + // in the ping response, or if an error occurred, in which case the client + // should use other ways to get the current swarm status, such as the /swarm + // endpoint. + SwarmStatus *SwarmStatus +} + +// SwarmStatus provides information about the current swarm status and role, +// obtained from the "Swarm" header in the API response. +type SwarmStatus struct { + // NodeState represents the state of the node. + NodeState swarm.LocalNodeState + + // ControlAvailable indicates if the node is a swarm manager. + ControlAvailable bool +} + +// Ping pings the server and returns the value of the "Docker-Experimental", +// "Builder-Version", "OS-Type" & "API-Version" headers. It attempts to use +// a HEAD request on the endpoint, but falls back to GET if HEAD is not supported +// by the daemon. 
It ignores internal server errors returned by the API, which +// may be returned if the daemon is in an unhealthy state, but returns errors +// for other non-success status codes, failing to connect to the API, or failing +// to parse the API response. +func (cli *Client) Ping(ctx context.Context, options PingOptions) (PingResult, error) { + if !options.NegotiateAPIVersion { + // No API version negotiation needed; just return ping response. + return cli.ping(ctx) + } + if cli.negotiated.Load() && !options.ForceNegotiate { + // API version was already negotiated or manually set. + return cli.ping(ctx) + } + + // Ensure exclusive write access to version and negotiated fields + cli.negotiateLock.Lock() + defer cli.negotiateLock.Unlock() + + ping, err := cli.ping(ctx) + if err != nil { + return ping, err + } + + if cli.negotiated.Load() && !options.ForceNegotiate { + // API version was already negotiated or manually set. + // + // We check cli.negotiated again under lock, to account for race + // conditions with the check at the start of this function. + return ping, nil + } + + if ping.APIVersion == "" { + cli.setAPIVersion(MaxAPIVersion) + return ping, nil + } + + return ping, cli.negotiateAPIVersion(ping.APIVersion) +} + +func (cli *Client) ping(ctx context.Context) (PingResult, error) { + // Using cli.buildRequest() + cli.doRequest() instead of cli.sendRequest() + // because ping requests are used during API version negotiation, so we want + // to hit the non-versioned /_ping endpoint, not /v1.xx/_ping + req, err := cli.buildRequest(ctx, http.MethodHead, path.Join(cli.basePath, "/_ping"), nil, nil) + if err != nil { + return PingResult{}, err + } + resp, err := cli.doRequest(req) + defer ensureReaderClosed(resp) + if err == nil && resp.StatusCode == http.StatusOK { + // Fast-path; successfully connected using a HEAD request and + // we got a "OK" (200) status. 
For non-200 status-codes, we fall + // back to doing a GET request, as a HEAD request won't have a + // response-body to get error details from. + return newPingResult(resp), nil + } + // close to allow reusing connection. + ensureReaderClosed(resp) + + // HEAD failed or returned a non-OK status; fallback to GET. + req2, err := cli.buildRequest(ctx, http.MethodGet, path.Join(cli.basePath, "/_ping"), nil, nil) + if err != nil { + return PingResult{}, err + } + resp, err = cli.doRequest(req2) + defer ensureReaderClosed(resp) + if err != nil { + // Failed to connect. + return PingResult{}, err + } + + // GET request succeeded but may have returned a non-200 status. + // Return a Ping response, together with any error returned by + // the API server. + return newPingResult(resp), checkResponseErr(resp) +} + +func newPingResult(resp *http.Response) PingResult { + if resp == nil { + return PingResult{} + } + var swarmStatus *SwarmStatus + if si := resp.Header.Get("Swarm"); si != "" { + state, role, _ := strings.Cut(si, "/") + swarmStatus = &SwarmStatus{ + NodeState: swarm.LocalNodeState(state), + ControlAvailable: role == "manager", + } + } + + return PingResult{ + APIVersion: resp.Header.Get("Api-Version"), + OSType: resp.Header.Get("Ostype"), + Experimental: resp.Header.Get("Docker-Experimental") == "true", + BuilderVersion: build.BuilderVersion(resp.Header.Get("Builder-Version")), + SwarmStatus: swarmStatus, + } +} diff --git a/vendor/github.com/moby/moby/client/pkg/versions/compare.go b/vendor/github.com/moby/moby/client/pkg/versions/compare.go new file mode 100644 index 00000000000..1a0325c7eda --- /dev/null +++ b/vendor/github.com/moby/moby/client/pkg/versions/compare.go @@ -0,0 +1,65 @@ +package versions + +import ( + "strconv" + "strings" +) + +// compare compares two version strings +// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise. 
+func compare(v1, v2 string) int { + if v1 == v2 { + return 0 + } + var ( + currTab = strings.Split(v1, ".") + otherTab = strings.Split(v2, ".") + ) + + maxVer := len(currTab) + if len(otherTab) > maxVer { + maxVer = len(otherTab) + } + for i := 0; i < maxVer; i++ { + var currInt, otherInt int + + if len(currTab) > i { + currInt, _ = strconv.Atoi(currTab[i]) + } + if len(otherTab) > i { + otherInt, _ = strconv.Atoi(otherTab[i]) + } + if currInt > otherInt { + return 1 + } + if otherInt > currInt { + return -1 + } + } + return 0 +} + +// LessThan checks if a version is less than another +func LessThan(v, other string) bool { + return compare(v, other) == -1 +} + +// LessThanOrEqualTo checks if a version is less than or equal to another +func LessThanOrEqualTo(v, other string) bool { + return compare(v, other) <= 0 +} + +// GreaterThan checks if a version is greater than another +func GreaterThan(v, other string) bool { + return compare(v, other) == 1 +} + +// GreaterThanOrEqualTo checks if a version is greater than or equal to another +func GreaterThanOrEqualTo(v, other string) bool { + return compare(v, other) >= 0 +} + +// Equal checks if a version is equal to another +func Equal(v, other string) bool { + return compare(v, other) == 0 +} diff --git a/vendor/github.com/moby/moby/client/plugin_create.go b/vendor/github.com/moby/moby/client/plugin_create.go new file mode 100644 index 00000000000..c1a2dd5a6c2 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_create.go @@ -0,0 +1,31 @@ +package client + +import ( + "context" + "io" + "net/http" + "net/url" +) + +// PluginCreateOptions hold all options to plugin create. +type PluginCreateOptions struct { + RepoName string +} + +// PluginCreateResult represents the result of a plugin create operation. +type PluginCreateResult struct { + // Currently empty; can be extended in the future if needed. 
+} + +// PluginCreate creates a plugin +func (cli *Client) PluginCreate(ctx context.Context, createContext io.Reader, createOptions PluginCreateOptions) (PluginCreateResult, error) { + headers := http.Header(make(map[string][]string)) + headers.Set("Content-Type", "application/x-tar") + + query := url.Values{} + query.Set("name", createOptions.RepoName) + + resp, err := cli.postRaw(ctx, "/plugins/create", query, createContext, headers) + defer ensureReaderClosed(resp) + return PluginCreateResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/plugin_disable.go b/vendor/github.com/moby/moby/client/plugin_disable.go new file mode 100644 index 00000000000..65ab0aa0048 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_disable.go @@ -0,0 +1,31 @@ +package client + +import ( + "context" + "net/url" +) + +// PluginDisableOptions holds parameters to disable plugins. +type PluginDisableOptions struct { + Force bool +} + +// PluginDisableResult represents the result of a plugin disable operation. +type PluginDisableResult struct { + // Currently empty; can be extended in the future if needed. +} + +// PluginDisable disables a plugin +func (cli *Client) PluginDisable(ctx context.Context, name string, options PluginDisableOptions) (PluginDisableResult, error) { + name, err := trimID("plugin", name) + if err != nil { + return PluginDisableResult{}, err + } + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + resp, err := cli.post(ctx, "/plugins/"+name+"/disable", query, nil, nil) + defer ensureReaderClosed(resp) + return PluginDisableResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/plugin_enable.go b/vendor/github.com/moby/moby/client/plugin_enable.go new file mode 100644 index 00000000000..7c3e26b67a7 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_enable.go @@ -0,0 +1,31 @@ +package client + +import ( + "context" + "net/url" + "strconv" +) + +// PluginEnableOptions holds parameters to enable plugins. 
+type PluginEnableOptions struct { + Timeout int +} + +// PluginEnableResult represents the result of a plugin enable operation. +type PluginEnableResult struct { + // Currently empty; can be extended in the future if needed. +} + +// PluginEnable enables a plugin +func (cli *Client) PluginEnable(ctx context.Context, name string, options PluginEnableOptions) (PluginEnableResult, error) { + name, err := trimID("plugin", name) + if err != nil { + return PluginEnableResult{}, err + } + query := url.Values{} + query.Set("timeout", strconv.Itoa(options.Timeout)) + + resp, err := cli.post(ctx, "/plugins/"+name+"/enable", query, nil, nil) + defer ensureReaderClosed(resp) + return PluginEnableResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/plugin_inspect.go b/vendor/github.com/moby/moby/client/plugin_inspect.go new file mode 100644 index 00000000000..8caf06a8e05 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_inspect.go @@ -0,0 +1,35 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/plugin" +) + +// PluginInspectOptions holds parameters to inspect a plugin. +type PluginInspectOptions struct { + // Add future optional parameters here +} + +// PluginInspectResult holds the result from the [Client.PluginInspect] method. 
+type PluginInspectResult struct { + Plugin plugin.Plugin + Raw json.RawMessage +} + +// PluginInspect inspects an existing plugin +func (cli *Client) PluginInspect(ctx context.Context, name string, options PluginInspectOptions) (PluginInspectResult, error) { + name, err := trimID("plugin", name) + if err != nil { + return PluginInspectResult{}, err + } + resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) + if err != nil { + return PluginInspectResult{}, err + } + + var out PluginInspectResult + out.Raw, err = decodeWithRaw(resp, &out.Plugin) + return out, err +} diff --git a/vendor/github.com/moby/moby/client/plugin_install.go b/vendor/github.com/moby/moby/client/plugin_install.go new file mode 100644 index 00000000000..a589b2e1fd3 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_install.go @@ -0,0 +1,175 @@ +package client + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + + cerrdefs "github.com/containerd/errdefs" + "github.com/distribution/reference" + "github.com/moby/moby/api/types/plugin" + "github.com/moby/moby/api/types/registry" +) + +// PluginInstallOptions holds parameters to install a plugin. +type PluginInstallOptions struct { + Disabled bool + AcceptAllPermissions bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + RemoteRef string // RemoteRef is the plugin name on the registry + + // PrivilegeFunc is a function that clients can supply to retry operations + // after getting an authorization error. This function returns the registry + // authentication header value in base64 encoded format, or an error if the + // privilege request fails. + // + // For details, refer to [github.com/moby/moby/api/types/registry.RequestAuthConfig]. 
+ PrivilegeFunc func(context.Context) (string, error) + AcceptPermissionsFunc func(context.Context, plugin.Privileges) (bool, error) + Args []string +} + +// PluginInstallResult holds the result of a plugin install operation. +// It is an io.ReadCloser from which the caller can read installation progress or result. +type PluginInstallResult struct { + io.ReadCloser +} + +// PluginInstall installs a plugin +func (cli *Client) PluginInstall(ctx context.Context, name string, options PluginInstallOptions) (_ PluginInstallResult, retErr error) { + query := url.Values{} + if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { + return PluginInstallResult{}, fmt.Errorf("invalid remote reference: %w", err) + } + query.Set("remote", options.RemoteRef) + + privileges, err := cli.checkPluginPermissions(ctx, query, &options) + if err != nil { + return PluginInstallResult{}, err + } + + // set name for plugin pull, if empty should default to remote reference + query.Set("name", name) + + resp, err := cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth) + if err != nil { + return PluginInstallResult{}, err + } + + name = resp.Header.Get("Docker-Plugin-Name") + + pr, pw := io.Pipe() + go func() { // todo: the client should probably be designed more around the actual api + _, err := io.Copy(pw, resp.Body) + if err != nil { + _ = pw.CloseWithError(err) + return + } + defer func() { + if retErr != nil { + delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil) + ensureReaderClosed(delResp) + } + }() + if len(options.Args) > 0 { + if _, err := cli.PluginSet(ctx, name, PluginSetOptions{Args: options.Args}); err != nil { + _ = pw.CloseWithError(err) + return + } + } + + if options.Disabled { + _ = pw.Close() + return + } + + _, enableErr := cli.PluginEnable(ctx, name, PluginEnableOptions{Timeout: 0}) + _ = pw.CloseWithError(enableErr) + }() + return PluginInstallResult{pr}, nil +} + +func (cli *Client) tryPluginPrivileges(ctx context.Context, query 
url.Values, registryAuth string) (*http.Response, error) { + return cli.get(ctx, "/plugins/privileges", query, http.Header{ + registry.AuthHeader: {registryAuth}, + }) +} + +func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges plugin.Privileges, registryAuth string) (*http.Response, error) { + return cli.post(ctx, "/plugins/pull", query, privileges, http.Header{ + registry.AuthHeader: {registryAuth}, + }) +} + +func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options pluginOptions) (plugin.Privileges, error) { + resp, err := cli.tryPluginPrivileges(ctx, query, options.getRegistryAuth()) + if cerrdefs.IsUnauthorized(err) && options.getPrivilegeFunc() != nil { + // TODO: do inspect before to check existing name before checking privileges + newAuthHeader, privilegeErr := options.getPrivilegeFunc()(ctx) + if privilegeErr != nil { + ensureReaderClosed(resp) + return nil, privilegeErr + } + options.setRegistryAuth(newAuthHeader) + resp, err = cli.tryPluginPrivileges(ctx, query, options.getRegistryAuth()) + } + if err != nil { + ensureReaderClosed(resp) + return nil, err + } + + var privileges plugin.Privileges + if err := json.NewDecoder(resp.Body).Decode(&privileges); err != nil { + ensureReaderClosed(resp) + return nil, err + } + ensureReaderClosed(resp) + + if !options.getAcceptAllPermissions() && options.getAcceptPermissionsFunc() != nil && len(privileges) > 0 { + accept, err := options.getAcceptPermissionsFunc()(ctx, privileges) + if err != nil { + return nil, err + } + if !accept { + return nil, errors.New("permission denied while installing plugin " + options.getRemoteRef()) + } + } + return privileges, nil +} + +type pluginOptions interface { + getRegistryAuth() string + setRegistryAuth(string) + getPrivilegeFunc() func(context.Context) (string, error) + getAcceptAllPermissions() bool + getAcceptPermissionsFunc() func(context.Context, plugin.Privileges) (bool, error) + getRemoteRef() string +} + +func 
(o *PluginInstallOptions) getRegistryAuth() string { + return o.RegistryAuth +} + +func (o *PluginInstallOptions) setRegistryAuth(auth string) { + o.RegistryAuth = auth +} + +func (o *PluginInstallOptions) getPrivilegeFunc() func(context.Context) (string, error) { + return o.PrivilegeFunc +} + +func (o *PluginInstallOptions) getAcceptAllPermissions() bool { + return o.AcceptAllPermissions +} + +func (o *PluginInstallOptions) getAcceptPermissionsFunc() func(context.Context, plugin.Privileges) (bool, error) { + return o.AcceptPermissionsFunc +} + +func (o *PluginInstallOptions) getRemoteRef() string { + return o.RemoteRef +} diff --git a/vendor/github.com/moby/moby/client/plugin_list.go b/vendor/github.com/moby/moby/client/plugin_list.go new file mode 100644 index 00000000000..cbd90b407a4 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_list.go @@ -0,0 +1,35 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/plugin" +) + +// PluginListOptions holds parameters to list plugins. +type PluginListOptions struct { + Filters Filters +} + +// PluginListResult represents the result of a plugin list operation. 
+type PluginListResult struct { + Items []plugin.Plugin +} + +// PluginList returns the installed plugins +func (cli *Client) PluginList(ctx context.Context, options PluginListOptions) (PluginListResult, error) { + query := url.Values{} + + options.Filters.updateURLValues(query) + resp, err := cli.get(ctx, "/plugins", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return PluginListResult{}, err + } + + var plugins plugin.ListResponse + err = json.NewDecoder(resp.Body).Decode(&plugins) + return PluginListResult{Items: plugins}, err +} diff --git a/vendor/github.com/moby/moby/client/plugin_push.go b/vendor/github.com/moby/moby/client/plugin_push.go new file mode 100644 index 00000000000..4ba25d1336e --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_push.go @@ -0,0 +1,34 @@ +package client + +import ( + "context" + "io" + "net/http" + + "github.com/moby/moby/api/types/registry" +) + +// PluginPushOptions holds parameters to push a plugin. +type PluginPushOptions struct { + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry +} + +// PluginPushResult is the result of a plugin push operation +type PluginPushResult struct { + io.ReadCloser +} + +// PluginPush pushes a plugin to a registry +func (cli *Client) PluginPush(ctx context.Context, name string, options PluginPushOptions) (PluginPushResult, error) { + name, err := trimID("plugin", name) + if err != nil { + return PluginPushResult{}, err + } + resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, http.Header{ + registry.AuthHeader: {options.RegistryAuth}, + }) + if err != nil { + return PluginPushResult{}, err + } + return PluginPushResult{resp.Body}, nil +} diff --git a/vendor/github.com/moby/moby/client/plugin_remove.go b/vendor/github.com/moby/moby/client/plugin_remove.go new file mode 100644 index 00000000000..229f4085824 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_remove.go @@ -0,0 +1,33 @@ +package client + +import ( + 
"context" + "net/url" +) + +// PluginRemoveOptions holds parameters to remove plugins. +type PluginRemoveOptions struct { + Force bool +} + +// PluginRemoveResult represents the result of a plugin removal. +type PluginRemoveResult struct { + // Currently empty; can be extended in the future if needed. +} + +// PluginRemove removes a plugin +func (cli *Client) PluginRemove(ctx context.Context, name string, options PluginRemoveOptions) (PluginRemoveResult, error) { + name, err := trimID("plugin", name) + if err != nil { + return PluginRemoveResult{}, err + } + + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/plugins/"+name, query, nil) + defer ensureReaderClosed(resp) + return PluginRemoveResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/plugin_set.go b/vendor/github.com/moby/moby/client/plugin_set.go new file mode 100644 index 00000000000..c1f6bb5fac8 --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_set.go @@ -0,0 +1,27 @@ +package client + +import ( + "context" +) + +// PluginSetOptions defines options for modifying a plugin's settings. +type PluginSetOptions struct { + Args []string +} + +// PluginSetResult represents the result of a plugin set operation. +type PluginSetResult struct { + // Currently empty; can be extended in the future if needed. 
+} + +// PluginSet modifies settings for an existing plugin +func (cli *Client) PluginSet(ctx context.Context, name string, options PluginSetOptions) (PluginSetResult, error) { + name, err := trimID("plugin", name) + if err != nil { + return PluginSetResult{}, err + } + + resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, options.Args, nil) + defer ensureReaderClosed(resp) + return PluginSetResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/plugin_upgrade.go b/vendor/github.com/moby/moby/client/plugin_upgrade.go new file mode 100644 index 00000000000..f9df6e5843d --- /dev/null +++ b/vendor/github.com/moby/moby/client/plugin_upgrade.go @@ -0,0 +1,89 @@ +package client + +import ( + "context" + "fmt" + "io" + "net/http" + "net/url" + + "github.com/distribution/reference" + "github.com/moby/moby/api/types/plugin" + "github.com/moby/moby/api/types/registry" +) + +// PluginUpgradeOptions holds parameters to upgrade a plugin. +type PluginUpgradeOptions struct { + Disabled bool + AcceptAllPermissions bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + RemoteRef string // RemoteRef is the plugin name on the registry + + // PrivilegeFunc is a function that clients can supply to retry operations + // after getting an authorization error. This function returns the registry + // authentication header value in base64 encoded format, or an error if the + // privilege request fails. + // + // For details, refer to [github.com/moby/moby/api/types/registry.RequestAuthConfig]. + PrivilegeFunc func(context.Context) (string, error) + AcceptPermissionsFunc func(context.Context, plugin.Privileges) (bool, error) + Args []string +} + +// PluginUpgradeResult holds the result of a plugin upgrade operation. 
+type PluginUpgradeResult io.ReadCloser + +// PluginUpgrade upgrades a plugin +func (cli *Client) PluginUpgrade(ctx context.Context, name string, options PluginUpgradeOptions) (PluginUpgradeResult, error) { + name, err := trimID("plugin", name) + if err != nil { + return nil, err + } + + query := url.Values{} + if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { + return nil, fmt.Errorf("invalid remote reference: %w", err) + } + query.Set("remote", options.RemoteRef) + + privileges, err := cli.checkPluginPermissions(ctx, query, &options) + if err != nil { + return nil, err + } + + resp, err := cli.tryPluginUpgrade(ctx, query, privileges, name, options.RegistryAuth) + if err != nil { + return nil, err + } + return resp.Body, nil +} + +func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges plugin.Privileges, name, registryAuth string) (*http.Response, error) { + return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, http.Header{ + registry.AuthHeader: {registryAuth}, + }) +} + +func (o *PluginUpgradeOptions) getRegistryAuth() string { + return o.RegistryAuth +} + +func (o *PluginUpgradeOptions) setRegistryAuth(auth string) { + o.RegistryAuth = auth +} + +func (o *PluginUpgradeOptions) getPrivilegeFunc() func(context.Context) (string, error) { + return o.PrivilegeFunc +} + +func (o *PluginUpgradeOptions) getAcceptAllPermissions() bool { + return o.AcceptAllPermissions +} + +func (o *PluginUpgradeOptions) getAcceptPermissionsFunc() func(context.Context, plugin.Privileges) (bool, error) { + return o.AcceptPermissionsFunc +} + +func (o *PluginUpgradeOptions) getRemoteRef() string { + return o.RemoteRef +} diff --git a/vendor/github.com/moby/moby/client/request.go b/vendor/github.com/moby/moby/client/request.go new file mode 100644 index 00000000000..7b1ff743dc9 --- /dev/null +++ b/vendor/github.com/moby/moby/client/request.go @@ -0,0 +1,376 @@ +package client + +import ( + "bytes" + "context" + 
"encoding/json" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/url" + "os" + "reflect" + "strings" + + "github.com/moby/moby/api/types/common" +) + +// head sends an http request to the docker API using the method HEAD. +func (cli *Client) head(ctx context.Context, path string, query url.Values, headers http.Header) (*http.Response, error) { + return cli.sendRequest(ctx, http.MethodHead, path, query, nil, headers) +} + +// get sends an http request to the docker API using the method GET with a specific Go context. +func (cli *Client) get(ctx context.Context, path string, query url.Values, headers http.Header) (*http.Response, error) { + return cli.sendRequest(ctx, http.MethodGet, path, query, nil, headers) +} + +// post sends an http POST request to the API. +func (cli *Client) post(ctx context.Context, path string, query url.Values, body any, headers http.Header) (*http.Response, error) { + jsonBody, headers, err := prepareJSONRequest(body, headers) + if err != nil { + return nil, err + } + return cli.sendRequest(ctx, http.MethodPost, path, query, jsonBody, headers) +} + +func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers http.Header) (*http.Response, error) { + return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) +} + +func (cli *Client) put(ctx context.Context, path string, query url.Values, body any, headers http.Header) (*http.Response, error) { + jsonBody, headers, err := prepareJSONRequest(body, headers) + if err != nil { + return nil, err + } + return cli.putRaw(ctx, path, query, jsonBody, headers) +} + +// putRaw sends an http request to the docker API using the method PUT. 
+func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers http.Header) (*http.Response, error) { + // PUT requests are expected to always have a body (apparently) + // so explicitly pass an empty body to sendRequest to signal that + // it should set the Content-Type header if not already present. + if body == nil { + body = http.NoBody + } + return cli.sendRequest(ctx, http.MethodPut, path, query, body, headers) +} + +// delete sends an http request to the docker API using the method DELETE. +func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers http.Header) (*http.Response, error) { + return cli.sendRequest(ctx, http.MethodDelete, path, query, nil, headers) +} + +// prepareJSONRequest encodes the given body to JSON and returns it as an [io.Reader], and sets the Content-Type +// header. If body is nil, or a nil-interface, a "nil" body is returned without +// error. +func prepareJSONRequest(body any, headers http.Header) (io.Reader, http.Header, error) { + jsonBody, err := jsonEncode(body) + if err != nil { + return nil, headers, err + } + if jsonBody == nil || jsonBody == http.NoBody { + // no content-type is set on empty requests. + return jsonBody, headers, nil + } + + hdr := http.Header{} + if headers != nil { + hdr = headers.Clone() + } + + // TODO(thaJeztah): should this return an error if a different Content-Type is already set? + hdr.Set("Content-Type", "application/json") + return jsonBody, hdr, nil +} + +func (cli *Client) buildRequest(ctx context.Context, method, path string, body io.Reader, headers http.Header) (*http.Request, error) { + req, err := http.NewRequestWithContext(ctx, method, path, body) + if err != nil { + return nil, err + } + req = cli.addHeaders(req, headers) + req.URL.Scheme = cli.scheme + req.URL.Host = cli.addr + + if cli.proto == "unix" || cli.proto == "npipe" { + // Override host header for non-tcp connections. 
+ req.Host = DummyHost + } + + return req, nil +} + +func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers http.Header) (*http.Response, error) { + req, err := cli.buildRequest(ctx, method, cli.getAPIPath(ctx, path, query), body, headers) + if err != nil { + return nil, err + } + + resp, err := cli.doRequest(req) + if err != nil { + // Failed to connect or context error. + return resp, err + } + + // Successfully made a request; return the response and handle any + // API HTTP response errors. + return resp, checkResponseErr(resp) +} + +// doRequest sends an HTTP request and returns an HTTP response. It is a +// wrapper around [http.Client.Do] with extra handling to decorate errors. +// +// Otherwise, it behaves identical to [http.Client.Do]; an error is returned +// when failing to make a connection, On error, any Response can be ignored. +// A non-2xx status code doesn't cause an error. +func (cli *Client) doRequest(req *http.Request) (*http.Response, error) { + resp, err := cli.client.Do(req) + if err == nil { + return resp, nil + } + + if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") { + return nil, errConnectionFailed{fmt.Errorf("%w.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err)} + } + + const ( + // Go 1.25 / TLS 1.3 may produce a generic "handshake failure" + // whereas TLS 1.2 may produce a "bad certificate" TLS alert. + // See https://github.com/golang/go/issues/56371 + // + // > https://tip.golang.org/doc/go1.12#tls_1_3 + // > + // > In TLS 1.3 the client is the last one to speak in the handshake, so if + // > it causes an error to occur on the server, it will be returned on the + // > client by the first Read, not by Handshake. For example, that will be + // > the case if the server rejects the client certificate. 
+ // + // https://github.com/golang/go/blob/go1.25.1/src/crypto/tls/alert.go#L71-L72 + alertBadCertificate = "bad certificate" // go1.24 / TLS 1.2 + alertHandshakeFailure = "handshake failure" // go1.25 / TLS 1.3 + ) + + // TODO(thaJeztah): see if we can use errors.As for a [crypto/tls.AlertError] instead of bare string matching. + if cli.scheme == "https" && (strings.Contains(err.Error(), alertHandshakeFailure) || strings.Contains(err.Error(), alertBadCertificate)) { + return nil, errConnectionFailed{fmt.Errorf("the server probably has client authentication (--tlsverify) enabled; check your TLS client certification settings: %w", err)} + } + + // Don't decorate context sentinel errors; users may be comparing to + // them directly. + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return nil, err + } + + if errors.Is(err, os.ErrPermission) { + // Don't include request errors (Get "http://%2Fvar%2Frun%2Fdocker.sock/v1.51/version"), + // which are irrelevant if we weren't able to connect. + return nil, errConnectionFailed{fmt.Errorf("permission denied while trying to connect to the docker API at %v", cli.host)} + } + if errors.Is(err, os.ErrNotExist) { + // Unwrap the error to remove request errors (Get "http://%2Fvar%2Frun%2Fdocker.sock/v1.51/version"), + // which are irrelevant if we weren't able to connect. + err = errors.Unwrap(err) + return nil, errConnectionFailed{fmt.Errorf("failed to connect to the docker API at %v; check if the path is correct and if the daemon is running: %w", cli.host, err)} + } + var dnsErr *net.DNSError + if errors.As(err, &dnsErr) { + return nil, errConnectionFailed{fmt.Errorf("failed to connect to the docker API at %v: %w", cli.host, dnsErr)} + } + + var nErr net.Error + if errors.As(err, &nErr) { + // FIXME(thaJeztah): any net.Error should be considered a connection error (but we should include the original error)? 
+ if nErr.Timeout() { + return nil, connectionFailed(cli.host) + } + if strings.Contains(nErr.Error(), "connection refused") || strings.Contains(nErr.Error(), "dial unix") { + return nil, connectionFailed(cli.host) + } + } + + // Although there's not a strongly typed error for this in go-winio, + // lots of people are using the default configuration for the docker + // daemon on Windows where the daemon is listening on a named pipe + // ("//./pipe/docker_engine"), and the client must be running elevated. + // + // Give users a clue rather than the not-overly useful message such as; + // + // open //./pipe/docker_engine: The system cannot find the file specified. + // + // Note we can't string compare "The system cannot find the file specified" as + // this is localized; for example. in French the error would be; + // + // open //./pipe/docker_engine: Le fichier spécifié est introuvable. + if strings.Contains(err.Error(), `open //./pipe/docker_engine`) { + // Checks if client is running with elevated privileges + if f, elevatedErr := os.Open(`\\.\PHYSICALDRIVE0`); elevatedErr != nil { + err = fmt.Errorf("in the default daemon configuration on Windows, the docker client must be run with elevated privileges to connect: %w", err) + } else { + _ = f.Close() + err = fmt.Errorf("this error may indicate that the docker daemon is not running: %w", err) + } + } + + return nil, errConnectionFailed{fmt.Errorf("error during connect: %w", err)} +} + +func checkResponseErr(serverResp *http.Response) (retErr error) { + if serverResp == nil { + return nil + } + if serverResp.StatusCode >= http.StatusOK && serverResp.StatusCode < http.StatusBadRequest { + return nil + } + defer func() { + retErr = httpErrorFromStatusCode(retErr, serverResp.StatusCode) + }() + + var body []byte + var err error + var reqURL string + if serverResp.Request != nil { + reqURL = serverResp.Request.URL.String() + } + statusMsg := serverResp.Status + if statusMsg == "" { + statusMsg = 
http.StatusText(serverResp.StatusCode) + } + var reqMethod string + if serverResp.Request != nil { + reqMethod = serverResp.Request.Method + } + if serverResp.Body != nil && reqMethod != http.MethodHead { + bodyMax := 1 * 1024 * 1024 // 1 MiB + bodyR := &io.LimitedReader{ + R: serverResp.Body, + N: int64(bodyMax), + } + body, err = io.ReadAll(bodyR) + if err != nil { + return err + } + if bodyR.N == 0 { + if reqURL != "" { + return fmt.Errorf("request returned %s with a message (> %d bytes) for API route and version %s, check if the server supports the requested API version", statusMsg, bodyMax, reqURL) + } + return fmt.Errorf("request returned %s with a message (> %d bytes); check if the server supports the requested API version", statusMsg, bodyMax) + } + } + if len(body) == 0 { + if reqURL != "" { + return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", statusMsg, reqURL) + } + return fmt.Errorf("request returned %s; check if the server supports the requested API version", statusMsg) + } + + var daemonErr error + if serverResp.Header.Get("Content-Type") == "application/json" { + var errorResponse common.ErrorResponse + if err := json.Unmarshal(body, &errorResponse); err != nil { + return fmt.Errorf("error reading JSON: %w", err) + } + if errorResponse.Message == "" { + // Error-message is empty, which means that we successfully parsed the + // JSON-response (no error produced), but it didn't contain an error + // message. This could either be because the response was empty, or + // the response was valid JSON, but not with the expected schema + // ([common.ErrorResponse]). + // + // We cannot use "strict" JSON handling (json.NewDecoder with DisallowUnknownFields) + // due to the API using an open schema (we must anticipate fields + // being added to [common.ErrorResponse] in the future, and not + // reject those responses. 
+ // + // For these cases, we construct an error with the status-code + // returned, but we could consider returning (a truncated version + // of) the actual response as-is. + // + // TODO(thaJeztah): consider adding a log.Debug to allow clients to debug the actual response when enabling debug logging. + daemonErr = fmt.Errorf(`API returned a %d (%s) but provided no error-message`, + serverResp.StatusCode, + http.StatusText(serverResp.StatusCode), + ) + } else { + daemonErr = errors.New(strings.TrimSpace(errorResponse.Message)) + } + } else { + // Fall back to returning the response as-is for situations where a + // plain text error is returned. This branch may also catch + // situations where a proxy is involved, returning an HTML response. + daemonErr = errors.New(strings.TrimSpace(string(body))) + } + return fmt.Errorf("Error response from daemon: %w", daemonErr) +} + +func (cli *Client) addHeaders(req *http.Request, headers http.Header) *http.Request { + // Add CLI Config's HTTP Headers BEFORE we set the Docker headers + // then the user can't change OUR headers + for k, v := range cli.customHTTPHeaders { + req.Header.Set(k, v) + } + + for k, v := range headers { + req.Header[http.CanonicalHeaderKey(k)] = v + } + + if cli.userAgent != nil { + if *cli.userAgent == "" { + req.Header.Del("User-Agent") + } else { + req.Header.Set("User-Agent", *cli.userAgent) + } + } + return req +} + +func jsonEncode(data any) (io.Reader, error) { + switch x := data.(type) { + case nil: + return http.NoBody, nil + case io.Reader: + // http.NoBody or other readers + return x, nil + case json.RawMessage: + if len(x) == 0 { + return http.NoBody, nil + } + return bytes.NewReader(x), nil + } + + // encoding/json encodes a nil pointer as the JSON document `null`, + // irrespective of whether the type implements json.Marshaler or encoding.TextMarshaler. + // That is almost certainly not what the caller intended as the request body. 
+ if v := reflect.ValueOf(data); v.Kind() == reflect.Ptr && v.IsNil() { + return http.NoBody, nil + } + + b, err := json.Marshal(data) + if err != nil { + return nil, err + } + return bytes.NewReader(b), nil +} + +func ensureReaderClosed(response *http.Response) { + if response == nil || response.Body == nil { + return + } + if response.ContentLength == 0 || (response.Request != nil && response.Request.Method == http.MethodHead) { + // No need to drain head requests or zero-length responses. + _ = response.Body.Close() + return + } + // Drain up to 512 bytes and close the body to let the Transport reuse the connection + // see https://github.com/google/go-github/pull/317/files#r57536827 + // + // TODO(thaJeztah): see if this optimization is still needed, or already implemented in stdlib, + // and check if context-cancellation should handle this as well. If still needed, consider + // wrapping response.Body, or returning a "closer()" from [Client.sendRequest] and related + // methods. + _, _ = io.CopyN(io.Discard, response.Body, 512) + _ = response.Body.Close() +} diff --git a/vendor/github.com/moby/moby/client/secret_create.go b/vendor/github.com/moby/moby/client/secret_create.go new file mode 100644 index 00000000000..8e59a42ce70 --- /dev/null +++ b/vendor/github.com/moby/moby/client/secret_create.go @@ -0,0 +1,34 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/swarm" +) + +// SecretCreateOptions holds options for creating a secret. +type SecretCreateOptions struct { + Spec swarm.SecretSpec +} + +// SecretCreateResult holds the result from the [Client.SecretCreate] method. +type SecretCreateResult struct { + ID string +} + +// SecretCreate creates a new secret. 
+func (cli *Client) SecretCreate(ctx context.Context, options SecretCreateOptions) (SecretCreateResult, error) { + resp, err := cli.post(ctx, "/secrets/create", nil, options.Spec, nil) + defer ensureReaderClosed(resp) + if err != nil { + return SecretCreateResult{}, err + } + + var out swarm.ConfigCreateResponse + err = json.NewDecoder(resp.Body).Decode(&out) + if err != nil { + return SecretCreateResult{}, err + } + return SecretCreateResult{ID: out.ID}, nil +} diff --git a/vendor/github.com/moby/moby/client/secret_inspect.go b/vendor/github.com/moby/moby/client/secret_inspect.go new file mode 100644 index 00000000000..fefd4cd23df --- /dev/null +++ b/vendor/github.com/moby/moby/client/secret_inspect.go @@ -0,0 +1,35 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/swarm" +) + +// SecretInspectOptions holds options for inspecting a secret. +type SecretInspectOptions struct { + // Add future optional parameters here +} + +// SecretInspectResult holds the result from the [Client.SecretInspect]. method. +type SecretInspectResult struct { + Secret swarm.Secret + Raw json.RawMessage +} + +// SecretInspect returns the secret information with raw data. 
+func (cli *Client) SecretInspect(ctx context.Context, id string, options SecretInspectOptions) (SecretInspectResult, error) { + id, err := trimID("secret", id) + if err != nil { + return SecretInspectResult{}, err + } + resp, err := cli.get(ctx, "/secrets/"+id, nil, nil) + if err != nil { + return SecretInspectResult{}, err + } + + var out SecretInspectResult + out.Raw, err = decodeWithRaw(resp, &out.Secret) + return out, err +} diff --git a/vendor/github.com/moby/moby/client/secret_list.go b/vendor/github.com/moby/moby/client/secret_list.go new file mode 100644 index 00000000000..be36955757e --- /dev/null +++ b/vendor/github.com/moby/moby/client/secret_list.go @@ -0,0 +1,38 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/swarm" +) + +// SecretListOptions holds parameters to list secrets +type SecretListOptions struct { + Filters Filters +} + +// SecretListResult holds the result from the [client.SecretList] method. +type SecretListResult struct { + Items []swarm.Secret +} + +// SecretList returns the list of secrets. 
+func (cli *Client) SecretList(ctx context.Context, options SecretListOptions) (SecretListResult, error) { + query := url.Values{} + options.Filters.updateURLValues(query) + + resp, err := cli.get(ctx, "/secrets", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return SecretListResult{}, err + } + + var out SecretListResult + err = json.NewDecoder(resp.Body).Decode(&out.Items) + if err != nil { + return SecretListResult{}, err + } + return out, nil +} diff --git a/vendor/github.com/moby/moby/client/secret_remove.go b/vendor/github.com/moby/moby/client/secret_remove.go new file mode 100644 index 00000000000..8554f3f215c --- /dev/null +++ b/vendor/github.com/moby/moby/client/secret_remove.go @@ -0,0 +1,25 @@ +package client + +import "context" + +type SecretRemoveOptions struct { + // Add future optional parameters here +} + +type SecretRemoveResult struct { + // Add future fields here +} + +// SecretRemove removes a secret. +func (cli *Client) SecretRemove(ctx context.Context, id string, options SecretRemoveOptions) (SecretRemoveResult, error) { + id, err := trimID("secret", id) + if err != nil { + return SecretRemoveResult{}, err + } + resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return SecretRemoveResult{}, err + } + return SecretRemoveResult{}, nil +} diff --git a/vendor/github.com/moby/moby/client/secret_update.go b/vendor/github.com/moby/moby/client/secret_update.go new file mode 100644 index 00000000000..c88ad110604 --- /dev/null +++ b/vendor/github.com/moby/moby/client/secret_update.go @@ -0,0 +1,32 @@ +package client + +import ( + "context" + "net/url" + + "github.com/moby/moby/api/types/swarm" +) + +// SecretUpdateOptions holds options for updating a secret. +type SecretUpdateOptions struct { + Version swarm.Version + Spec swarm.SecretSpec +} + +type SecretUpdateResult struct{} + +// SecretUpdate attempts to update a secret. 
+func (cli *Client) SecretUpdate(ctx context.Context, id string, options SecretUpdateOptions) (SecretUpdateResult, error) { + id, err := trimID("secret", id) + if err != nil { + return SecretUpdateResult{}, err + } + query := url.Values{} + query.Set("version", options.Version.String()) + resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, options.Spec, nil) + defer ensureReaderClosed(resp) + if err != nil { + return SecretUpdateResult{}, err + } + return SecretUpdateResult{}, nil +} diff --git a/vendor/github.com/moby/moby/client/service_create.go b/vendor/github.com/moby/moby/client/service_create.go new file mode 100644 index 00000000000..319bca6f4c8 --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_create.go @@ -0,0 +1,206 @@ +package client + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "strings" + + "github.com/distribution/reference" + "github.com/moby/moby/api/types/registry" + "github.com/moby/moby/api/types/swarm" + "github.com/opencontainers/go-digest" +) + +// ServiceCreateOptions contains the options to use when creating a service. +type ServiceCreateOptions struct { + Spec swarm.ServiceSpec + + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. + QueryRegistry bool +} + +// ServiceCreateResult represents the result of creating a service. +type ServiceCreateResult struct { + // ID is the ID of the created service. + ID string + + // Warnings is a list of warnings that occurred during service creation. + Warnings []string +} + +// ServiceCreate creates a new service. 
+func (cli *Client) ServiceCreate(ctx context.Context, options ServiceCreateOptions) (ServiceCreateResult, error) { + // Make sure containerSpec is not nil when no runtime is set or the runtime is set to container + if options.Spec.TaskTemplate.ContainerSpec == nil && (options.Spec.TaskTemplate.Runtime == "" || options.Spec.TaskTemplate.Runtime == swarm.RuntimeContainer) { + options.Spec.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{} + } + + if err := validateServiceSpec(options.Spec); err != nil { + return ServiceCreateResult{}, err + } + + // ensure that the image is tagged + var warnings []string + switch { + case options.Spec.TaskTemplate.ContainerSpec != nil: + if taggedImg := imageWithTagString(options.Spec.TaskTemplate.ContainerSpec.Image); taggedImg != "" { + options.Spec.TaskTemplate.ContainerSpec.Image = taggedImg + } + if options.QueryRegistry { + if warning := resolveContainerSpecImage(ctx, cli, &options.Spec.TaskTemplate, options.EncodedRegistryAuth); warning != "" { + warnings = append(warnings, warning) + } + } + case options.Spec.TaskTemplate.PluginSpec != nil: + if taggedImg := imageWithTagString(options.Spec.TaskTemplate.PluginSpec.Remote); taggedImg != "" { + options.Spec.TaskTemplate.PluginSpec.Remote = taggedImg + } + if options.QueryRegistry { + if warning := resolvePluginSpecRemote(ctx, cli, &options.Spec.TaskTemplate, options.EncodedRegistryAuth); warning != "" { + warnings = append(warnings, warning) + } + } + } + + headers := http.Header{} + if options.EncodedRegistryAuth != "" { + headers[registry.AuthHeader] = []string{options.EncodedRegistryAuth} + } + resp, err := cli.post(ctx, "/services/create", nil, options.Spec, headers) + defer ensureReaderClosed(resp) + if err != nil { + return ServiceCreateResult{}, err + } + + var response swarm.ServiceCreateResponse + err = json.NewDecoder(resp.Body).Decode(&response) + warnings = append(warnings, response.Warnings...) 
+ + return ServiceCreateResult{ + ID: response.ID, + Warnings: warnings, + }, err +} + +func resolveContainerSpecImage(ctx context.Context, cli DistributionAPIClient, taskSpec *swarm.TaskSpec, encodedAuth string) string { + img, imgPlatforms, err := imageDigestAndPlatforms(ctx, cli, taskSpec.ContainerSpec.Image, encodedAuth) + if err != nil { + return digestWarning(taskSpec.ContainerSpec.Image) + } + taskSpec.ContainerSpec.Image = img + if len(imgPlatforms) > 0 { + if taskSpec.Placement == nil { + taskSpec.Placement = &swarm.Placement{} + } + taskSpec.Placement.Platforms = imgPlatforms + } + return "" +} + +func resolvePluginSpecRemote(ctx context.Context, cli DistributionAPIClient, taskSpec *swarm.TaskSpec, encodedAuth string) string { + img, imgPlatforms, err := imageDigestAndPlatforms(ctx, cli, taskSpec.PluginSpec.Remote, encodedAuth) + if err != nil { + return digestWarning(taskSpec.PluginSpec.Remote) + } + taskSpec.PluginSpec.Remote = img + if len(imgPlatforms) > 0 { + if taskSpec.Placement == nil { + taskSpec.Placement = &swarm.Placement{} + } + taskSpec.Placement.Platforms = imgPlatforms + } + return "" +} + +func imageDigestAndPlatforms(ctx context.Context, cli DistributionAPIClient, image, encodedAuth string) (string, []swarm.Platform, error) { + distributionInspect, err := cli.DistributionInspect(ctx, image, DistributionInspectOptions{ + EncodedRegistryAuth: encodedAuth, + }) + var platforms []swarm.Platform + if err != nil { + return "", nil, err + } + + imageWithDigest := imageWithDigestString(image, distributionInspect.Descriptor.Digest) + + if len(distributionInspect.Platforms) > 0 { + platforms = make([]swarm.Platform, 0, len(distributionInspect.Platforms)) + for _, p := range distributionInspect.Platforms { + // clear architecture field for arm. This is a temporary patch to address + // https://github.com/docker/swarmkit/issues/2294. 
The issue is that while + // image manifests report "arm" as the architecture, the node reports + // something like "armv7l" (includes the variant), which causes arm images + // to stop working with swarm mode. This patch removes the architecture + // constraint for arm images to ensure tasks get scheduled. + arch := p.Architecture + if strings.ToLower(arch) == "arm" { + arch = "" + } + platforms = append(platforms, swarm.Platform{ + Architecture: arch, + OS: p.OS, + }) + } + } + return imageWithDigest, platforms, err +} + +// imageWithDigestString takes an image string and a digest, and updates +// the image string if it didn't originally contain a digest. It returns +// image unmodified in other situations. +func imageWithDigestString(image string, dgst digest.Digest) string { + namedRef, err := reference.ParseNormalizedNamed(image) + if err == nil { + if _, hasDigest := namedRef.(reference.Digested); !hasDigest { + // ensure that image gets a default tag if none is provided + img, err := reference.WithDigest(namedRef, dgst) + if err == nil { + return reference.FamiliarString(img) + } + } + } + return image +} + +// imageWithTagString takes an image string, and returns a tagged image +// string, adding a 'latest' tag if one was not provided. It returns an +// empty string if a canonical reference was provided +func imageWithTagString(image string) string { + namedRef, err := reference.ParseNormalizedNamed(image) + if err == nil { + return reference.FamiliarString(reference.TagNameOnly(namedRef)) + } + return "" +} + +// digestWarning constructs a formatted warning string using the +// image name that could not be pinned by digest. The formatting +// is hardcoded, but could me made smarter in the future +func digestWarning(image string) string { + return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. 
Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image) +} + +func validateServiceSpec(s swarm.ServiceSpec) error { + if s.TaskTemplate.ContainerSpec != nil && s.TaskTemplate.PluginSpec != nil { + return errors.New("must not specify both a container spec and a plugin spec in the task template") + } + if s.TaskTemplate.PluginSpec != nil && s.TaskTemplate.Runtime != swarm.RuntimePlugin { + return errors.New("mismatched runtime with plugin spec") + } + if s.TaskTemplate.ContainerSpec != nil && (s.TaskTemplate.Runtime != "" && s.TaskTemplate.Runtime != swarm.RuntimeContainer) { + return errors.New("mismatched runtime with container spec") + } + return nil +} diff --git a/vendor/github.com/moby/moby/client/service_inspect.go b/vendor/github.com/moby/moby/client/service_inspect.go new file mode 100644 index 00000000000..9bda43f8616 --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_inspect.go @@ -0,0 +1,40 @@ +package client + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + "github.com/moby/moby/api/types/swarm" +) + +// ServiceInspectOptions holds parameters related to the service inspect operation. +type ServiceInspectOptions struct { + InsertDefaults bool +} + +// ServiceInspectResult represents the result of a service inspect operation. +type ServiceInspectResult struct { + Service swarm.Service + Raw json.RawMessage +} + +// ServiceInspect retrieves detailed information about a specific service by its ID. 
+func (cli *Client) ServiceInspect(ctx context.Context, serviceID string, options ServiceInspectOptions) (ServiceInspectResult, error) { + serviceID, err := trimID("service", serviceID) + if err != nil { + return ServiceInspectResult{}, err + } + + query := url.Values{} + query.Set("insertDefaults", fmt.Sprintf("%v", options.InsertDefaults)) + resp, err := cli.get(ctx, "/services/"+serviceID, query, nil) + if err != nil { + return ServiceInspectResult{}, err + } + + var out ServiceInspectResult + out.Raw, err = decodeWithRaw(resp, &out.Service) + return out, err +} diff --git a/vendor/github.com/moby/moby/client/service_list.go b/vendor/github.com/moby/moby/client/service_list.go new file mode 100644 index 00000000000..94b5204be33 --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_list.go @@ -0,0 +1,44 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/swarm" +) + +// ServiceListOptions holds parameters to list services with. +type ServiceListOptions struct { + Filters Filters + + // Status indicates whether the server should include the service task + // count of running and desired tasks. + Status bool +} + +// ServiceListResult represents the result of a service list operation. +type ServiceListResult struct { + Items []swarm.Service +} + +// ServiceList returns the list of services. 
+func (cli *Client) ServiceList(ctx context.Context, options ServiceListOptions) (ServiceListResult, error) { + query := url.Values{} + + options.Filters.updateURLValues(query) + + if options.Status { + query.Set("status", "true") + } + + resp, err := cli.get(ctx, "/services", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ServiceListResult{}, err + } + + var services []swarm.Service + err = json.NewDecoder(resp.Body).Decode(&services) + return ServiceListResult{Items: services}, err +} diff --git a/vendor/github.com/moby/moby/client/service_logs.go b/vendor/github.com/moby/moby/client/service_logs.go new file mode 100644 index 00000000000..57099ffe46b --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_logs.go @@ -0,0 +1,106 @@ +package client + +import ( + "context" + "fmt" + "io" + "net/url" + "time" + + "github.com/moby/moby/client/internal/timestamp" +) + +// ServiceLogsOptions holds parameters to filter logs with. +type ServiceLogsOptions struct { + ShowStdout bool + ShowStderr bool + Since string + Until string + Timestamps bool + Follow bool + Tail string + Details bool +} + +// ServiceLogsResult holds the result of a service logs operation. +// It implements [io.ReadCloser]. +// It's up to the caller to close the stream. +type ServiceLogsResult interface { + io.ReadCloser +} + +// ServiceLogs returns the logs generated by a service in a [ServiceLogsResult]. +// as an [io.ReadCloser]. Callers should close the stream. 
+// +// The underlying [io.ReadCloser] is automatically closed if the context is canceled, +func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options ServiceLogsOptions) (ServiceLogsResult, error) { + // TODO(thaJeztah): this function needs documentation about the format of ths stream (similar to for container logs) + // TODO(thaJeztah): migrate CLI utilities to the client where suitable; https://github.com/docker/cli/blob/v29.0.0-rc.1/cli/command/service/logs.go#L73-L348 + + serviceID, err := trimID("service", serviceID) + if err != nil { + return nil, err + } + + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timestamp.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, fmt.Errorf(`invalid value for "since": %w`, err) + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + switch options.Tail { + case "", "all": + // don't send option; default is to show all logs. + // + // The default on the daemon-side is to show all logs; account for + // some special values. The CLI may set a magic "all" value that's + // used as default. + // + // Given that the default is to show all logs, we can ignore these + // values, and don't send "tail". 
+ // + // see https://github.com/moby/moby/blob/0df791cb72b568eeadba2267fe9a5040d12b0487/daemon/logs.go#L75-L78 + // see https://github.com/moby/moby/blob/4d20b6fe56dfb2b06f4a5dd1f32913215a9c317b/daemon/cluster/services.go#L425-L449 + default: + query.Set("tail", options.Tail) + } + + resp, err := cli.get(ctx, "/services/"+serviceID+"/logs", query, nil) + if err != nil { + return nil, err + } + return &serviceLogsResult{ + ReadCloser: newCancelReadCloser(ctx, resp.Body), + }, nil +} + +type serviceLogsResult struct { + io.ReadCloser +} + +var ( + _ io.ReadCloser = (*serviceLogsResult)(nil) + _ ServiceLogsResult = (*serviceLogsResult)(nil) +) diff --git a/vendor/github.com/moby/moby/client/service_remove.go b/vendor/github.com/moby/moby/client/service_remove.go new file mode 100644 index 00000000000..163689b693e --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_remove.go @@ -0,0 +1,25 @@ +package client + +import "context" + +// ServiceRemoveOptions contains options for removing a service. +type ServiceRemoveOptions struct { + // No options currently; placeholder for future use +} + +// ServiceRemoveResult contains the result of removing a service. +type ServiceRemoveResult struct { + // No fields currently; placeholder for future use +} + +// ServiceRemove kills and removes a service. 
+func (cli *Client) ServiceRemove(ctx context.Context, serviceID string, options ServiceRemoveOptions) (ServiceRemoveResult, error) { + serviceID, err := trimID("service", serviceID) + if err != nil { + return ServiceRemoveResult{}, err + } + + resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) + defer ensureReaderClosed(resp) + return ServiceRemoveResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/service_update.go b/vendor/github.com/moby/moby/client/service_update.go new file mode 100644 index 00000000000..2505fe4b8ee --- /dev/null +++ b/vendor/github.com/moby/moby/client/service_update.go @@ -0,0 +1,114 @@ +package client + +import ( + "context" + "encoding/json" + "net/http" + "net/url" + + "github.com/moby/moby/api/types/registry" + "github.com/moby/moby/api/types/swarm" +) + +// ServiceUpdateOptions contains the options to be used for updating services. +type ServiceUpdateOptions struct { + Version swarm.Version + Spec swarm.ServiceSpec + + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string + + // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate + // into this field. While it does open API users up to racy writes, most + // users may not need that level of consistency in practice. + + // RegistryAuthFrom specifies where to find the registry authorization + // credentials if they are not given in EncodedRegistryAuth. Valid + // values are "spec" and "previous-spec". + RegistryAuthFrom swarm.RegistryAuthSource + + // Rollback indicates whether a server-side rollback should be + // performed. When this is set, the provided spec will be ignored. + // The valid values are "previous" and "none". An empty value is the + // same as "none". + Rollback string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. 
A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. + QueryRegistry bool +} + +// ServiceUpdateResult represents the result of a service update. +type ServiceUpdateResult struct { + // Warnings contains any warnings that occurred during the update. + Warnings []string +} + +// ServiceUpdate updates a Service. The version number is required to avoid +// conflicting writes. It must be the value as set *before* the update. +// You can find this value in the [swarm.Service.Meta] field, which can +// be found using [Client.ServiceInspectWithRaw]. +func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, options ServiceUpdateOptions) (ServiceUpdateResult, error) { + serviceID, err := trimID("service", serviceID) + if err != nil { + return ServiceUpdateResult{}, err + } + + if err := validateServiceSpec(options.Spec); err != nil { + return ServiceUpdateResult{}, err + } + + query := url.Values{} + if options.RegistryAuthFrom != "" { + query.Set("registryAuthFrom", string(options.RegistryAuthFrom)) + } + + if options.Rollback != "" { + query.Set("rollback", options.Rollback) + } + + query.Set("version", options.Version.String()) + + // ensure that the image is tagged + var warnings []string + switch { + case options.Spec.TaskTemplate.ContainerSpec != nil: + if taggedImg := imageWithTagString(options.Spec.TaskTemplate.ContainerSpec.Image); taggedImg != "" { + options.Spec.TaskTemplate.ContainerSpec.Image = taggedImg + } + if options.QueryRegistry { + if warning := resolveContainerSpecImage(ctx, cli, &options.Spec.TaskTemplate, options.EncodedRegistryAuth); warning != "" { + warnings = append(warnings, warning) + } + } + case options.Spec.TaskTemplate.PluginSpec != nil: + if taggedImg := imageWithTagString(options.Spec.TaskTemplate.PluginSpec.Remote); taggedImg != "" { + options.Spec.TaskTemplate.PluginSpec.Remote = taggedImg + } + if 
options.QueryRegistry { + if warning := resolvePluginSpecRemote(ctx, cli, &options.Spec.TaskTemplate, options.EncodedRegistryAuth); warning != "" { + warnings = append(warnings, warning) + } + } + } + + headers := http.Header{} + if options.EncodedRegistryAuth != "" { + headers.Set(registry.AuthHeader, options.EncodedRegistryAuth) + } + resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, options.Spec, headers) + defer ensureReaderClosed(resp) + if err != nil { + return ServiceUpdateResult{}, err + } + + var response swarm.ServiceUpdateResponse + err = json.NewDecoder(resp.Body).Decode(&response) + warnings = append(warnings, response.Warnings...) + return ServiceUpdateResult{Warnings: warnings}, err +} diff --git a/vendor/github.com/moby/moby/client/swarm_get_unlock_key.go b/vendor/github.com/moby/moby/client/swarm_get_unlock_key.go new file mode 100644 index 00000000000..03ecce4094a --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_get_unlock_key.go @@ -0,0 +1,26 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/swarm" +) + +// SwarmGetUnlockKeyResult contains the swarm unlock key. +type SwarmGetUnlockKeyResult struct { + Key string +} + +// SwarmGetUnlockKey retrieves the swarm's unlock key. 
+func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (SwarmGetUnlockKeyResult, error) { + resp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return SwarmGetUnlockKeyResult{}, err + } + + var response swarm.UnlockKeyResponse + err = json.NewDecoder(resp.Body).Decode(&response) + return SwarmGetUnlockKeyResult{Key: response.UnlockKey}, err +} diff --git a/vendor/github.com/moby/moby/client/swarm_init.go b/vendor/github.com/moby/moby/client/swarm_init.go new file mode 100644 index 00000000000..caad560856b --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_init.go @@ -0,0 +1,54 @@ +package client + +import ( + "context" + "encoding/json" + "net/netip" + + "github.com/moby/moby/api/types/swarm" +) + +// SwarmInitOptions contains options for initializing a new swarm. +type SwarmInitOptions struct { + ListenAddr string + AdvertiseAddr string + DataPathAddr string + DataPathPort uint32 + ForceNewCluster bool + Spec swarm.Spec + AutoLockManagers bool + Availability swarm.NodeAvailability + DefaultAddrPool []netip.Prefix + SubnetSize uint32 +} + +// SwarmInitResult contains the result of a SwarmInit operation. +type SwarmInitResult struct { + NodeID string +} + +// SwarmInit initializes the swarm. 
+func (cli *Client) SwarmInit(ctx context.Context, options SwarmInitOptions) (SwarmInitResult, error) { + req := swarm.InitRequest{ + ListenAddr: options.ListenAddr, + AdvertiseAddr: options.AdvertiseAddr, + DataPathAddr: options.DataPathAddr, + DataPathPort: options.DataPathPort, + ForceNewCluster: options.ForceNewCluster, + Spec: options.Spec, + AutoLockManagers: options.AutoLockManagers, + Availability: options.Availability, + DefaultAddrPool: options.DefaultAddrPool, + SubnetSize: options.SubnetSize, + } + + resp, err := cli.post(ctx, "/swarm/init", nil, req, nil) + defer ensureReaderClosed(resp) + if err != nil { + return SwarmInitResult{}, err + } + + var nodeID string + err = json.NewDecoder(resp.Body).Decode(&nodeID) + return SwarmInitResult{NodeID: nodeID}, err +} diff --git a/vendor/github.com/moby/moby/client/swarm_inspect.go b/vendor/github.com/moby/moby/client/swarm_inspect.go new file mode 100644 index 00000000000..40e1d018a81 --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_inspect.go @@ -0,0 +1,31 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/swarm" +) + +// SwarmInspectOptions holds options for inspecting a swarm. +type SwarmInspectOptions struct { + // Add future optional parameters here +} + +// SwarmInspectResult represents the result of a SwarmInspect operation. +type SwarmInspectResult struct { + Swarm swarm.Swarm +} + +// SwarmInspect inspects the swarm. 
+func (cli *Client) SwarmInspect(ctx context.Context, options SwarmInspectOptions) (SwarmInspectResult, error) { + resp, err := cli.get(ctx, "/swarm", nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return SwarmInspectResult{}, err + } + + var s swarm.Swarm + err = json.NewDecoder(resp.Body).Decode(&s) + return SwarmInspectResult{Swarm: s}, err +} diff --git a/vendor/github.com/moby/moby/client/swarm_join.go b/vendor/github.com/moby/moby/client/swarm_join.go new file mode 100644 index 00000000000..66a7544821e --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_join.go @@ -0,0 +1,38 @@ +package client + +import ( + "context" + + "github.com/moby/moby/api/types/swarm" +) + +// SwarmJoinOptions specifies options for joining a swarm. +type SwarmJoinOptions struct { + ListenAddr string + AdvertiseAddr string + DataPathAddr string + RemoteAddrs []string + JoinToken string // accept by secret + Availability swarm.NodeAvailability +} + +// SwarmJoinResult contains the result of joining a swarm. +type SwarmJoinResult struct { + // No fields currently; placeholder for future use +} + +// SwarmJoin joins the swarm. +func (cli *Client) SwarmJoin(ctx context.Context, options SwarmJoinOptions) (SwarmJoinResult, error) { + req := swarm.JoinRequest{ + ListenAddr: options.ListenAddr, + AdvertiseAddr: options.AdvertiseAddr, + DataPathAddr: options.DataPathAddr, + RemoteAddrs: options.RemoteAddrs, + JoinToken: options.JoinToken, + Availability: options.Availability, + } + + resp, err := cli.post(ctx, "/swarm/join", nil, req, nil) + defer ensureReaderClosed(resp) + return SwarmJoinResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/swarm_leave.go b/vendor/github.com/moby/moby/client/swarm_leave.go new file mode 100644 index 00000000000..a65a13de3f4 --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_leave.go @@ -0,0 +1,25 @@ +package client + +import ( + "context" + "net/url" +) + +// SwarmLeaveOptions contains options for leaving a swarm. 
+type SwarmLeaveOptions struct { + Force bool +} + +// SwarmLeaveResult represents the result of a SwarmLeave operation. +type SwarmLeaveResult struct{} + +// SwarmLeave leaves the swarm. +func (cli *Client) SwarmLeave(ctx context.Context, options SwarmLeaveOptions) (SwarmLeaveResult, error) { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil) + defer ensureReaderClosed(resp) + return SwarmLeaveResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/swarm_unlock.go b/vendor/github.com/moby/moby/client/swarm_unlock.go new file mode 100644 index 00000000000..92335afb546 --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_unlock.go @@ -0,0 +1,25 @@ +package client + +import ( + "context" + + "github.com/moby/moby/api/types/swarm" +) + +// SwarmUnlockOptions specifies options for unlocking a swarm. +type SwarmUnlockOptions struct { + Key string +} + +// SwarmUnlockResult represents the result of unlocking a swarm. +type SwarmUnlockResult struct{} + +// SwarmUnlock unlocks locked swarm. +func (cli *Client) SwarmUnlock(ctx context.Context, options SwarmUnlockOptions) (SwarmUnlockResult, error) { + req := &swarm.UnlockRequest{ + UnlockKey: options.Key, + } + resp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil) + defer ensureReaderClosed(resp) + return SwarmUnlockResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/swarm_update.go b/vendor/github.com/moby/moby/client/swarm_update.go new file mode 100644 index 00000000000..81f62b2c024 --- /dev/null +++ b/vendor/github.com/moby/moby/client/swarm_update.go @@ -0,0 +1,33 @@ +package client + +import ( + "context" + "net/url" + "strconv" + + "github.com/moby/moby/api/types/swarm" +) + +// SwarmUpdateOptions contains options for updating a swarm. 
+type SwarmUpdateOptions struct { + Version swarm.Version + Spec swarm.Spec + RotateWorkerToken bool + RotateManagerToken bool + RotateManagerUnlockKey bool +} + +// SwarmUpdateResult represents the result of a SwarmUpdate operation. +type SwarmUpdateResult struct{} + +// SwarmUpdate updates the swarm. +func (cli *Client) SwarmUpdate(ctx context.Context, options SwarmUpdateOptions) (SwarmUpdateResult, error) { + query := url.Values{} + query.Set("version", options.Version.String()) + query.Set("rotateWorkerToken", strconv.FormatBool(options.RotateWorkerToken)) + query.Set("rotateManagerToken", strconv.FormatBool(options.RotateManagerToken)) + query.Set("rotateManagerUnlockKey", strconv.FormatBool(options.RotateManagerUnlockKey)) + resp, err := cli.post(ctx, "/swarm/update", query, options.Spec, nil) + defer ensureReaderClosed(resp) + return SwarmUpdateResult{}, err +} diff --git a/vendor/github.com/moby/moby/client/system_disk_usage.go b/vendor/github.com/moby/moby/client/system_disk_usage.go new file mode 100644 index 00000000000..1bb2d0d7efa --- /dev/null +++ b/vendor/github.com/moby/moby/client/system_disk_usage.go @@ -0,0 +1,334 @@ +package client + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + "slices" + + "github.com/moby/moby/api/types/build" + "github.com/moby/moby/api/types/container" + "github.com/moby/moby/api/types/image" + "github.com/moby/moby/api/types/system" + "github.com/moby/moby/api/types/volume" + "github.com/moby/moby/client/pkg/versions" +) + +// DiskUsageOptions holds parameters for [Client.DiskUsage] operations. +type DiskUsageOptions struct { + // Containers controls whether container disk usage should be computed. + Containers bool + + // Images controls whether image disk usage should be computed. + Images bool + + // BuildCache controls whether build cache disk usage should be computed. + BuildCache bool + + // Volumes controls whether volume disk usage should be computed. 
+ Volumes bool + + // Verbose enables more detailed disk usage information. + Verbose bool +} + +// DiskUsageResult is the result of [Client.DiskUsage] operations. +type DiskUsageResult struct { + // Containers holds container disk usage information. + Containers ContainersDiskUsage + + // Images holds image disk usage information. + Images ImagesDiskUsage + + // BuildCache holds build cache disk usage information. + BuildCache BuildCacheDiskUsage + + // Volumes holds volume disk usage information. + Volumes VolumesDiskUsage +} + +// ContainersDiskUsage contains disk usage information for containers. +type ContainersDiskUsage struct { + // ActiveCount is the number of active containers. + ActiveCount int64 + + // TotalCount is the total number of containers. + TotalCount int64 + + // Reclaimable is the amount of disk space that can be reclaimed. + Reclaimable int64 + + // TotalSize is the total disk space used by all containers. + TotalSize int64 + + // Items holds detailed information about each container. + Items []container.Summary +} + +// ImagesDiskUsage contains disk usage information for images. +type ImagesDiskUsage struct { + // ActiveCount is the number of active images. + ActiveCount int64 + + // TotalCount is the total number of images. + TotalCount int64 + + // Reclaimable is the amount of disk space that can be reclaimed. + Reclaimable int64 + + // TotalSize is the total disk space used by all images. + TotalSize int64 + + // Items holds detailed information about each image. + Items []image.Summary +} + +// VolumesDiskUsage contains disk usage information for volumes. +type VolumesDiskUsage struct { + // ActiveCount is the number of active volumes. + ActiveCount int64 + + // TotalCount is the total number of volumes. + TotalCount int64 + + // Reclaimable is the amount of disk space that can be reclaimed. + Reclaimable int64 + + // TotalSize is the total disk space used by all volumes. 
+ TotalSize int64 + + // Items holds detailed information about each volume. + Items []volume.Volume +} + +// BuildCacheDiskUsage contains disk usage information for build cache. +type BuildCacheDiskUsage struct { + // ActiveCount is the number of active build cache records. + ActiveCount int64 + + // TotalCount is the total number of build cache records. + TotalCount int64 + + // Reclaimable is the amount of disk space that can be reclaimed. + Reclaimable int64 + + // TotalSize is the total disk space used by all build cache records. + TotalSize int64 + + // Items holds detailed information about each build cache record. + Items []build.CacheRecord +} + +// DiskUsage requests the current data usage from the daemon. +func (cli *Client) DiskUsage(ctx context.Context, options DiskUsageOptions) (DiskUsageResult, error) { + query := url.Values{} + + for _, t := range []struct { + flag bool + sysObj system.DiskUsageObject + }{ + {options.Containers, system.ContainerObject}, + {options.Images, system.ImageObject}, + {options.Volumes, system.VolumeObject}, + {options.BuildCache, system.BuildCacheObject}, + } { + if t.flag { + query.Add("type", string(t.sysObj)) + } + } + + if options.Verbose { + query.Set("verbose", "1") + } + + resp, err := cli.get(ctx, "/system/df", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return DiskUsageResult{}, err + } + + if versions.LessThan(cli.version, "1.52") { + // Generate result from a legacy response. 
+ var du legacyDiskUsage + if err := json.NewDecoder(resp.Body).Decode(&du); err != nil { + return DiskUsageResult{}, fmt.Errorf("retrieving disk usage: %v", err) + } + + return diskUsageResultFromLegacyAPI(&du), nil + } + + var du system.DiskUsage + if err := json.NewDecoder(resp.Body).Decode(&du); err != nil { + return DiskUsageResult{}, fmt.Errorf("retrieving disk usage: %v", err) + } + + var r DiskUsageResult + if idu := du.ImageUsage; idu != nil { + r.Images = ImagesDiskUsage{ + ActiveCount: idu.ActiveCount, + Reclaimable: idu.Reclaimable, + TotalCount: idu.TotalCount, + TotalSize: idu.TotalSize, + } + + if options.Verbose { + r.Images.Items = slices.Clone(idu.Items) + } + } + + if cdu := du.ContainerUsage; cdu != nil { + r.Containers = ContainersDiskUsage{ + ActiveCount: cdu.ActiveCount, + Reclaimable: cdu.Reclaimable, + TotalCount: cdu.TotalCount, + TotalSize: cdu.TotalSize, + } + + if options.Verbose { + r.Containers.Items = slices.Clone(cdu.Items) + } + } + + if bdu := du.BuildCacheUsage; bdu != nil { + r.BuildCache = BuildCacheDiskUsage{ + ActiveCount: bdu.ActiveCount, + Reclaimable: bdu.Reclaimable, + TotalCount: bdu.TotalCount, + TotalSize: bdu.TotalSize, + } + + if options.Verbose { + r.BuildCache.Items = slices.Clone(bdu.Items) + } + } + + if vdu := du.VolumeUsage; vdu != nil { + r.Volumes = VolumesDiskUsage{ + ActiveCount: vdu.ActiveCount, + Reclaimable: vdu.Reclaimable, + TotalCount: vdu.TotalCount, + TotalSize: vdu.TotalSize, + } + + if options.Verbose { + r.Volumes.Items = slices.Clone(vdu.Items) + } + } + + return r, nil +} + +// legacyDiskUsage is the response as was used by API < v1.52. 
+type legacyDiskUsage struct { + LayersSize int64 `json:"LayersSize,omitempty"` + Images []image.Summary `json:"Images,omitzero"` + Containers []container.Summary `json:"Containers,omitzero"` + Volumes []volume.Volume `json:"Volumes,omitzero"` + BuildCache []build.CacheRecord `json:"BuildCache,omitzero"` +} + +func diskUsageResultFromLegacyAPI(du *legacyDiskUsage) DiskUsageResult { + return DiskUsageResult{ + Images: imageDiskUsageFromLegacyAPI(du), + Containers: containerDiskUsageFromLegacyAPI(du), + BuildCache: buildCacheDiskUsageFromLegacyAPI(du), + Volumes: volumeDiskUsageFromLegacyAPI(du), + } +} + +func imageDiskUsageFromLegacyAPI(du *legacyDiskUsage) ImagesDiskUsage { + idu := ImagesDiskUsage{ + TotalSize: du.LayersSize, + TotalCount: int64(len(du.Images)), + Items: du.Images, + } + + var used int64 + for _, i := range idu.Items { + if i.Containers > 0 { + idu.ActiveCount++ + + if i.Size == -1 || i.SharedSize == -1 { + continue + } + used += (i.Size - i.SharedSize) + } + } + + if idu.TotalCount > 0 { + idu.Reclaimable = idu.TotalSize - used + } + + return idu +} + +func containerDiskUsageFromLegacyAPI(du *legacyDiskUsage) ContainersDiskUsage { + cdu := ContainersDiskUsage{ + TotalCount: int64(len(du.Containers)), + Items: du.Containers, + } + + var used int64 + for _, c := range cdu.Items { + cdu.TotalSize += c.SizeRw + switch c.State { + case container.StateRunning, container.StatePaused, container.StateRestarting: + cdu.ActiveCount++ + used += c.SizeRw + case container.StateCreated, container.StateRemoving, container.StateExited, container.StateDead: + // not active + } + } + + cdu.Reclaimable = cdu.TotalSize - used + return cdu +} + +func buildCacheDiskUsageFromLegacyAPI(du *legacyDiskUsage) BuildCacheDiskUsage { + bdu := BuildCacheDiskUsage{ + TotalCount: int64(len(du.BuildCache)), + Items: du.BuildCache, + } + + var used int64 + for _, b := range du.BuildCache { + if !b.Shared { + bdu.TotalSize += b.Size + } + + if b.InUse { + bdu.ActiveCount++ + if 
!b.Shared { + used += b.Size + } + } + } + + bdu.Reclaimable = bdu.TotalSize - used + return bdu +} + +func volumeDiskUsageFromLegacyAPI(du *legacyDiskUsage) VolumesDiskUsage { + vdu := VolumesDiskUsage{ + TotalCount: int64(len(du.Volumes)), + Items: du.Volumes, + } + + var used int64 + for _, v := range vdu.Items { + // Ignore volumes with no usage data + if v.UsageData != nil { + if v.UsageData.RefCount > 0 { + vdu.ActiveCount++ + used += v.UsageData.Size + } + if v.UsageData.Size > 0 { + vdu.TotalSize += v.UsageData.Size + } + } + } + + vdu.Reclaimable = vdu.TotalSize - used + return vdu +} diff --git a/vendor/github.com/moby/moby/client/system_events.go b/vendor/github.com/moby/moby/client/system_events.go new file mode 100644 index 00000000000..b0ca71a19b0 --- /dev/null +++ b/vendor/github.com/moby/moby/client/system_events.go @@ -0,0 +1,115 @@ +package client + +import ( + "context" + "net/http" + "net/url" + "time" + + "github.com/moby/moby/api/types" + "github.com/moby/moby/api/types/events" + "github.com/moby/moby/client/internal" + "github.com/moby/moby/client/internal/timestamp" +) + +// EventsListOptions holds parameters to filter events with. +type EventsListOptions struct { + Since string + Until string + Filters Filters +} + +// EventsResult holds the result of an Events query. +type EventsResult struct { + Messages <-chan events.Message + Err <-chan error +} + +// Events returns a stream of events in the daemon. It's up to the caller to close the stream +// by cancelling the context. Once the stream has been completely read an [io.EOF] error is +// sent over the error channel. If an error is sent, all processing is stopped. It's up +// to the caller to reopen the stream in the event of an error by reinvoking this method. 
+func (cli *Client) Events(ctx context.Context, options EventsListOptions) EventsResult { + messages := make(chan events.Message) + errs := make(chan error, 1) + + started := make(chan struct{}) + go func() { + defer close(errs) + + query, err := buildEventsQueryParams(options) + if err != nil { + close(started) + errs <- err + return + } + + headers := http.Header{} + headers.Add("Accept", types.MediaTypeJSONLines) // Implicit q=1.0; in case server doesn't parse correctly. + headers.Add("Accept", types.MediaTypeNDJSON+";q=0.9") + headers.Add("Accept", types.MediaTypeJSONSequence+";q=0.5") + resp, err := cli.get(ctx, "/events", query, headers) + if err != nil { + close(started) + errs <- err + return + } + defer resp.Body.Close() + + contentType := resp.Header.Get("Content-Type") + decoder := internal.NewJSONStreamDecoder(resp.Body, contentType) + + close(started) + for { + select { + case <-ctx.Done(): + errs <- ctx.Err() + return + default: + var event events.Message + if err := decoder(&event); err != nil { + errs <- err + return + } + + select { + case messages <- event: + case <-ctx.Done(): + errs <- ctx.Err() + return + } + } + } + }() + <-started + + return EventsResult{ + Messages: messages, + Err: errs, + } +} + +func buildEventsQueryParams(options EventsListOptions) (url.Values, error) { + query := url.Values{} + ref := time.Now() + + if options.Since != "" { + ts, err := timestamp.GetTimestamp(options.Since, ref) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Until != "" { + ts, err := timestamp.GetTimestamp(options.Until, ref) + if err != nil { + return nil, err + } + query.Set("until", ts) + } + + options.Filters.updateURLValues(query) + + return query, nil +} diff --git a/vendor/github.com/moby/moby/client/system_info.go b/vendor/github.com/moby/moby/client/system_info.go new file mode 100644 index 00000000000..4c0a2238e17 --- /dev/null +++ b/vendor/github.com/moby/moby/client/system_info.go @@ -0,0 +1,34 @@ 
+package client + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + "github.com/moby/moby/api/types/system" +) + +type InfoOptions struct { + // No options currently; placeholder for future use +} + +type SystemInfoResult struct { + Info system.Info +} + +// Info returns information about the docker server. +func (cli *Client) Info(ctx context.Context, options InfoOptions) (SystemInfoResult, error) { + resp, err := cli.get(ctx, "/info", url.Values{}, nil) + defer ensureReaderClosed(resp) + if err != nil { + return SystemInfoResult{}, err + } + + var info system.Info + if err := json.NewDecoder(resp.Body).Decode(&info); err != nil { + return SystemInfoResult{}, fmt.Errorf("Error reading remote info: %v", err) + } + + return SystemInfoResult{Info: info}, nil +} diff --git a/vendor/github.com/moby/moby/client/task_inspect.go b/vendor/github.com/moby/moby/client/task_inspect.go new file mode 100644 index 00000000000..96edcb09f20 --- /dev/null +++ b/vendor/github.com/moby/moby/client/task_inspect.go @@ -0,0 +1,36 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/swarm" +) + +// TaskInspectOptions contains options for inspecting a task. +type TaskInspectOptions struct { + // Currently no options are defined. +} + +// TaskInspectResult contains the result of a task inspection. +type TaskInspectResult struct { + Task swarm.Task + Raw json.RawMessage +} + +// TaskInspect returns the task information and its raw representation. 
+func (cli *Client) TaskInspect(ctx context.Context, taskID string, options TaskInspectOptions) (TaskInspectResult, error) { + taskID, err := trimID("task", taskID) + if err != nil { + return TaskInspectResult{}, err + } + + resp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) + if err != nil { + return TaskInspectResult{}, err + } + + var out TaskInspectResult + out.Raw, err = decodeWithRaw(resp, &out.Task) + return out, err +} diff --git a/vendor/github.com/moby/moby/client/task_list.go b/vendor/github.com/moby/moby/client/task_list.go new file mode 100644 index 00000000000..5f7c41bb9dc --- /dev/null +++ b/vendor/github.com/moby/moby/client/task_list.go @@ -0,0 +1,36 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/swarm" +) + +// TaskListOptions holds parameters to list tasks with. +type TaskListOptions struct { + Filters Filters +} + +// TaskListResult contains the result of a task list operation. +type TaskListResult struct { + Items []swarm.Task +} + +// TaskList returns the list of tasks. +func (cli *Client) TaskList(ctx context.Context, options TaskListOptions) (TaskListResult, error) { + query := url.Values{} + + options.Filters.updateURLValues(query) + + resp, err := cli.get(ctx, "/tasks", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return TaskListResult{}, err + } + + var tasks []swarm.Task + err = json.NewDecoder(resp.Body).Decode(&tasks) + return TaskListResult{Items: tasks}, err +} diff --git a/vendor/github.com/moby/moby/client/task_logs.go b/vendor/github.com/moby/moby/client/task_logs.go new file mode 100644 index 00000000000..e4de019f308 --- /dev/null +++ b/vendor/github.com/moby/moby/client/task_logs.go @@ -0,0 +1,84 @@ +package client + +import ( + "context" + "io" + "net/url" + "time" + + "github.com/moby/moby/client/internal/timestamp" +) + +// TaskLogsOptions holds parameters to filter logs with. 
+type TaskLogsOptions struct { + ShowStdout bool + ShowStderr bool + Since string + Until string + Timestamps bool + Follow bool + Tail string + Details bool +} + +// TaskLogsResult holds the result of a task logs operation. +// It implements [io.ReadCloser]. +type TaskLogsResult interface { + io.ReadCloser +} + +// TaskLogs returns the logs generated by a service in a [TaskLogsResult]. +// as an [io.ReadCloser]. Callers should close the stream. +// +// The underlying [io.ReadCloser] is automatically closed if the context is canceled, +func (cli *Client) TaskLogs(ctx context.Context, taskID string, options TaskLogsOptions) (TaskLogsResult, error) { + // TODO(thaJeztah): this function needs documentation about the format of ths stream (similar to for container logs) + // TODO(thaJeztah): migrate CLI utilities to the client where suitable; https://github.com/docker/cli/blob/v29.0.0-rc.1/cli/command/service/logs.go#L73-L348 + + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timestamp.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/tasks/"+taskID+"/logs", query, nil) + if err != nil { + return nil, err + } + return &taskLogsResult{ + ReadCloser: newCancelReadCloser(ctx, resp.Body), + }, nil +} + +type taskLogsResult struct { + io.ReadCloser +} + +var ( + _ io.ReadCloser = (*taskLogsResult)(nil) + _ ContainerLogsResult = (*taskLogsResult)(nil) +) diff --git a/vendor/github.com/moby/moby/client/utils.go b/vendor/github.com/moby/moby/client/utils.go new file mode 100644 index 00000000000..4415e0dc5a6 --- /dev/null +++ 
b/vendor/github.com/moby/moby/client/utils.go @@ -0,0 +1,154 @@ +package client + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "sync" + + cerrdefs "github.com/containerd/errdefs" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +type emptyIDError string + +func (e emptyIDError) InvalidParameter() {} + +func (e emptyIDError) Error() string { + return "invalid " + string(e) + " name or ID: value is empty" +} + +// trimID trims the given object-ID / name, returning an error if it's empty. +func trimID(objType, id string) (string, error) { + id = strings.TrimSpace(id) + if id == "" { + return "", emptyIDError(objType) + } + return id, nil +} + +// parseAPIVersion checks v to be a well-formed (".") +// API version. It returns an error if the value is empty or does not +// have the correct format, but does not validate if the API version is +// within the supported range ([MinAPIVersion] <= v <= [MaxAPIVersion]). +// +// It returns version after normalizing, or an error if validation failed. +func parseAPIVersion(version string) (string, error) { + if strings.TrimPrefix(strings.TrimSpace(version), "v") == "" { + return "", cerrdefs.ErrInvalidArgument.WithMessage("value is empty") + } + major, minor, err := parseMajorMinor(version) + if err != nil { + return "", err + } + return fmt.Sprintf("%d.%d", major, minor), nil +} + +// parseMajorMinor is a helper for parseAPIVersion. 
+func parseMajorMinor(v string) (major, minor int, _ error) { + if strings.HasPrefix(v, "v") { + return 0, 0, cerrdefs.ErrInvalidArgument.WithMessage("must be formatted .") + } + if strings.TrimSpace(v) == "" { + return 0, 0, cerrdefs.ErrInvalidArgument.WithMessage("value is empty") + } + + majVer, minVer, ok := strings.Cut(v, ".") + if !ok { + return 0, 0, cerrdefs.ErrInvalidArgument.WithMessage("must be formatted .") + } + major, err := strconv.Atoi(majVer) + if err != nil { + return 0, 0, cerrdefs.ErrInvalidArgument.WithMessage("invalid major version: must be formatted .") + } + minor, err = strconv.Atoi(minVer) + if err != nil { + return 0, 0, cerrdefs.ErrInvalidArgument.WithMessage("invalid minor version: must be formatted .") + } + return major, minor, nil +} + +// encodePlatforms marshals the given platform(s) to JSON format, to +// be used for query-parameters for filtering / selecting platforms. +func encodePlatforms(platform ...ocispec.Platform) ([]string, error) { + if len(platform) == 0 { + return []string{}, nil + } + if len(platform) == 1 { + p, err := encodePlatform(&platform[0]) + if err != nil { + return nil, err + } + return []string{p}, nil + } + + seen := make(map[string]struct{}, len(platform)) + out := make([]string, 0, len(platform)) + for i := range platform { + p, err := encodePlatform(&platform[i]) + if err != nil { + return nil, err + } + if _, ok := seen[p]; !ok { + out = append(out, p) + seen[p] = struct{}{} + } + } + return out, nil +} + +// encodePlatform marshals the given platform to JSON format, to +// be used for query-parameters for filtering / selecting platforms. 
It +// is used as a helper for encodePlatforms, +func encodePlatform(platform *ocispec.Platform) (string, error) { + p, err := json.Marshal(platform) + if err != nil { + return "", fmt.Errorf("%w: invalid platform: %v", cerrdefs.ErrInvalidArgument, err) + } + return string(p), nil +} + +func decodeWithRaw[T any](resp *http.Response, out *T) (raw json.RawMessage, _ error) { + if resp == nil || resp.Body == nil { + return nil, errors.New("empty response") + } + defer ensureReaderClosed(resp) + + var buf bytes.Buffer + tr := io.TeeReader(resp.Body, &buf) + err := json.NewDecoder(tr).Decode(out) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// newCancelReadCloser wraps rc so it's automatically closed when ctx is canceled. +// Close is idempotent and returns the first error from rc.Close. +func newCancelReadCloser(ctx context.Context, rc io.ReadCloser) io.ReadCloser { + crc := &cancelReadCloser{ + rc: rc, + close: sync.OnceValue(rc.Close), + } + crc.stop = context.AfterFunc(ctx, func() { _ = crc.Close() }) + return crc +} + +type cancelReadCloser struct { + rc io.ReadCloser + close func() error + stop func() bool +} + +func (c *cancelReadCloser) Read(p []byte) (int, error) { return c.rc.Read(p) } + +func (c *cancelReadCloser) Close() error { + c.stop() // unregister AfterFunc + return c.close() +} diff --git a/vendor/github.com/moby/moby/client/version.go b/vendor/github.com/moby/moby/client/version.go new file mode 100644 index 00000000000..7fa5a3fa092 --- /dev/null +++ b/vendor/github.com/moby/moby/client/version.go @@ -0,0 +1,81 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/system" +) + +// ServerVersionOptions specifies options for the server version request. +type ServerVersionOptions struct { + // Currently no options are supported. +} + +// ServerVersionResult contains information about the Docker server host. 
+type ServerVersionResult struct { + // Platform is the platform (product name) the server is running on. + Platform PlatformInfo + + // Version is the version of the daemon. + Version string + + // APIVersion is the highest API version supported by the server. + APIVersion string + + // MinAPIVersion is the minimum API version the server supports. + MinAPIVersion string + + // Os is the operating system the server runs on. + Os string + + // Arch is the hardware architecture the server runs on. + Arch string + + // Experimental indicates that the daemon runs with experimental + // features enabled. + // + // Deprecated: this field will be removed in the next version. + Experimental bool + + // Components contains version information for the components making + // up the server. Information in this field is for informational + // purposes, and not part of the API contract. + Components []system.ComponentVersion +} + +// PlatformInfo holds information about the platform (product name) the +// server is running on. +type PlatformInfo struct { + // Name is the name of the platform (for example, "Docker Engine - Community", + // or "Docker Desktop 4.49.0 (208003)") + Name string +} + +// ServerVersion returns information of the Docker server host. +func (cli *Client) ServerVersion(ctx context.Context, _ ServerVersionOptions) (ServerVersionResult, error) { + resp, err := cli.get(ctx, "/version", nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return ServerVersionResult{}, err + } + + var v system.VersionResponse + err = json.NewDecoder(resp.Body).Decode(&v) + if err != nil { + return ServerVersionResult{}, err + } + + return ServerVersionResult{ + Platform: PlatformInfo{ + Name: v.Platform.Name, + }, + Version: v.Version, + APIVersion: v.APIVersion, + MinAPIVersion: v.MinAPIVersion, + Os: v.Os, + Arch: v.Arch, + Experimental: v.Experimental, //nolint:staticcheck // ignore deprecated field. 
+ Components: v.Components, + }, nil +} diff --git a/vendor/github.com/moby/moby/client/volume_create.go b/vendor/github.com/moby/moby/client/volume_create.go new file mode 100644 index 00000000000..674e0633572 --- /dev/null +++ b/vendor/github.com/moby/moby/client/volume_create.go @@ -0,0 +1,42 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/volume" +) + +// VolumeCreateOptions specifies the options to create a volume. +type VolumeCreateOptions struct { + Name string + Driver string + DriverOpts map[string]string + Labels map[string]string + ClusterVolumeSpec *volume.ClusterVolumeSpec +} + +// VolumeCreateResult is the result of a volume creation. +type VolumeCreateResult struct { + Volume volume.Volume +} + +// VolumeCreate creates a volume in the docker host. +func (cli *Client) VolumeCreate(ctx context.Context, options VolumeCreateOptions) (VolumeCreateResult, error) { + createRequest := volume.CreateRequest{ + Name: options.Name, + Driver: options.Driver, + DriverOpts: options.DriverOpts, + Labels: options.Labels, + ClusterVolumeSpec: options.ClusterVolumeSpec, + } + resp, err := cli.post(ctx, "/volumes/create", nil, createRequest, nil) + defer ensureReaderClosed(resp) + if err != nil { + return VolumeCreateResult{}, err + } + + var v volume.Volume + err = json.NewDecoder(resp.Body).Decode(&v) + return VolumeCreateResult{Volume: v}, err +} diff --git a/vendor/github.com/moby/moby/client/volume_inspect.go b/vendor/github.com/moby/moby/client/volume_inspect.go new file mode 100644 index 00000000000..cf00236a2a8 --- /dev/null +++ b/vendor/github.com/moby/moby/client/volume_inspect.go @@ -0,0 +1,36 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/moby/moby/api/types/volume" +) + +// VolumeInspectOptions holds options for inspecting a volume. 
+type VolumeInspectOptions struct { + // Add future optional parameters here +} + +// VolumeInspectResult holds the result from the [Client.VolumeInspect] method. +type VolumeInspectResult struct { + Volume volume.Volume + Raw json.RawMessage +} + +// VolumeInspect returns the information about a specific volume in the docker host. +func (cli *Client) VolumeInspect(ctx context.Context, volumeID string, options VolumeInspectOptions) (VolumeInspectResult, error) { + volumeID, err := trimID("volume", volumeID) + if err != nil { + return VolumeInspectResult{}, err + } + + resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) + if err != nil { + return VolumeInspectResult{}, err + } + + var out VolumeInspectResult + out.Raw, err = decodeWithRaw(resp, &out.Volume) + return out, err +} diff --git a/vendor/github.com/moby/moby/client/volume_list.go b/vendor/github.com/moby/moby/client/volume_list.go new file mode 100644 index 00000000000..989a0292ec2 --- /dev/null +++ b/vendor/github.com/moby/moby/client/volume_list.go @@ -0,0 +1,46 @@ +package client + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/moby/moby/api/types/volume" +) + +// VolumeListOptions holds parameters to list volumes. +type VolumeListOptions struct { + Filters Filters +} + +// VolumeListResult holds the result from the [Client.VolumeList] method. +type VolumeListResult struct { + // List of volumes. + Items []volume.Volume + + // Warnings that occurred when fetching the list of volumes. + Warnings []string +} + +// VolumeList returns the volumes configured in the docker host. 
+func (cli *Client) VolumeList(ctx context.Context, options VolumeListOptions) (VolumeListResult, error) { + query := url.Values{} + + options.Filters.updateURLValues(query) + resp, err := cli.get(ctx, "/volumes", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return VolumeListResult{}, err + } + + var apiResp volume.ListResponse + err = json.NewDecoder(resp.Body).Decode(&apiResp) + if err != nil { + return VolumeListResult{}, err + } + + return VolumeListResult{ + Items: apiResp.Volumes, + Warnings: apiResp.Warnings, + }, nil +} diff --git a/vendor/github.com/moby/moby/client/volume_prune.go b/vendor/github.com/moby/moby/client/volume_prune.go new file mode 100644 index 00000000000..eec0f482baf --- /dev/null +++ b/vendor/github.com/moby/moby/client/volume_prune.go @@ -0,0 +1,55 @@ +package client + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + cerrdefs "github.com/containerd/errdefs" + "github.com/moby/moby/api/types/volume" +) + +// VolumePruneOptions holds parameters to prune volumes. +type VolumePruneOptions struct { + // All controls whether named volumes should also be pruned. By + // default, only anonymous volumes are pruned. + All bool + + // Filters to apply when pruning. + Filters Filters +} + +// VolumePruneResult holds the result from the [Client.VolumePrune] method. 
+type VolumePruneResult struct { + Report volume.PruneReport +} + +// VolumePrune requests the daemon to delete unused data +func (cli *Client) VolumePrune(ctx context.Context, options VolumePruneOptions) (VolumePruneResult, error) { + if options.All { + if _, ok := options.Filters["all"]; ok { + return VolumePruneResult{}, cerrdefs.ErrInvalidArgument.WithMessage(`conflicting options: cannot specify both "all" and "all" filter`) + } + if options.Filters == nil { + options.Filters = Filters{} + } + options.Filters.Add("all", "true") + } + + query := url.Values{} + options.Filters.updateURLValues(query) + + resp, err := cli.post(ctx, "/volumes/prune", query, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return VolumePruneResult{}, err + } + + var report volume.PruneReport + if err := json.NewDecoder(resp.Body).Decode(&report); err != nil { + return VolumePruneResult{}, fmt.Errorf("error retrieving volume prune report: %v", err) + } + + return VolumePruneResult{Report: report}, nil +} diff --git a/vendor/github.com/moby/moby/client/volume_remove.go b/vendor/github.com/moby/moby/client/volume_remove.go new file mode 100644 index 00000000000..0449e08d4a2 --- /dev/null +++ b/vendor/github.com/moby/moby/client/volume_remove.go @@ -0,0 +1,36 @@ +package client + +import ( + "context" + "net/url" +) + +// VolumeRemoveOptions holds options for [Client.VolumeRemove]. +type VolumeRemoveOptions struct { + // Force the removal of the volume + Force bool +} + +// VolumeRemoveResult holds the result of [Client.VolumeRemove], +type VolumeRemoveResult struct { + // Add future fields here. +} + +// VolumeRemove removes a volume from the docker host. 
+func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, options VolumeRemoveOptions) (VolumeRemoveResult, error) { + volumeID, err := trimID("volume", volumeID) + if err != nil { + return VolumeRemoveResult{}, err + } + + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return VolumeRemoveResult{}, err + } + return VolumeRemoveResult{}, nil +} diff --git a/vendor/github.com/moby/moby/client/volume_update.go b/vendor/github.com/moby/moby/client/volume_update.go new file mode 100644 index 00000000000..5aa2a0aa170 --- /dev/null +++ b/vendor/github.com/moby/moby/client/volume_update.go @@ -0,0 +1,40 @@ +package client + +import ( + "context" + "net/url" + + "github.com/moby/moby/api/types/swarm" + "github.com/moby/moby/api/types/volume" +) + +// VolumeUpdateOptions holds options for [Client.VolumeUpdate]. +type VolumeUpdateOptions struct { + Version swarm.Version + // Spec is the ClusterVolumeSpec to update the volume to. + Spec *volume.ClusterVolumeSpec `json:"Spec,omitempty"` +} + +// VolumeUpdateResult holds the result of [Client.VolumeUpdate], +type VolumeUpdateResult struct { + // Add future fields here. +} + +// VolumeUpdate updates a volume. This only works for Cluster Volumes, and +// only some fields can be updated. 
+func (cli *Client) VolumeUpdate(ctx context.Context, volumeID string, options VolumeUpdateOptions) (VolumeUpdateResult, error) { + volumeID, err := trimID("volume", volumeID) + if err != nil { + return VolumeUpdateResult{}, err + } + + query := url.Values{} + query.Set("version", options.Version.String()) + + resp, err := cli.put(ctx, "/volumes/"+volumeID, query, options, nil) + defer ensureReaderClosed(resp) + if err != nil { + return VolumeUpdateResult{}, err + } + return VolumeUpdateResult{}, nil +} diff --git a/vendor/github.com/oklog/ulid/.gitignore b/vendor/github.com/oklog/ulid/.gitignore deleted file mode 100644 index c92c4d56084..00000000000 --- a/vendor/github.com/oklog/ulid/.gitignore +++ /dev/null @@ -1,29 +0,0 @@ -#### joe made this: http://goel.io/joe - -#####=== Go ===##### - -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - diff --git a/vendor/github.com/oklog/ulid/.travis.yml b/vendor/github.com/oklog/ulid/.travis.yml deleted file mode 100644 index 43eb762fa34..00000000000 --- a/vendor/github.com/oklog/ulid/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -language: go -sudo: false -go: - - 1.10.x -install: - - go get -v github.com/golang/lint/golint - - go get golang.org/x/tools/cmd/cover - - go get github.com/mattn/goveralls - - go get -d -t -v ./... - - go build -v ./... -script: - - go vet ./... - - $HOME/gopath/bin/golint . - - go test -v -race ./... 
- - go test -v -covermode=count -coverprofile=cov.out - - $HOME/gopath/bin/goveralls -coverprofile=cov.out -service=travis-ci -repotoken "$COVERALLS_TOKEN" || true diff --git a/vendor/github.com/oklog/ulid/AUTHORS.md b/vendor/github.com/oklog/ulid/AUTHORS.md deleted file mode 100644 index 95581c78b06..00000000000 --- a/vendor/github.com/oklog/ulid/AUTHORS.md +++ /dev/null @@ -1,2 +0,0 @@ -- Peter Bourgon (@peterbourgon) -- Tomás Senart (@tsenart) diff --git a/vendor/github.com/oklog/ulid/CHANGELOG.md b/vendor/github.com/oklog/ulid/CHANGELOG.md deleted file mode 100644 index 8da38c6b00d..00000000000 --- a/vendor/github.com/oklog/ulid/CHANGELOG.md +++ /dev/null @@ -1,33 +0,0 @@ -## 1.3.1 / 2018-10-02 - -* Use underlying entropy source for random increments in Monotonic (#32) - -## 1.3.0 / 2018-09-29 - -* Monotonic entropy support (#31) - -## 1.2.0 / 2018-09-09 - -* Add a function to convert Unix time in milliseconds back to time.Time (#30) - -## 1.1.0 / 2018-08-15 - -* Ensure random part is always read from the entropy reader in full (#28) - -## 1.0.0 / 2018-07-29 - -* Add ParseStrict and MustParseStrict functions (#26) -* Enforce overflow checking when parsing (#20) - -## 0.3.0 / 2017-01-03 - -* Implement ULID.Compare method - -## 0.2.0 / 2016-12-13 - -* Remove year 2262 Timestamp bug. (#1) -* Gracefully handle invalid encodings when parsing. - -## 0.1.0 / 2016-12-06 - -* First ULID release diff --git a/vendor/github.com/oklog/ulid/CONTRIBUTING.md b/vendor/github.com/oklog/ulid/CONTRIBUTING.md deleted file mode 100644 index 68f03f26eba..00000000000 --- a/vendor/github.com/oklog/ulid/CONTRIBUTING.md +++ /dev/null @@ -1,17 +0,0 @@ -# Contributing - -We use GitHub to manage reviews of pull requests. - -* If you have a trivial fix or improvement, go ahead and create a pull - request, addressing (with `@...`) one or more of the maintainers - (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. 
- -* If you plan to do something more involved, first propose your ideas - in a Github issue. This will avoid unnecessary work and surely give - you and us a good deal of inspiration. - -* Relevant coding style guidelines are the [Go Code Review - Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) - and the _Formatting and style_ section of Peter Bourgon's [Go: Best - Practices for Production - Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff --git a/vendor/github.com/oklog/ulid/Gopkg.lock b/vendor/github.com/oklog/ulid/Gopkg.lock deleted file mode 100644 index 349b449a6ea..00000000000 --- a/vendor/github.com/oklog/ulid/Gopkg.lock +++ /dev/null @@ -1,15 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - branch = "master" - name = "github.com/pborman/getopt" - packages = ["v2"] - revision = "7148bc3a4c3008adfcab60cbebfd0576018f330b" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "6779b05abd5cd429c5393641d2453005a3cb74a400d161b2b5c5d0ca2e10e116" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/oklog/ulid/Gopkg.toml b/vendor/github.com/oklog/ulid/Gopkg.toml deleted file mode 100644 index 624a7a019c7..00000000000 --- a/vendor/github.com/oklog/ulid/Gopkg.toml +++ /dev/null @@ -1,26 +0,0 @@ - -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. 
-# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" - - -[[constraint]] - branch = "master" - name = "github.com/pborman/getopt" diff --git a/vendor/github.com/oklog/ulid/README.md b/vendor/github.com/oklog/ulid/README.md deleted file mode 100644 index 0a3d2f82b25..00000000000 --- a/vendor/github.com/oklog/ulid/README.md +++ /dev/null @@ -1,150 +0,0 @@ -# Universally Unique Lexicographically Sortable Identifier - -![Project status](https://img.shields.io/badge/version-1.3.0-yellow.svg) -[![Build Status](https://secure.travis-ci.org/oklog/ulid.png)](http://travis-ci.org/oklog/ulid) -[![Go Report Card](https://goreportcard.com/badge/oklog/ulid?cache=0)](https://goreportcard.com/report/oklog/ulid) -[![Coverage Status](https://coveralls.io/repos/github/oklog/ulid/badge.svg?branch=master&cache=0)](https://coveralls.io/github/oklog/ulid?branch=master) -[![GoDoc](https://godoc.org/github.com/oklog/ulid?status.svg)](https://godoc.org/github.com/oklog/ulid) -[![Apache 2 licensed](https://img.shields.io/badge/license-Apache2-blue.svg)](https://raw.githubusercontent.com/oklog/ulid/master/LICENSE) - -A Go port of [alizain/ulid](https://github.com/alizain/ulid) with binary format implemented. 
- -## Background - -A GUID/UUID can be suboptimal for many use-cases because: - -- It isn't the most character efficient way of encoding 128 bits -- UUID v1/v2 is impractical in many environments, as it requires access to a unique, stable MAC address -- UUID v3/v5 requires a unique seed and produces randomly distributed IDs, which can cause fragmentation in many data structures -- UUID v4 provides no other information than randomness which can cause fragmentation in many data structures - -A ULID however: - -- Is compatible with UUID/GUID's -- 1.21e+24 unique ULIDs per millisecond (1,208,925,819,614,629,174,706,176 to be exact) -- Lexicographically sortable -- Canonically encoded as a 26 character string, as opposed to the 36 character UUID -- Uses Crockford's base32 for better efficiency and readability (5 bits per character) -- Case insensitive -- No special characters (URL safe) -- Monotonic sort order (correctly detects and handles the same millisecond) - -## Install - -```shell -go get github.com/oklog/ulid -``` - -## Usage - -An ULID is constructed with a `time.Time` and an `io.Reader` entropy source. -This design allows for greater flexibility in choosing your trade-offs. - -Please note that `rand.Rand` from the `math` package is *not* safe for concurrent use. -Instantiate one per long living go-routine or use a `sync.Pool` if you want to avoid the potential contention of a locked `rand.Source` as its been frequently observed in the package level functions. - - -```go -func ExampleULID() { - t := time.Unix(1000000, 0) - entropy := ulid.Monotonic(rand.New(rand.NewSource(t.UnixNano())), 0) - fmt.Println(ulid.MustNew(ulid.Timestamp(t), entropy)) - // Output: 0000XSNJG0MQJHBF4QX1EFD6Y3 -} - -``` - -## Specification - -Below is the current specification of ULID as implemented in this repository. 
- -### Components - -**Timestamp** -- 48 bits -- UNIX-time in milliseconds -- Won't run out of space till the year 10895 AD - -**Entropy** -- 80 bits -- User defined entropy source. -- Monotonicity within the same millisecond with [`ulid.Monotonic`](https://godoc.org/github.com/oklog/ulid#Monotonic) - -### Encoding - -[Crockford's Base32](http://www.crockford.com/wrmg/base32.html) is used as shown. -This alphabet excludes the letters I, L, O, and U to avoid confusion and abuse. - -``` -0123456789ABCDEFGHJKMNPQRSTVWXYZ -``` - -### Binary Layout and Byte Order - -The components are encoded as 16 octets. Each component is encoded with the Most Significant Byte first (network byte order). - -``` -0 1 2 3 - 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -| 32_bit_uint_time_high | -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -| 16_bit_uint_time_low | 16_bit_uint_random | -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -| 32_bit_uint_random | -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -| 32_bit_uint_random | -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -``` - -### String Representation - -``` - 01AN4Z07BY 79KA1307SR9X4MV3 -|----------| |----------------| - Timestamp Entropy - 10 chars 16 chars - 48bits 80bits - base32 base32 -``` - -## Test - -```shell -go test ./... 
-``` - -## Benchmarks - -On a Intel Core i7 Ivy Bridge 2.7 GHz, MacOS 10.12.1 and Go 1.8.0beta1 - -``` -BenchmarkNew/WithCryptoEntropy-8 2000000 771 ns/op 20.73 MB/s 16 B/op 1 allocs/op -BenchmarkNew/WithEntropy-8 20000000 65.8 ns/op 243.01 MB/s 16 B/op 1 allocs/op -BenchmarkNew/WithoutEntropy-8 50000000 30.0 ns/op 534.06 MB/s 16 B/op 1 allocs/op -BenchmarkMustNew/WithCryptoEntropy-8 2000000 781 ns/op 20.48 MB/s 16 B/op 1 allocs/op -BenchmarkMustNew/WithEntropy-8 20000000 70.0 ns/op 228.51 MB/s 16 B/op 1 allocs/op -BenchmarkMustNew/WithoutEntropy-8 50000000 34.6 ns/op 462.98 MB/s 16 B/op 1 allocs/op -BenchmarkParse-8 50000000 30.0 ns/op 866.16 MB/s 0 B/op 0 allocs/op -BenchmarkMustParse-8 50000000 35.2 ns/op 738.94 MB/s 0 B/op 0 allocs/op -BenchmarkString-8 20000000 64.9 ns/op 246.40 MB/s 32 B/op 1 allocs/op -BenchmarkMarshal/Text-8 20000000 55.8 ns/op 286.84 MB/s 32 B/op 1 allocs/op -BenchmarkMarshal/TextTo-8 100000000 22.4 ns/op 714.91 MB/s 0 B/op 0 allocs/op -BenchmarkMarshal/Binary-8 300000000 4.02 ns/op 3981.77 MB/s 0 B/op 0 allocs/op -BenchmarkMarshal/BinaryTo-8 2000000000 1.18 ns/op 13551.75 MB/s 0 B/op 0 allocs/op -BenchmarkUnmarshal/Text-8 100000000 20.5 ns/op 1265.27 MB/s 0 B/op 0 allocs/op -BenchmarkUnmarshal/Binary-8 300000000 4.94 ns/op 3240.01 MB/s 0 B/op 0 allocs/op -BenchmarkNow-8 100000000 15.1 ns/op 528.09 MB/s 0 B/op 0 allocs/op -BenchmarkTimestamp-8 2000000000 0.29 ns/op 27271.59 MB/s 0 B/op 0 allocs/op -BenchmarkTime-8 2000000000 0.58 ns/op 13717.80 MB/s 0 B/op 0 allocs/op -BenchmarkSetTime-8 2000000000 0.89 ns/op 9023.95 MB/s 0 B/op 0 allocs/op -BenchmarkEntropy-8 200000000 7.62 ns/op 1311.66 MB/s 0 B/op 0 allocs/op -BenchmarkSetEntropy-8 2000000000 0.88 ns/op 11376.54 MB/s 0 B/op 0 allocs/op -BenchmarkCompare-8 200000000 7.34 ns/op 4359.23 MB/s 0 B/op 0 allocs/op -``` - -## Prior Art - -- [alizain/ulid](https://github.com/alizain/ulid) -- [RobThree/NUlid](https://github.com/RobThree/NUlid) -- 
[imdario/go-ulid](https://github.com/imdario/go-ulid) diff --git a/vendor/github.com/oklog/ulid/ulid.go b/vendor/github.com/oklog/ulid/ulid.go deleted file mode 100644 index c5d0d66fd2a..00000000000 --- a/vendor/github.com/oklog/ulid/ulid.go +++ /dev/null @@ -1,614 +0,0 @@ -// Copyright 2016 The Oklog Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ulid - -import ( - "bufio" - "bytes" - "database/sql/driver" - "encoding/binary" - "errors" - "io" - "math" - "math/bits" - "math/rand" - "time" -) - -/* -An ULID is a 16 byte Universally Unique Lexicographically Sortable Identifier - - The components are encoded as 16 octets. - Each component is encoded with the MSB first (network byte order). - - 0 1 2 3 - 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | 32_bit_uint_time_high | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | 16_bit_uint_time_low | 16_bit_uint_random | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | 32_bit_uint_random | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | 32_bit_uint_random | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -*/ -type ULID [16]byte - -var ( - // ErrDataSize is returned when parsing or unmarshaling ULIDs with the wrong - // data size. 
- ErrDataSize = errors.New("ulid: bad data size when unmarshaling") - - // ErrInvalidCharacters is returned when parsing or unmarshaling ULIDs with - // invalid Base32 encodings. - ErrInvalidCharacters = errors.New("ulid: bad data characters when unmarshaling") - - // ErrBufferSize is returned when marshalling ULIDs to a buffer of insufficient - // size. - ErrBufferSize = errors.New("ulid: bad buffer size when marshaling") - - // ErrBigTime is returned when constructing an ULID with a time that is larger - // than MaxTime. - ErrBigTime = errors.New("ulid: time too big") - - // ErrOverflow is returned when unmarshaling a ULID whose first character is - // larger than 7, thereby exceeding the valid bit depth of 128. - ErrOverflow = errors.New("ulid: overflow when unmarshaling") - - // ErrMonotonicOverflow is returned by a Monotonic entropy source when - // incrementing the previous ULID's entropy bytes would result in overflow. - ErrMonotonicOverflow = errors.New("ulid: monotonic entropy overflow") - - // ErrScanValue is returned when the value passed to scan cannot be unmarshaled - // into the ULID. - ErrScanValue = errors.New("ulid: source value must be a string or byte slice") -) - -// New returns an ULID with the given Unix milliseconds timestamp and an -// optional entropy source. Use the Timestamp function to convert -// a time.Time to Unix milliseconds. -// -// ErrBigTime is returned when passing a timestamp bigger than MaxTime. -// Reading from the entropy source may also return an error. -func New(ms uint64, entropy io.Reader) (id ULID, err error) { - if err = id.SetTime(ms); err != nil { - return id, err - } - - switch e := entropy.(type) { - case nil: - return id, err - case *monotonic: - err = e.MonotonicRead(ms, id[6:]) - default: - _, err = io.ReadFull(e, id[6:]) - } - - return id, err -} - -// MustNew is a convenience function equivalent to New that panics on failure -// instead of returning an error. 
-func MustNew(ms uint64, entropy io.Reader) ULID { - id, err := New(ms, entropy) - if err != nil { - panic(err) - } - return id -} - -// Parse parses an encoded ULID, returning an error in case of failure. -// -// ErrDataSize is returned if the len(ulid) is different from an encoded -// ULID's length. Invalid encodings produce undefined ULIDs. For a version that -// returns an error instead, see ParseStrict. -func Parse(ulid string) (id ULID, err error) { - return id, parse([]byte(ulid), false, &id) -} - -// ParseStrict parses an encoded ULID, returning an error in case of failure. -// -// It is like Parse, but additionally validates that the parsed ULID consists -// only of valid base32 characters. It is slightly slower than Parse. -// -// ErrDataSize is returned if the len(ulid) is different from an encoded -// ULID's length. Invalid encodings return ErrInvalidCharacters. -func ParseStrict(ulid string) (id ULID, err error) { - return id, parse([]byte(ulid), true, &id) -} - -func parse(v []byte, strict bool, id *ULID) error { - // Check if a base32 encoded ULID is the right length. - if len(v) != EncodedSize { - return ErrDataSize - } - - // Check if all the characters in a base32 encoded ULID are part of the - // expected base32 character set. 
- if strict && - (dec[v[0]] == 0xFF || - dec[v[1]] == 0xFF || - dec[v[2]] == 0xFF || - dec[v[3]] == 0xFF || - dec[v[4]] == 0xFF || - dec[v[5]] == 0xFF || - dec[v[6]] == 0xFF || - dec[v[7]] == 0xFF || - dec[v[8]] == 0xFF || - dec[v[9]] == 0xFF || - dec[v[10]] == 0xFF || - dec[v[11]] == 0xFF || - dec[v[12]] == 0xFF || - dec[v[13]] == 0xFF || - dec[v[14]] == 0xFF || - dec[v[15]] == 0xFF || - dec[v[16]] == 0xFF || - dec[v[17]] == 0xFF || - dec[v[18]] == 0xFF || - dec[v[19]] == 0xFF || - dec[v[20]] == 0xFF || - dec[v[21]] == 0xFF || - dec[v[22]] == 0xFF || - dec[v[23]] == 0xFF || - dec[v[24]] == 0xFF || - dec[v[25]] == 0xFF) { - return ErrInvalidCharacters - } - - // Check if the first character in a base32 encoded ULID will overflow. This - // happens because the base32 representation encodes 130 bits, while the - // ULID is only 128 bits. - // - // See https://github.com/oklog/ulid/issues/9 for details. - if v[0] > '7' { - return ErrOverflow - } - - // Use an optimized unrolled loop (from https://github.com/RobThree/NUlid) - // to decode a base32 ULID. 
- - // 6 bytes timestamp (48 bits) - (*id)[0] = ((dec[v[0]] << 5) | dec[v[1]]) - (*id)[1] = ((dec[v[2]] << 3) | (dec[v[3]] >> 2)) - (*id)[2] = ((dec[v[3]] << 6) | (dec[v[4]] << 1) | (dec[v[5]] >> 4)) - (*id)[3] = ((dec[v[5]] << 4) | (dec[v[6]] >> 1)) - (*id)[4] = ((dec[v[6]] << 7) | (dec[v[7]] << 2) | (dec[v[8]] >> 3)) - (*id)[5] = ((dec[v[8]] << 5) | dec[v[9]]) - - // 10 bytes of entropy (80 bits) - (*id)[6] = ((dec[v[10]] << 3) | (dec[v[11]] >> 2)) - (*id)[7] = ((dec[v[11]] << 6) | (dec[v[12]] << 1) | (dec[v[13]] >> 4)) - (*id)[8] = ((dec[v[13]] << 4) | (dec[v[14]] >> 1)) - (*id)[9] = ((dec[v[14]] << 7) | (dec[v[15]] << 2) | (dec[v[16]] >> 3)) - (*id)[10] = ((dec[v[16]] << 5) | dec[v[17]]) - (*id)[11] = ((dec[v[18]] << 3) | dec[v[19]]>>2) - (*id)[12] = ((dec[v[19]] << 6) | (dec[v[20]] << 1) | (dec[v[21]] >> 4)) - (*id)[13] = ((dec[v[21]] << 4) | (dec[v[22]] >> 1)) - (*id)[14] = ((dec[v[22]] << 7) | (dec[v[23]] << 2) | (dec[v[24]] >> 3)) - (*id)[15] = ((dec[v[24]] << 5) | dec[v[25]]) - - return nil -} - -// MustParse is a convenience function equivalent to Parse that panics on failure -// instead of returning an error. -func MustParse(ulid string) ULID { - id, err := Parse(ulid) - if err != nil { - panic(err) - } - return id -} - -// MustParseStrict is a convenience function equivalent to ParseStrict that -// panics on failure instead of returning an error. -func MustParseStrict(ulid string) ULID { - id, err := ParseStrict(ulid) - if err != nil { - panic(err) - } - return id -} - -// String returns a lexicographically sortable string encoded ULID -// (26 characters, non-standard base 32) e.g. 01AN4Z07BY79KA1307SR9X4MV3 -// Format: tttttttttteeeeeeeeeeeeeeee where t is time and e is entropy -func (id ULID) String() string { - ulid := make([]byte, EncodedSize) - _ = id.MarshalTextTo(ulid) - return string(ulid) -} - -// MarshalBinary implements the encoding.BinaryMarshaler interface by -// returning the ULID as a byte slice. 
-func (id ULID) MarshalBinary() ([]byte, error) { - ulid := make([]byte, len(id)) - return ulid, id.MarshalBinaryTo(ulid) -} - -// MarshalBinaryTo writes the binary encoding of the ULID to the given buffer. -// ErrBufferSize is returned when the len(dst) != 16. -func (id ULID) MarshalBinaryTo(dst []byte) error { - if len(dst) != len(id) { - return ErrBufferSize - } - - copy(dst, id[:]) - return nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface by -// copying the passed data and converting it to an ULID. ErrDataSize is -// returned if the data length is different from ULID length. -func (id *ULID) UnmarshalBinary(data []byte) error { - if len(data) != len(*id) { - return ErrDataSize - } - - copy((*id)[:], data) - return nil -} - -// Encoding is the base 32 encoding alphabet used in ULID strings. -const Encoding = "0123456789ABCDEFGHJKMNPQRSTVWXYZ" - -// MarshalText implements the encoding.TextMarshaler interface by -// returning the string encoded ULID. -func (id ULID) MarshalText() ([]byte, error) { - ulid := make([]byte, EncodedSize) - return ulid, id.MarshalTextTo(ulid) -} - -// MarshalTextTo writes the ULID as a string to the given buffer. -// ErrBufferSize is returned when the len(dst) != 26. -func (id ULID) MarshalTextTo(dst []byte) error { - // Optimized unrolled loop ahead. 
- // From https://github.com/RobThree/NUlid - - if len(dst) != EncodedSize { - return ErrBufferSize - } - - // 10 byte timestamp - dst[0] = Encoding[(id[0]&224)>>5] - dst[1] = Encoding[id[0]&31] - dst[2] = Encoding[(id[1]&248)>>3] - dst[3] = Encoding[((id[1]&7)<<2)|((id[2]&192)>>6)] - dst[4] = Encoding[(id[2]&62)>>1] - dst[5] = Encoding[((id[2]&1)<<4)|((id[3]&240)>>4)] - dst[6] = Encoding[((id[3]&15)<<1)|((id[4]&128)>>7)] - dst[7] = Encoding[(id[4]&124)>>2] - dst[8] = Encoding[((id[4]&3)<<3)|((id[5]&224)>>5)] - dst[9] = Encoding[id[5]&31] - - // 16 bytes of entropy - dst[10] = Encoding[(id[6]&248)>>3] - dst[11] = Encoding[((id[6]&7)<<2)|((id[7]&192)>>6)] - dst[12] = Encoding[(id[7]&62)>>1] - dst[13] = Encoding[((id[7]&1)<<4)|((id[8]&240)>>4)] - dst[14] = Encoding[((id[8]&15)<<1)|((id[9]&128)>>7)] - dst[15] = Encoding[(id[9]&124)>>2] - dst[16] = Encoding[((id[9]&3)<<3)|((id[10]&224)>>5)] - dst[17] = Encoding[id[10]&31] - dst[18] = Encoding[(id[11]&248)>>3] - dst[19] = Encoding[((id[11]&7)<<2)|((id[12]&192)>>6)] - dst[20] = Encoding[(id[12]&62)>>1] - dst[21] = Encoding[((id[12]&1)<<4)|((id[13]&240)>>4)] - dst[22] = Encoding[((id[13]&15)<<1)|((id[14]&128)>>7)] - dst[23] = Encoding[(id[14]&124)>>2] - dst[24] = Encoding[((id[14]&3)<<3)|((id[15]&224)>>5)] - dst[25] = Encoding[id[15]&31] - - return nil -} - -// Byte to index table for O(1) lookups when unmarshaling. -// We use 0xFF as sentinel value for invalid indexes. 
-var dec = [...]byte{ - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01, - 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, - 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, 0x15, 0xFF, - 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E, - 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, - 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, - 0x15, 0xFF, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, - 0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, -} - -// EncodedSize is the length of a text encoded ULID. -const EncodedSize = 26 - -// UnmarshalText implements the encoding.TextUnmarshaler interface by -// parsing the data as string encoded ULID. -// -// ErrDataSize is returned if the len(v) is different from an encoded -// ULID's length. Invalid encodings produce undefined ULIDs. 
-func (id *ULID) UnmarshalText(v []byte) error { - return parse(v, false, id) -} - -// Time returns the Unix time in milliseconds encoded in the ULID. -// Use the top level Time function to convert the returned value to -// a time.Time. -func (id ULID) Time() uint64 { - return uint64(id[5]) | uint64(id[4])<<8 | - uint64(id[3])<<16 | uint64(id[2])<<24 | - uint64(id[1])<<32 | uint64(id[0])<<40 -} - -// maxTime is the maximum Unix time in milliseconds that can be -// represented in an ULID. -var maxTime = ULID{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}.Time() - -// MaxTime returns the maximum Unix time in milliseconds that -// can be encoded in an ULID. -func MaxTime() uint64 { return maxTime } - -// Now is a convenience function that returns the current -// UTC time in Unix milliseconds. Equivalent to: -// Timestamp(time.Now().UTC()) -func Now() uint64 { return Timestamp(time.Now().UTC()) } - -// Timestamp converts a time.Time to Unix milliseconds. -// -// Because of the way ULID stores time, times from the year -// 10889 produces undefined results. -func Timestamp(t time.Time) uint64 { - return uint64(t.Unix())*1000 + - uint64(t.Nanosecond()/int(time.Millisecond)) -} - -// Time converts Unix milliseconds in the format -// returned by the Timestamp function to a time.Time. -func Time(ms uint64) time.Time { - s := int64(ms / 1e3) - ns := int64((ms % 1e3) * 1e6) - return time.Unix(s, ns) -} - -// SetTime sets the time component of the ULID to the given Unix time -// in milliseconds. -func (id *ULID) SetTime(ms uint64) error { - if ms > maxTime { - return ErrBigTime - } - - (*id)[0] = byte(ms >> 40) - (*id)[1] = byte(ms >> 32) - (*id)[2] = byte(ms >> 24) - (*id)[3] = byte(ms >> 16) - (*id)[4] = byte(ms >> 8) - (*id)[5] = byte(ms) - - return nil -} - -// Entropy returns the entropy from the ULID. -func (id ULID) Entropy() []byte { - e := make([]byte, 10) - copy(e, id[6:]) - return e -} - -// SetEntropy sets the ULID entropy to the passed byte slice. 
-// ErrDataSize is returned if len(e) != 10. -func (id *ULID) SetEntropy(e []byte) error { - if len(e) != 10 { - return ErrDataSize - } - - copy((*id)[6:], e) - return nil -} - -// Compare returns an integer comparing id and other lexicographically. -// The result will be 0 if id==other, -1 if id < other, and +1 if id > other. -func (id ULID) Compare(other ULID) int { - return bytes.Compare(id[:], other[:]) -} - -// Scan implements the sql.Scanner interface. It supports scanning -// a string or byte slice. -func (id *ULID) Scan(src interface{}) error { - switch x := src.(type) { - case nil: - return nil - case string: - return id.UnmarshalText([]byte(x)) - case []byte: - return id.UnmarshalBinary(x) - } - - return ErrScanValue -} - -// Value implements the sql/driver.Valuer interface. This returns the value -// represented as a byte slice. If instead a string is desirable, a wrapper -// type can be created that calls String(). -// -// // stringValuer wraps a ULID as a string-based driver.Valuer. -// type stringValuer ULID -// -// func (id stringValuer) Value() (driver.Value, error) { -// return ULID(id).String(), nil -// } -// -// // Example usage. -// db.Exec("...", stringValuer(id)) -func (id ULID) Value() (driver.Value, error) { - return id.MarshalBinary() -} - -// Monotonic returns an entropy source that is guaranteed to yield -// strictly increasing entropy bytes for the same ULID timestamp. -// On conflicts, the previous ULID entropy is incremented with a -// random number between 1 and `inc` (inclusive). -// -// The provided entropy source must actually yield random bytes or else -// monotonic reads are not guaranteed to terminate, since there isn't -// enough randomness to compute an increment number. -// -// When `inc == 0`, it'll be set to a secure default of `math.MaxUint32`. -// The lower the value of `inc`, the easier the next ULID within the -// same millisecond is to guess. 
If your code depends on ULIDs having -// secure entropy bytes, then don't go under this default unless you know -// what you're doing. -// -// The returned io.Reader isn't safe for concurrent use. -func Monotonic(entropy io.Reader, inc uint64) io.Reader { - m := monotonic{ - Reader: bufio.NewReader(entropy), - inc: inc, - } - - if m.inc == 0 { - m.inc = math.MaxUint32 - } - - if rng, ok := entropy.(*rand.Rand); ok { - m.rng = rng - } - - return &m -} - -type monotonic struct { - io.Reader - ms uint64 - inc uint64 - entropy uint80 - rand [8]byte - rng *rand.Rand -} - -func (m *monotonic) MonotonicRead(ms uint64, entropy []byte) (err error) { - if !m.entropy.IsZero() && m.ms == ms { - err = m.increment() - m.entropy.AppendTo(entropy) - } else if _, err = io.ReadFull(m.Reader, entropy); err == nil { - m.ms = ms - m.entropy.SetBytes(entropy) - } - return err -} - -// increment the previous entropy number with a random number -// of up to m.inc (inclusive). -func (m *monotonic) increment() error { - if inc, err := m.random(); err != nil { - return err - } else if m.entropy.Add(inc) { - return ErrMonotonicOverflow - } - return nil -} - -// random returns a uniform random value in [1, m.inc), reading entropy -// from m.Reader. When m.inc == 0 || m.inc == 1, it returns 1. -// Adapted from: https://golang.org/pkg/crypto/rand/#Int -func (m *monotonic) random() (inc uint64, err error) { - if m.inc <= 1 { - return 1, nil - } - - // Fast path for using a underlying rand.Rand directly. - if m.rng != nil { - // Range: [1, m.inc) - return 1 + uint64(m.rng.Int63n(int64(m.inc))), nil - } - - // bitLen is the maximum bit length needed to encode a value < m.inc. - bitLen := bits.Len64(m.inc) - - // byteLen is the maximum byte length needed to encode a value < m.inc. - byteLen := uint(bitLen+7) / 8 - - // msbitLen is the number of bits in the most significant byte of m.inc-1. 
- msbitLen := uint(bitLen % 8) - if msbitLen == 0 { - msbitLen = 8 - } - - for inc == 0 || inc >= m.inc { - if _, err = io.ReadFull(m.Reader, m.rand[:byteLen]); err != nil { - return 0, err - } - - // Clear bits in the first byte to increase the probability - // that the candidate is < m.inc. - m.rand[0] &= uint8(int(1< +# Delta to Cumulative Processor + +The Delta to Cumulative Processor (`deltatocumulativeprocessor`) converts metrics from delta temporality to +cumulative, by accumulating samples in memory. + + | Status | | | ------------- |-----------| | Stability | [alpha]: metrics | @@ -16,12 +20,6 @@ [k8s]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s - -## Description - -The delta to cumulative processor (`deltatocumulativeprocessor`) converts -metrics from delta temporality to cumulative, by accumulating samples in memory. - ## Configuration ``` yaml diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/documentation.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/documentation.md index 67112dff91a..0c74b0dda8b 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/documentation.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/documentation.md @@ -8,7 +8,7 @@ The following telemetry is emitted by this component. ### otelcol_deltatocumulative_datapoints -total number of datapoints processed. may have 'error' attribute, if processing failed [Development] +total number of datapoints processed. may have 'error' attribute, if processing failed | Unit | Metric Type | Value Type | Monotonic | Stability | | ---- | ----------- | ---------- | --------- | --------- | @@ -16,7 +16,7 @@ total number of datapoints processed. 
may have 'error' attribute, if processing ### otelcol_deltatocumulative_streams_limit -upper limit of tracked streams [Development] +upper limit of tracked streams | Unit | Metric Type | Value Type | Stability | | ---- | ----------- | ---------- | --------- | @@ -24,7 +24,7 @@ upper limit of tracked streams [Development] ### otelcol_deltatocumulative_streams_max_stale -duration after which streams inactive streams are dropped [Development] +duration after which streams inactive streams are dropped | Unit | Metric Type | Value Type | Stability | | ---- | ----------- | ---------- | --------- | @@ -32,7 +32,7 @@ duration after which streams inactive streams are dropped [Development] ### otelcol_deltatocumulative_streams_tracked -number of streams tracked [Development] +number of streams tracked | Unit | Metric Type | Value Type | Monotonic | Stability | | ---- | ----------- | ---------- | --------- | --------- | diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/maps/map.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/maps/map.go index 67b43804d3c..8e51e3f86bf 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/maps/map.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/maps/map.go @@ -7,7 +7,7 @@ import ( "fmt" "sync/atomic" - "github.com/puzpuzpuz/xsync/v3" + "github.com/puzpuzpuz/xsync/v4" ) func Limit(limit int64) Context { @@ -15,7 +15,7 @@ func Limit(limit int64) Context { } func New[K comparable, V any](ctx Context) *Parallel[K, V] { - return &Parallel[K, V]{ctx: ctx, elems: xsync.NewMapOf[K, V]()} + return &Parallel[K, V]{ctx: ctx, elems: xsync.NewMap[K, V]()} } // lowercase alias for unexported embedding @@ -32,7 +32,7 @@ type ctx = Context // Parallel enforces the [Context] size 
limit. type Parallel[K comparable, V any] struct { ctx - elems *xsync.MapOf[K, V] + elems *xsync.Map[K, V] } // Context holds size information about one or more maps. diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/metadata.yaml index 2b742fef914..34df18b6d23 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/metadata.yaml +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/metadata.yaml @@ -1,5 +1,10 @@ +display_name: Delta to Cumulative Processor type: deltatocumulative +description: | + The Delta to Cumulative Processor (`deltatocumulativeprocessor`) converts metrics from delta temporality to + cumulative, by accumulating samples in memory. + status: class: processor stability: @@ -15,8 +20,7 @@ telemetry: # streams deltatocumulative_datapoints: description: total number of datapoints processed. 
may have 'error' attribute, if processing failed - stability: - level: development + stability: development unit: "{datapoint}" sum: value_type: int @@ -24,16 +28,14 @@ telemetry: enabled: true deltatocumulative_streams_limit: description: upper limit of tracked streams - stability: - level: development + stability: development unit: "{stream}" gauge: value_type: int enabled: true deltatocumulative_streams_max_stale: description: duration after which streams inactive streams are dropped - stability: - level: development + stability: development unit: "s" gauge: value_type: int @@ -41,8 +43,7 @@ telemetry: # datapoints deltatocumulative_streams_tracked: description: number of streams tracked - stability: - level: development + stability: development unit: "{dps}" sum: value_type: int diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/processor.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/processor.go index 849873b7095..c5940aacb19 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/processor.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/processor.go @@ -8,7 +8,7 @@ import ( "sync" "time" - "github.com/puzpuzpuz/xsync/v3" + "github.com/puzpuzpuz/xsync/v4" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/pdata/pmetric" @@ -34,7 +34,7 @@ type deltaToCumulativeProcessor struct { ctx context.Context cancel context.CancelFunc - stale *xsync.MapOf[identity.Stream, time.Time] + stale *xsync.Map[identity.Stream, time.Time] tel telemetry.Metrics } @@ -55,7 +55,7 @@ func newProcessor(cfg *Config, tel telemetry.Metrics, next consumer.Metrics) *de ctx: ctx, cancel: cancel, - stale: xsync.NewMapOf[identity.Stream, time.Time](), + stale: xsync.NewMap[identity.Stream, 
time.Time](), tel: tel, } diff --git a/vendor/github.com/pb33f/jsonpath/pkg/jsonpath/parser.go b/vendor/github.com/pb33f/jsonpath/pkg/jsonpath/parser.go index 488c6f7fd58..ec1109a2dd6 100644 --- a/vendor/github.com/pb33f/jsonpath/pkg/jsonpath/parser.go +++ b/vendor/github.com/pb33f/jsonpath/pkg/jsonpath/parser.go @@ -30,18 +30,19 @@ var contextVarTokenMap = map[token.Token]contextVarKind{ // JSONPath represents a JSONPath parser. type JSONPath struct { - tokenizer *token.Tokenizer - tokens []token.TokenInfo - ast jsonPathAST - current int - mode []mode - config config.Config + tokenizer *token.Tokenizer + tokens []token.TokenInfo + ast jsonPathAST + current int + mode []mode + config config.Config + filterDepth int // tracks nesting depth inside filter expressions } // newParserPrivate creates a new JSONPath with the given tokens. func newParserPrivate(tokenizer *token.Tokenizer, tokens []token.TokenInfo, opts ...config.Option) *JSONPath { cfg := config.New(opts...) - return &JSONPath{tokenizer, tokens, jsonPathAST{lazyContextTracking: cfg.LazyContextTrackingEnabled()}, 0, []mode{modeNormal}, cfg} + return &JSONPath{tokenizer, tokens, jsonPathAST{lazyContextTracking: cfg.LazyContextTrackingEnabled(), jsonPathPlus: cfg.JSONPathPlusEnabled()}, 0, []mode{modeNormal}, cfg, 0} } // parse parses the JSONPath tokens and returns the root node of the AST. @@ -155,6 +156,12 @@ func (p *JSONPath) parseInnerSegment() (retValue *innerSegment, err error) { dotName := p.tokens[p.current].Literal p.current += 1 return &innerSegment{segmentDotMemberName, dotName, nil}, nil + } else if firstToken.Token == token.INTEGER && p.config.JSONPathPlusEnabled() && p.current >= 3 { + // JSONPath Plus: treat .201 as a member name (common for HTTP status codes in OpenAPI). + // Only when we're past the root (p.current >= 3 means at least $, ., and something before this). 
+ dotName := p.tokens[p.current].Literal + p.current += 1 + return &innerSegment{segmentDotMemberName, dotName, nil}, nil } else if firstToken.Token == token.BRACKET_LEFT { prior := p.current p.current += 1 @@ -244,7 +251,7 @@ func (p *JSONPath) parseSelector() (retSelector *selector, err error) { p.current++ - return &selector{kind: selectorSubKindArrayIndex, index: i}, nil + return &selector{kind: selectorSubKindArrayIndex, index: i, jsonPathPlus: p.config.JSONPathPlusEnabled() && p.filterDepth == 0}, nil } else if p.tokens[p.current].Token == token.ARRAY_SLICE { slice, err := p.parseSliceSelector() if err != nil { @@ -341,6 +348,8 @@ func (p *JSONPath) parseFilterSelector() (*selector, error) { return nil, p.parseFailure(&p.tokens[p.current], "expected '?'") } p.current++ + p.filterDepth++ + defer func() { p.filterDepth-- }() expr, err := p.parseLogicalOrExpr() if err != nil { @@ -783,8 +792,9 @@ func (p *JSONPath) parseLiteral() (*literal, error) { type jsonPathAST struct { // "$" - segments []*segment + segments []*segment lazyContextTracking bool + jsonPathPlus bool // JSONPath Plus extensions enabled (unquoted brackets, mapping index fallback) } func (q jsonPathAST) ToString() string { diff --git a/vendor/github.com/pb33f/jsonpath/pkg/jsonpath/selector.go b/vendor/github.com/pb33f/jsonpath/pkg/jsonpath/selector.go index f71cb47e732..9f5ea06fb54 100644 --- a/vendor/github.com/pb33f/jsonpath/pkg/jsonpath/selector.go +++ b/vendor/github.com/pb33f/jsonpath/pkg/jsonpath/selector.go @@ -23,11 +23,12 @@ type slice struct { } type selector struct { - kind selectorSubKind - name string - index int64 - slice *slice - filter *filterSelector + kind selectorSubKind + name string + index int64 + slice *slice + filter *filterSelector + jsonPathPlus bool // when true, enables MappingNode fallback for array index selectors } func (s selector) ToString() string { diff --git a/vendor/github.com/pb33f/jsonpath/pkg/jsonpath/token/token.go 
b/vendor/github.com/pb33f/jsonpath/pkg/jsonpath/token/token.go index a9e39a42014..7e92c120e4f 100644 --- a/vendor/github.com/pb33f/jsonpath/pkg/jsonpath/token/token.go +++ b/vendor/github.com/pb33f/jsonpath/pkg/jsonpath/token/token.go @@ -408,14 +408,15 @@ type Tokens []TokenInfo // Tokenizer represents a JSONPath tokenizer. type Tokenizer struct { - input string - pos int - line int - column int - tokens []TokenInfo - stack []Token - illegalWhitespace bool - config config.Config + input string + pos int + line int + column int + tokens []TokenInfo + stack []Token + illegalWhitespace bool + config config.Config + bracketFilterState []bool // lazy-init: nil until first BRACKET_LEFT; tracks filter context per bracket depth } // NewTokenizer creates a new JSONPath tokenizer for the given input string. @@ -488,6 +489,10 @@ func (t *Tokenizer) Tokenize() Tokens { t.addToken(ARRAY_SLICE, 1, "") case ch == '?': t.addToken(FILTER, 1, "") + // Mark current bracket as filter context + if len(t.bracketFilterState) > 0 { + t.bracketFilterState[len(t.bracketFilterState)-1] = true + } case ch == '(': t.addToken(PAREN_LEFT, 1, "") t.stack = append(t.stack, PAREN_LEFT) @@ -501,10 +506,21 @@ func (t *Tokenizer) Tokenize() Tokens { case ch == '[': t.addToken(BRACKET_LEFT, 1, "") t.stack = append(t.stack, BRACKET_LEFT) + // Lazy-init bracketFilterState on first bracket + if t.bracketFilterState == nil { + t.bracketFilterState = make([]bool, 0, 4) + } + // Inherit parent filter state: if parent is in filter context, so is this bracket + inFilter := len(t.bracketFilterState) > 0 && t.bracketFilterState[len(t.bracketFilterState)-1] + t.bracketFilterState = append(t.bracketFilterState, inFilter) case ch == ']': if len(t.stack) > 0 && t.stack[len(t.stack)-1] == BRACKET_LEFT { t.addToken(BRACKET_RIGHT, 1, "") t.stack = t.stack[:len(t.stack)-1] + // Pop bracket filter state + if len(t.bracketFilterState) > 0 { + t.bracketFilterState = t.bracketFilterState[:len(t.bracketFilterState)-1] + } } 
else { t.addToken(ILLEGAL, 1, "unmatched closing bracket") } @@ -581,9 +597,19 @@ func (t *Tokenizer) Tokenize() Tokens { case isDigit(ch): t.scanNumber() case isLiteralChar(ch): - t.scanLiteral() + if t.config.JSONPathPlusEnabled() && t.isInsideBracket() && !t.isInFilterContext() { + t.scanUnquotedBracketString() + } else { + t.scanLiteral() + } default: - t.addToken(ILLEGAL, 1, string(ch)) + // JSONPath Plus: handle special characters inside brackets as unquoted strings + // e.g., application/vnd.api+json where / and + would otherwise be ILLEGAL + if t.config.JSONPathPlusEnabled() && t.isInsideBracket() && !t.isInFilterContext() && isUnquotedBracketStartChar(ch) { + t.scanUnquotedBracketString() + } else { + t.addToken(ILLEGAL, 1, string(ch)) + } } t.pos++ t.column++ @@ -611,8 +637,6 @@ func (t *Tokenizer) scanString(quote rune) { var literal strings.Builder illegal: for i := start; i < len(t.input); i++ { - b := literal.String() - _ = b if t.input[i] == byte(quote) { t.addToken(STRING_LITERAL, len(t.input[start:i])+2, literal.String()) t.pos = i @@ -684,6 +708,15 @@ func (t *Tokenizer) scanNumber() { t.column += i - start return } + // Peek ahead: if '.' is NOT followed by a digit, it's a CHILD separator, + // not a decimal point. Stop the number scan here. + if i+1 >= len(t.input) || !isDigit(t.input[i+1]) { + literal := t.input[start:i] + t.addToken(tokenType, len(literal), literal) + t.pos = i - 1 + t.column += i - start - 1 + return + } tokenType = FLOAT dotSeen = true continue @@ -716,7 +749,7 @@ func (t *Tokenizer) scanNumber() { // no leading zero tokenType = ILLEGAL } else if len(literal) > 2 && literal[0] == '-' && literal[1] == '0' && !dotSeen { - // no trailing dot + // no negative zero without fraction tokenType = ILLEGAL } else if len(literal) > 0 && literal[len(literal)-1] == '.' 
{ // no trailing dot @@ -828,6 +861,48 @@ func (t *Tokenizer) peek() byte { return 0 } +// isInsideBracket returns true if the tokenizer is currently inside a bracket pair. +// Nil-safe: returns false when bracketFilterState has not been initialized. +func (t *Tokenizer) isInsideBracket() bool { + return len(t.bracketFilterState) > 0 +} + +// isInFilterContext returns true if the current bracket context is a filter expression. +func (t *Tokenizer) isInFilterContext() bool { + return len(t.bracketFilterState) > 0 && t.bracketFilterState[len(t.bracketFilterState)-1] +} + +// scanUnquotedBracketString scans an unquoted string inside brackets (JSONPath Plus extension). +// Handles values like: get, post, application/vnd.api+json, default, etc. +// Zero-allocation: uses direct substring of t.input, matching scanLiteral's pattern. +func (t *Tokenizer) scanUnquotedBracketString() { + start := t.pos + end := start + for end < len(t.input) { + ch := t.input[end] + if ch == ']' || ch == ',' || ch == '[' || ch == '\'' || ch == '"' || ch == '?' { + break + } + if isSpace(ch) { + break + } + end++ + } + // Trim trailing whitespace by adjusting end index + trimEnd := end + for trimEnd > start && isSpace(t.input[trimEnd-1]) { + trimEnd-- + } + if trimEnd <= start { + t.addToken(ILLEGAL, 1, string(t.input[t.pos])) + return + } + literal := t.input[start:trimEnd] + t.addToken(STRING_LITERAL, len(literal), literal) + t.pos = end - 1 + t.column += end - start - 1 +} + func isDigit(ch byte) bool { return '0' <= ch && ch <= '9' } @@ -841,6 +916,15 @@ func isSpace(ch byte) bool { return ch == ' ' || ch == '\t' || ch == '\r' } +// isUnquotedBracketStartChar returns true if ch can start an unquoted bracket string +// but is not already handled by other cases (digits, literal chars, quotes, etc.). +// Note: + is NOT included here because $[+1] must remain ILLEGAL per RFC 9535. 
+// Characters like + that appear mid-value (e.g., application/vnd.api+json) are handled +// by scanUnquotedBracketString which scans until a delimiter is found. +func isUnquotedBracketStartChar(ch byte) bool { + return ch == '/' || ch == '%' || ch == '#' +} + // contextVariableKeywords maps context variable names to their token types. // These are JSONPath Plus extensions for accessing filter context. var contextVariableKeywords = map[string]Token{ diff --git a/vendor/github.com/pb33f/jsonpath/pkg/jsonpath/yaml_query.go b/vendor/github.com/pb33f/jsonpath/pkg/jsonpath/yaml_query.go index d597a604ea1..855f9030b19 100644 --- a/vendor/github.com/pb33f/jsonpath/pkg/jsonpath/yaml_query.go +++ b/vendor/github.com/pb33f/jsonpath/pkg/jsonpath/yaml_query.go @@ -383,15 +383,15 @@ func (s innerSegment) Query(idx index, value *yaml.Node, root *yaml.Node) []*yam func (s selector) Query(idx index, value *yaml.Node, root *yaml.Node) []*yaml.Node { trackParents := parentTrackingEnabled(idx) + fc, hasFc := idx.(FilterContext) switch s.kind { case selectorSubKindName: if value.Kind != yaml.MappingNode { return nil } - // Check for inherited pending segment from wildcard/slice var inheritedPending string - if fc, ok := idx.(FilterContext); ok { + if hasFc { inheritedPending = fc.GetAndClearPendingPathSegment(value) } @@ -407,13 +407,11 @@ func (s selector) Query(idx index, value *yaml.Node, root *yaml.Node) []*yaml.No if trackParents { idx.setParentNode(child, value) } - if fc, ok := idx.(FilterContext); ok { + if hasFc { thisSegment := normalizePathSegment(key) if inheritedPending != "" { - // Propagate combined pending to result for later consumption fc.SetPendingPathSegment(child, inheritedPending+thisSegment) } else { - // No wildcard ancestry - push directly to path fc.PushPathSegment(thisSegment) } fc.SetPropertyName(key) @@ -422,15 +420,31 @@ func (s selector) Query(idx index, value *yaml.Node, root *yaml.Node) []*yaml.No } } case selectorSubKindArrayIndex: + if s.jsonPathPlus 
&& value.Kind == yaml.MappingNode && s.index >= 0 { + // JSONPath Plus fallback: treat integer index as a string key lookup on mapping nodes. + // This handles YAML mappings with numeric keys like $.responses[200]. + keyStr := strconv.FormatInt(s.index, 10) + for i := 0; i < len(value.Content); i += 2 { + if value.Content[i].Value == keyStr { + child := value.Content[i+1] + idx.setPropertyKey(value.Content[i], value) + idx.setPropertyKey(child, value.Content[i]) + if trackParents { + idx.setParentNode(child, value) + } + return []*yaml.Node{child} + } + } + return nil + } if value.Kind != yaml.SequenceNode { return nil } if s.index >= int64(len(value.Content)) || s.index < -int64(len(value.Content)) { return nil } - // Check for inherited pending segment from wildcard/slice var inheritedPending string - if fc, ok := idx.(FilterContext); ok { + if hasFc { inheritedPending = fc.GetAndClearPendingPathSegment(value) } @@ -446,21 +460,18 @@ func (s selector) Query(idx index, value *yaml.Node, root *yaml.Node) []*yaml.No if trackParents { idx.setParentNode(child, value) } - if fc, ok := idx.(FilterContext); ok { + if hasFc { thisSegment := normalizeIndexSegment(actualIndex) if inheritedPending != "" { - // Propagate combined pending to result for later consumption fc.SetPendingPathSegment(child, inheritedPending+thisSegment) } else { - // No wildcard ancestry - push directly to path fc.PushPathSegment(thisSegment) } } return []*yaml.Node{child} case selectorSubKindWildcard: - // Check for inherited pending segment from previous wildcard/slice var inheritedPending string - if fc, ok := idx.(FilterContext); ok { + if hasFc { inheritedPending = fc.GetAndClearPendingPathSegment(value) } @@ -469,11 +480,10 @@ func (s selector) Query(idx index, value *yaml.Node, root *yaml.Node) []*yaml.No if trackParents { idx.setParentNode(child, value) } - // Track pending path segment and property name for this node - if fc, ok := idx.(FilterContext); ok { + if hasFc { thisSegment := 
normalizeIndexSegment(i) fc.SetPendingPathSegment(child, inheritedPending+thisSegment) - fc.SetPendingPropertyName(child, strconv.Itoa(i)) // For @parentProperty + fc.SetPendingPropertyName(child, strconv.Itoa(i)) } } return value.Content @@ -487,11 +497,10 @@ func (s selector) Query(idx index, value *yaml.Node, root *yaml.Node) []*yaml.No if trackParents { idx.setParentNode(child, value) } - // Track pending path segment and property name for this node - if fc, ok := idx.(FilterContext); ok { + if hasFc { thisSegment := normalizePathSegment(keyNode.Value) fc.SetPendingPathSegment(child, inheritedPending+thisSegment) - fc.SetPendingPropertyName(child, keyNode.Value) // For @parentProperty + fc.SetPendingPropertyName(child, keyNode.Value) } result = append(result, child) } @@ -506,9 +515,8 @@ func (s selector) Query(idx index, value *yaml.Node, root *yaml.Node) []*yaml.No if len(value.Content) == 0 { return nil } - // Check for inherited pending segment from previous wildcard/slice var inheritedPending string - if fc, ok := idx.(FilterContext); ok { + if hasFc { inheritedPending = fc.GetAndClearPendingPathSegment(value) } @@ -530,11 +538,10 @@ func (s selector) Query(idx index, value *yaml.Node, root *yaml.Node) []*yaml.No if trackParents { idx.setParentNode(child, value) } - // Track pending path segment and property name for this node - if fc, ok := idx.(FilterContext); ok { + if hasFc { thisSegment := normalizeIndexSegment(int(i)) fc.SetPendingPathSegment(child, inheritedPending+thisSegment) - fc.SetPendingPropertyName(child, strconv.Itoa(int(i))) // For @parentProperty + fc.SetPendingPropertyName(child, strconv.Itoa(int(i))) } result = append(result, child) } @@ -544,11 +551,10 @@ func (s selector) Query(idx index, value *yaml.Node, root *yaml.Node) []*yaml.No if trackParents { idx.setParentNode(child, value) } - // Track pending path segment and property name for this node - if fc, ok := idx.(FilterContext); ok { + if hasFc { thisSegment := 
normalizeIndexSegment(int(i)) fc.SetPendingPathSegment(child, inheritedPending+thisSegment) - fc.SetPendingPropertyName(child, strconv.Itoa(int(i))) // For @parentProperty + fc.SetPendingPropertyName(child, strconv.Itoa(int(i))) } result = append(result, child) } @@ -557,18 +563,14 @@ func (s selector) Query(idx index, value *yaml.Node, root *yaml.Node) []*yaml.No return result case selectorSubKindFilter: var result []*yaml.Node - // Get parent property name - prefer pending property name from wildcard/slice, - // fall back to current PropertyName var parentPropName string var pushedPendingSegment bool - if fc, ok := idx.(FilterContext); ok { - // First check for pending property name from wildcard/slice + if hasFc { if pendingPropName := fc.GetAndClearPendingPropertyName(value); pendingPropName != "" { parentPropName = pendingPropName } else { parentPropName = fc.PropertyName() } - // Check if this node has a pending path segment from a wildcard/slice if pendingSeg := fc.GetAndClearPendingPathSegment(value); pendingSeg != "" { fc.PushPathSegment(pendingSeg) pushedPendingSegment = true @@ -585,7 +587,7 @@ func (s selector) Query(idx index, value *yaml.Node, root *yaml.Node) []*yaml.No idx.setParentNode(valueNode, value) } - if fc, ok := idx.(FilterContext); ok { + if hasFc { fc.SetParentPropertyName(parentPropName) fc.SetPropertyName(keyNode.Value) fc.SetParent(value) @@ -597,7 +599,7 @@ func (s selector) Query(idx index, value *yaml.Node, root *yaml.Node) []*yaml.No result = append(result, valueNode) } - if fc, ok := idx.(FilterContext); ok { + if hasFc { fc.PopPathSegment() } } @@ -607,7 +609,7 @@ func (s selector) Query(idx index, value *yaml.Node, root *yaml.Node) []*yaml.No idx.setParentNode(child, value) } - if fc, ok := idx.(FilterContext); ok { + if hasFc { fc.SetParentPropertyName(parentPropName) fc.SetPropertyName(strconv.Itoa(i)) fc.SetParent(value) @@ -619,14 +621,13 @@ func (s selector) Query(idx index, value *yaml.Node, root *yaml.Node) []*yaml.No 
result = append(result, child) } - if fc, ok := idx.(FilterContext); ok { + if hasFc { fc.PopPathSegment() } } } - // Pop the pending segment if we pushed one if pushedPendingSegment { - if fc, ok := idx.(FilterContext); ok { + if hasFc { fc.PopPathSegment() } } diff --git a/vendor/github.com/pb33f/libopenapi-validator/.golangci.yml b/vendor/github.com/pb33f/libopenapi-validator/.golangci.yml deleted file mode 100644 index c4bb81a5ec8..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/.golangci.yml +++ /dev/null @@ -1,31 +0,0 @@ -version: "2" -linters: - default: none - enable: - - asciicheck - - bidichk - - errcheck - - govet - - ineffassign - - staticcheck - - unused - exclusions: - generated: lax - presets: - - comments - - common-false-positives - - legacy - - std-error-handling - paths: - - third_party$ - - builtin$ - - examples$ -formatters: - enable: - - gofumpt - exclusions: - generated: lax - paths: - - third_party$ - - builtin$ - - examples$ diff --git a/vendor/github.com/pb33f/libopenapi-validator/.pre-commit-config.yaml b/vendor/github.com/pb33f/libopenapi-validator/.pre-commit-config.yaml deleted file mode 100644 index bc7e6fcc982..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/.pre-commit-config.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# See https://pre-commit.com for more information -# See https://pre-commit.com/hooks.html for more hooks -repos: - - # Run golangci-lint as a pre-commit hook to catch issues before they are pushed - # See https://golangci-lint.run/ for more information - - repo: local - hooks: - - id: golangci-lint - name: Lint Go code - entry: go tool golangci-lint run - language: system - pass_filenames: false - types: [go] \ No newline at end of file diff --git a/vendor/github.com/pb33f/libopenapi-validator/LICENSE.md b/vendor/github.com/pb33f/libopenapi-validator/LICENSE.md deleted file mode 100644 index 8fcf9a09b96..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/LICENSE.md +++ /dev/null @@ 
-1,21 +0,0 @@ -MIT License - -Copyright (c) 2023-2025 Princess Beef Heavy Industries, LLC / Dave Shanley - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/pb33f/libopenapi-validator/Makefile b/vendor/github.com/pb33f/libopenapi-validator/Makefile deleted file mode 100644 index 6d18b84c291..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/Makefile +++ /dev/null @@ -1,15 +0,0 @@ -all: gofumpt import lint - -init: - go install mvdan.cc/gofumpt@v0.7.0 - go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.61.0 - go install github.com/daixiang0/gci@v0.13.5 - -lint: - golangci-lint run ./... - -gofumpt: - gofumpt -l -w . - -import: - gci write --skip-generated -s standard -s default -s localmodule -s blank -s dot -s alias . 
diff --git a/vendor/github.com/pb33f/libopenapi-validator/README.md b/vendor/github.com/pb33f/libopenapi-validator/README.md deleted file mode 100644 index f2c20050406..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/README.md +++ /dev/null @@ -1,100 +0,0 @@ -

- libopenapi -

- -# Enterprise grade OpenAPI validation tools for golang. - -![Pipeline](https://github.com/pb33f/libopenapi-validator/workflows/Build/badge.svg) -[![codecov](https://codecov.io/gh/pb33f/libopenapi-validator/branch/main/graph/badge.svg?)](https://codecov.io/gh/pb33f/libopenapi-validator) -[![discord](https://img.shields.io/discord/923258363540815912)](https://discord.gg/x7VACVuEGP) -[![Docs](https://img.shields.io/badge/godoc-reference-5fafd7)](https://pkg.go.dev/github.com/pb33f/libopenapi-validator) - -A validation module for [libopenapi](https://github.com/pb33f/libopenapi). - -`libopenapi-validator` will validate the following elements against an OpenAPI 3+ specification - -- *http.Request* - Validates the request against the OpenAPI specification -- *http.Response* - Validates the response against the OpenAPI specification -- *libopenapi.Document* - Validates the OpenAPI document against the OpenAPI specification -- *base.Schema* - Validates a schema against a JSON or YAML blob / unmarshalled object - -👉👉 [Check out the full documentation](https://pb33f.io/libopenapi/validation/) 👈👈 - ---- - -## Installation - -```bash -go get github.com/pb33f/libopenapi-validator -``` - -## Validate OpenAPI Document - -```bash -go run github.com/pb33f/libopenapi-validator/cmd/validate@latest [--regexengine] [--yaml2json] -``` - -## Install pre-commit hook - -To install the pre-commit hook, run the following command in your terminal: - -```bash -pre-commit install -``` - -### Options - -#### --regexengine -🔍 Example: Use a custom regex engine/flag (e.g., ecmascript) -```bash -go run github.com/pb33f/libopenapi-validator/cmd/validate@latest --regexengine=ecmascript -``` -🔧 Supported **--regexengine** flags/values (ℹ️ Default: re2) -- none -- ignorecase -- multiline -- explicitcapture -- compiled -- singleline -- ignorepatternwhitespace -- righttoleft -- debug -- ecmascript -- re2 -- unicode - -#### --yaml2json -🔍 Convert YAML files to JSON before validation (ℹ️ Default: false) 
- -[libopenapi](https://github.com/pb33f/libopenapi/blob/main/datamodel/spec_info.go#L115) passes `map[interface{}]interface{}` structures for deeply nested objects or complex mappings in the OpenAPI specification, which are not allowed in JSON. -These structures cannot be properly converted to JSON by libopenapi and cannot be validated by jsonschema, resulting in ambiguous errors. - -This flag allows pre-converting from YAML to JSON to bypass this limitation of the libopenapi. - -**When does this happen?** -- OpenAPI specs with deeply nested schema definitions -- Complex `allOf`, `oneOf`, or `anyOf` structures with multiple levels -- Specifications with intricate object mappings in examples or schema properties - -Enabling this flag pre-converts the YAML document from YAML to JSON, ensuring a clean JSON structure before validation. - -Example: -```bash -go run github.com/pb33f/libopenapi-validator/cmd/validate@latest --yaml2json -``` - -## Documentation - -- [The structure of the validator](https://pb33f.io/libopenapi/validation/#the-structure-of-the-validator) - - [Validation errors](https://pb33f.io/libopenapi/validation/#validation-errors) - - [Schema errors](https://pb33f.io/libopenapi/validation/#schema-errors) - - [High-level validation](https://pb33f.io/libopenapi/validation/#high-level-validation) -- [Validating http.Request](https://pb33f.io/libopenapi/validation/#validating-httprequest) -- [Validating http.Request and http.Response](https://pb33f.io/libopenapi/validation/#validating-httprequest-and-httpresponse) -- [Validating just http.Response](https://pb33f.io/libopenapi/validation/#validating-just-httpresponse) -- [Validating HTTP Parameters](https://pb33f.io/libopenapi/validation/#validating-http-parameters) -- [Validating an OpenAPI document](https://pb33f.io/libopenapi/validation/#validating-an-openapi-document) -- [Validating Schemas](https://pb33f.io/libopenapi/validation/#validating-schemas) - -[libopenapi](https://github.com/pb33f/libopenapi) 
and [libopenapi-validator](https://github.com/pb33f/libopenapi-validator) are -products of Princess Beef Heavy Industries, LLC diff --git a/vendor/github.com/pb33f/libopenapi-validator/cache/cache.go b/vendor/github.com/pb33f/libopenapi-validator/cache/cache.go deleted file mode 100644 index 7cbdd49343c..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/cache/cache.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2025 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package cache - -import ( - "github.com/pb33f/libopenapi/datamodel/high/base" - "github.com/santhosh-tekuri/jsonschema/v6" - "go.yaml.in/yaml/v4" -) - -// SchemaCacheEntry holds a compiled schema and its intermediate representations. -// This is stored in the cache to avoid re-rendering and re-compiling schemas on each request. -type SchemaCacheEntry struct { - Schema *base.Schema - RenderedInline []byte - ReferenceSchema string // String version of RenderedInline - RenderedJSON []byte - CompiledSchema *jsonschema.Schema - RenderedNode *yaml.Node -} - -// SchemaCache defines the interface for schema caching implementations. -// The key is a uint64 hash of the schema (from schema.GoLow().Hash()). -type SchemaCache interface { - Load(key uint64) (*SchemaCacheEntry, bool) - Store(key uint64, value *SchemaCacheEntry) - Range(f func(key uint64, value *SchemaCacheEntry) bool) -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/cache/default_cache.go b/vendor/github.com/pb33f/libopenapi-validator/cache/default_cache.go deleted file mode 100644 index c27211c7e7f..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/cache/default_cache.go +++ /dev/null @@ -1,54 +0,0 @@ -package cache - -import "sync" - -// DefaultCache is the default cache implementation using sync.Map for thread-safe concurrent access. 
-type DefaultCache struct { - m *sync.Map -} - -var _ SchemaCache = &DefaultCache{} - -// NewDefaultCache creates a new DefaultCache with an initialized sync.Map. -func NewDefaultCache() *DefaultCache { - return &DefaultCache{m: &sync.Map{}} -} - -// Load retrieves a schema from the cache. -func (c *DefaultCache) Load(key uint64) (*SchemaCacheEntry, bool) { - if c == nil || c.m == nil { - return nil, false - } - val, ok := c.m.Load(key) - if !ok { - return nil, false - } - schemaCache, ok := val.(*SchemaCacheEntry) - return schemaCache, ok -} - -// Store saves a schema to the cache. -func (c *DefaultCache) Store(key uint64, value *SchemaCacheEntry) { - if c == nil || c.m == nil { - return - } - c.m.Store(key, value) -} - -// Range calls f for each entry in the cache (for testing/inspection). -func (c *DefaultCache) Range(f func(key uint64, value *SchemaCacheEntry) bool) { - if c == nil || c.m == nil { - return - } - c.m.Range(func(k, v interface{}) bool { - key, ok := k.(uint64) - if !ok { - return true - } - val, ok := v.(*SchemaCacheEntry) - if !ok { - return true - } - return f(key, val) - }) -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/config/config.go b/vendor/github.com/pb33f/libopenapi-validator/config/config.go deleted file mode 100644 index 2198f43bc91..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/config/config.go +++ /dev/null @@ -1,293 +0,0 @@ -package config - -import ( - "log/slog" - - "github.com/santhosh-tekuri/jsonschema/v6" - - "github.com/pb33f/libopenapi-validator/cache" - "github.com/pb33f/libopenapi-validator/radix" -) - -// RegexCache can be set to enable compiled regex caching. -// It can be just a sync.Map, or a custom implementation with possible cleanup. 
-// -// Be aware that the cache should be thread safe -type RegexCache interface { - Load(key any) (value any, ok bool) // Get a compiled regex from the cache - Store(key, value any) // Set a compiled regex to the cache -} - -// ValidationOptions A container for validation configuration. -// -// Generally fluent With... style functions are used to establish the desired behavior. -type ValidationOptions struct { - RegexEngine jsonschema.RegexpEngine - RegexCache RegexCache // Enable compiled regex caching - FormatAssertions bool - ContentAssertions bool - SecurityValidation bool - OpenAPIMode bool // Enable OpenAPI-specific vocabulary validation - AllowScalarCoercion bool // Enable string->boolean/number coercion - Formats map[string]func(v any) error - SchemaCache cache.SchemaCache // Optional cache for compiled schemas - PathTree radix.PathLookup // O(k) path lookup via radix tree (built automatically) - pathTreeDisabled bool // Internal: true if radix tree auto-build was disabled via DisablePathTree - Logger *slog.Logger // Logger for debug/error output (nil = silent) - AllowXMLBodyValidation bool // Allows to convert XML to JSON for validating a request/response body. - AllowURLEncodedBodyValidation bool // Allows to convert URL Encoded to JSON for validating a request/response body. - - // strict mode options - detect undeclared properties even when additionalProperties: true - StrictMode bool // Enable strict property validation - StrictIgnorePaths []string // Instance JSONPath patterns to exclude from strict checks - StrictIgnoredHeaders []string // Headers to always ignore in strict mode (nil = use defaults) - strictIgnoredHeadersMerge bool // Internal: true if merging with defaults -} - -// Option Enables an 'Options pattern' approach -type Option func(*ValidationOptions) - -// NewValidationOptions creates a new ValidationOptions instance with default values. 
-func NewValidationOptions(opts ...Option) *ValidationOptions { - // create the set of default values - o := &ValidationOptions{ - FormatAssertions: false, - ContentAssertions: false, - SecurityValidation: true, - OpenAPIMode: true, // Enable OpenAPI vocabulary by default - SchemaCache: cache.NewDefaultCache(), // Enable caching by default - } - - for _, opt := range opts { - if opt != nil { - opt(o) - } - } - return o -} - -// WithExistingOpts returns an Option that will copy the values from the supplied ValidationOptions instance -func WithExistingOpts(options *ValidationOptions) Option { - return func(o *ValidationOptions) { - if options != nil { - o.RegexEngine = options.RegexEngine - o.RegexCache = options.RegexCache - o.FormatAssertions = options.FormatAssertions - o.ContentAssertions = options.ContentAssertions - o.SecurityValidation = options.SecurityValidation - o.OpenAPIMode = options.OpenAPIMode - o.AllowScalarCoercion = options.AllowScalarCoercion - o.Formats = options.Formats - o.SchemaCache = options.SchemaCache - o.PathTree = options.PathTree - o.pathTreeDisabled = options.pathTreeDisabled - o.Logger = options.Logger - o.AllowXMLBodyValidation = options.AllowXMLBodyValidation - o.AllowURLEncodedBodyValidation = options.AllowURLEncodedBodyValidation - o.StrictMode = options.StrictMode - o.StrictIgnorePaths = options.StrictIgnorePaths - o.StrictIgnoredHeaders = options.StrictIgnoredHeaders - o.strictIgnoredHeadersMerge = options.strictIgnoredHeadersMerge - } - } -} - -// WithLogger sets the logger for validation debug/error output. -// If not set, logging is silent (nil logger is handled gracefully). -func WithLogger(logger *slog.Logger) Option { - return func(o *ValidationOptions) { - o.Logger = logger - } -} - -// WithRegexEngine Assigns a custom regular-expression engine to be used during validation. 
-func WithRegexEngine(engine jsonschema.RegexpEngine) Option { - return func(o *ValidationOptions) { - o.RegexEngine = engine - } -} - -// WithRegexCache assigns a cache for compiled regular expressions. -// A sync.Map should be sufficient for most use cases. It does not implement any cleanup -func WithRegexCache(regexCache RegexCache) Option { - return func(o *ValidationOptions) { - o.RegexCache = regexCache - } -} - -// WithFormatAssertions enables checks for 'format' assertions (such as date, date-time, uuid, etc) -func WithFormatAssertions() Option { - return func(o *ValidationOptions) { - o.FormatAssertions = true - } -} - -// WithContentAssertions enables checks for contentType, contentEncoding, etc -func WithContentAssertions() Option { - return func(o *ValidationOptions) { - o.ContentAssertions = true - } -} - -// WithoutSecurityValidation disables security validation for request validation -func WithoutSecurityValidation() Option { - return func(o *ValidationOptions) { - o.SecurityValidation = false - } -} - -// WithCustomFormat adds custom formats and their validators that checks for custom 'format' assertions -// When you add different validators with the same name, they will be overridden, -// and only the last registration will take effect. 
-func WithCustomFormat(name string, validator func(v any) error) Option { - return func(o *ValidationOptions) { - if o.Formats == nil { - o.Formats = make(map[string]func(v any) error) - } - - o.Formats[name] = validator - } -} - -// WithOpenAPIMode enables OpenAPI-specific keyword validation (default: true) -func WithOpenAPIMode() Option { - return func(o *ValidationOptions) { - o.OpenAPIMode = true - } -} - -// WithoutOpenAPIMode disables OpenAPI-specific keyword validation -func WithoutOpenAPIMode() Option { - return func(o *ValidationOptions) { - o.OpenAPIMode = false - } -} - -// WithScalarCoercion enables string to boolean/number coercion (Jackson-style) -func WithScalarCoercion() Option { - return func(o *ValidationOptions) { - o.AllowScalarCoercion = true - } -} - -// WithXmlBodyValidation enables converting an XML body to a JSON when validating the schema from a request and response body -// The default option is set to false -func WithXmlBodyValidation() Option { - return func(o *ValidationOptions) { - o.AllowXMLBodyValidation = true - } -} - -// WithURLEncodedBodyValidation enables converting an URL Encoded body to a JSON when validating the schema from a request and response body -// The default option is set to false -func WithURLEncodedBodyValidation() Option { - return func(o *ValidationOptions) { - o.AllowURLEncodedBodyValidation = true - } -} - -// WithSchemaCache sets a custom cache implementation or disables caching if nil. -// Pass nil to disable schema caching and skip cache warming during validator initialization. -// The default cache is a thread-safe sync.Map wrapper. -func WithSchemaCache(schemaCache cache.SchemaCache) Option { - return func(o *ValidationOptions) { - o.SchemaCache = schemaCache - } -} - -// WithPathTree sets a custom radix tree for path matching. -// The default is built automatically from the OpenAPI specification. 
-func WithPathTree(pathTree radix.PathLookup) Option { - return func(o *ValidationOptions) { - o.PathTree = pathTree - } -} - -// DisablePathTree prevents automatic radix tree construction. -// Use this to fall back to regex-based path matching only. -func DisablePathTree() Option { - return func(o *ValidationOptions) { - o.pathTreeDisabled = true - } -} - -// WithStrictMode enables strict property validation. -// In strict mode, undeclared properties are reported as errors even when -// additionalProperties: true would normally allow them. -// -// This is useful for API governance scenarios where you want to ensure -// clients only send properties that are explicitly documented in the -// OpenAPI specification. -func WithStrictMode() Option { - return func(o *ValidationOptions) { - o.StrictMode = true - } -} - -// WithStrictIgnorePaths sets JSONPath patterns for paths to exclude from strict validation. -// Patterns use glob syntax: -// - * matches a single path segment -// - ** matches any depth (zero or more segments) -// - [*] matches any array index -// - \* escapes a literal asterisk -// -// Examples: -// - "$.body.metadata.*" - any property under metadata -// - "$.body.**.x-*" - any x-* property at any depth -// - "$.headers.X-*" - any header starting with X- -func WithStrictIgnorePaths(paths ...string) Option { - return func(o *ValidationOptions) { - o.StrictIgnorePaths = paths - } -} - -// WithStrictIgnoredHeaders replaces the default ignored headers list entirely. -// Use this to fully control which headers are ignored in strict mode. -// For the default list, see the strict package's DefaultIgnoredHeaders. -func WithStrictIgnoredHeaders(headers ...string) Option { - return func(o *ValidationOptions) { - o.StrictIgnoredHeaders = headers - o.strictIgnoredHeadersMerge = false - } -} - -// WithStrictIgnoredHeadersExtra adds headers to the default ignored list. -// Unlike WithStrictIgnoredHeaders, this merges with the defaults rather -// than replacing them. 
-func WithStrictIgnoredHeadersExtra(headers ...string) Option { - return func(o *ValidationOptions) { - o.StrictIgnoredHeaders = headers - o.strictIgnoredHeadersMerge = true - } -} - -// defaultIgnoredHeaders contains standard HTTP headers ignored by default. -// This is the fallback list used when no custom headers are configured. -var defaultIgnoredHeaders = []string{ - "content-type", "content-length", "accept", "authorization", - "user-agent", "host", "connection", "accept-encoding", - "accept-language", "cache-control", "pragma", "origin", - "referer", "cookie", "date", "etag", "expires", - "if-match", "if-none-match", "if-modified-since", - "last-modified", "transfer-encoding", "vary", "x-forwarded-for", - "x-forwarded-proto", "x-real-ip", "x-request-id", - "request-start-time", // Added by some API clients for timing -} - -// IsPathTreeDisabled returns true if radix tree auto-build was disabled via DisablePathTree. -func (o *ValidationOptions) IsPathTreeDisabled() bool { - return o.pathTreeDisabled -} - -// GetEffectiveStrictIgnoredHeaders returns the list of headers to ignore -// based on configuration. Returns defaults if not configured, merged list -// if extra headers were added, or replaced list if headers were fully replaced. -func (o *ValidationOptions) GetEffectiveStrictIgnoredHeaders() []string { - if o.StrictIgnoredHeaders == nil { - return defaultIgnoredHeaders - } - if o.strictIgnoredHeadersMerge { - return append(defaultIgnoredHeaders, o.StrictIgnoredHeaders...) 
- } - return o.StrictIgnoredHeaders -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/errors/error_utilities.go b/vendor/github.com/pb33f/libopenapi-validator/errors/error_utilities.go deleted file mode 100644 index 787ec1b42de..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/errors/error_utilities.go +++ /dev/null @@ -1,16 +0,0 @@ -package errors - -import ( - "net/http" -) - -// PopulateValidationErrors mutates the provided validation errors with additional useful error information, that is -// not necessarily available when the ValidationError was created and are standard for all errors. -// Specifically, the RequestPath, SpecPath and RequestMethod are populated. -func PopulateValidationErrors(validationErrors []*ValidationError, request *http.Request, path string) { - for _, validationError := range validationErrors { - validationError.SpecPath = path - validationError.RequestMethod = request.Method - validationError.RequestPath = request.URL.Path - } -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/errors/package.go b/vendor/github.com/pb33f/libopenapi-validator/errors/package.go deleted file mode 100644 index 19acc14e520..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/errors/package.go +++ /dev/null @@ -1,5 +0,0 @@ -// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -// Package errors contains all the error types used by the validator -package errors diff --git a/vendor/github.com/pb33f/libopenapi-validator/errors/parameter_errors.go b/vendor/github.com/pb33f/libopenapi-validator/errors/parameter_errors.go deleted file mode 100644 index 2100ba00860..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/errors/parameter_errors.go +++ /dev/null @@ -1,1046 +0,0 @@ -package errors - -import ( - "fmt" - "net/url" - "strings" - - "github.com/pb33f/libopenapi/datamodel/high/base" - - v3 "github.com/pb33f/libopenapi/datamodel/high/v3" - - 
"github.com/pb33f/libopenapi-validator/helpers" -) - -func IncorrectFormEncoding(param *v3.Parameter, qp *helpers.QueryParam, i int) *ValidationError { - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationQuery, - Message: fmt.Sprintf("Query parameter '%s' is not exploded correctly", param.Name), - Reason: fmt.Sprintf("The query parameter '%s' has a default or 'form' encoding defined, "+ - "however the value '%s' is encoded as an object or an array using commas. The contract defines "+ - "the explode value to set to 'true'", param.Name, qp.Values[i]), - SpecLine: param.GoLow().Explode.ValueNode.Line, - SpecCol: param.GoLow().Explode.ValueNode.Column, - ParameterName: param.Name, - Context: param, - HowToFix: fmt.Sprintf(HowToFixParamInvalidFormEncode, - helpers.CollapseCSVIntoFormStyle(param.Name, qp.Values[i])), - } -} - -func IncorrectSpaceDelimiting(param *v3.Parameter, qp *helpers.QueryParam) *ValidationError { - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationQuery, - Message: fmt.Sprintf("Query parameter '%s' delimited incorrectly", param.Name), - Reason: fmt.Sprintf("The query parameter '%s' has 'spaceDelimited' style defined, "+ - "and explode is defined as false. 
There are multiple values (%d) supplied, instead of a single"+ - " space delimited value", param.Name, len(qp.Values)), - SpecLine: param.GoLow().Style.ValueNode.Line, - SpecCol: param.GoLow().Style.ValueNode.Column, - ParameterName: param.Name, - Context: param, - HowToFix: fmt.Sprintf(HowToFixParamInvalidSpaceDelimitedObjectExplode, - helpers.CollapseCSVIntoSpaceDelimitedStyle(param.Name, qp.Values)), - } -} - -func IncorrectPipeDelimiting(param *v3.Parameter, qp *helpers.QueryParam) *ValidationError { - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationQuery, - Message: fmt.Sprintf("Query parameter '%s' delimited incorrectly", param.Name), - Reason: fmt.Sprintf("The query parameter '%s' has 'pipeDelimited' style defined, "+ - "and explode is defined as false. There are multiple values (%d) supplied, instead of a single"+ - " space delimited value", param.Name, len(qp.Values)), - SpecLine: param.GoLow().Style.ValueNode.Line, - SpecCol: param.GoLow().Style.ValueNode.Column, - ParameterName: param.Name, - Context: param, - HowToFix: fmt.Sprintf(HowToFixParamInvalidPipeDelimitedObjectExplode, - helpers.CollapseCSVIntoPipeDelimitedStyle(param.Name, qp.Values)), - } -} - -func InvalidDeepObject(param *v3.Parameter, qp *helpers.QueryParam) *ValidationError { - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationQuery, - Message: fmt.Sprintf("Query parameter '%s' is not a valid deepObject", param.Name), - Reason: fmt.Sprintf("The query parameter '%s' has the 'deepObject' style defined, "+ - "There are multiple values (%d) supplied, instead of a single "+ - "value", param.Name, len(qp.Values)), - SpecLine: param.GoLow().Style.ValueNode.Line, - SpecCol: param.GoLow().Style.ValueNode.Column, - ParameterName: param.Name, - Context: param, - HowToFix: fmt.Sprintf(HowToFixParamInvalidDeepObjectMultipleValues, - 
helpers.CollapseCSVIntoPipeDelimitedStyle(param.Name, qp.Values)), - } -} - -func QueryParameterMissing(param *v3.Parameter, pathTemplate string, operation string, renderedSchema string) *ValidationError { - keywordLocation := helpers.ConstructParameterJSONPointer(pathTemplate, operation, param.Name, "required") - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationQuery, - Message: fmt.Sprintf("Query parameter '%s' is missing", param.Name), - Reason: fmt.Sprintf("The query parameter '%s' is defined as being required, "+ - "however it's missing from the requests", param.Name), - SpecLine: param.GoLow().Required.KeyNode.Line, - SpecCol: param.GoLow().Required.KeyNode.Column, - ParameterName: param.Name, - HowToFix: HowToFixMissingValue, - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Required query parameter '%s' is missing", param.Name), - FieldName: param.Name, - FieldPath: "", - InstancePath: []string{}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedSchema, - }}, - } -} - -func HeaderParameterMissing(param *v3.Parameter, pathTemplate string, operation string, renderedSchema string) *ValidationError { - escapedPath := strings.ReplaceAll(pathTemplate, "~", "~0") - escapedPath = strings.ReplaceAll(escapedPath, "/", "~1") - escapedPath = strings.TrimPrefix(escapedPath, "~1") - keywordLocation := fmt.Sprintf("/paths/%s/%s/parameters/%s/required", escapedPath, strings.ToLower(operation), param.Name) - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationHeader, - Message: fmt.Sprintf("Header parameter '%s' is missing", param.Name), - Reason: fmt.Sprintf("The header parameter '%s' is defined as being required, "+ - "however it's missing from the requests", param.Name), - SpecLine: param.GoLow().Required.KeyNode.Line, - SpecCol: param.GoLow().Required.KeyNode.Column, - ParameterName: 
param.Name, - HowToFix: HowToFixMissingValue, - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Required header parameter '%s' is missing", param.Name), - FieldName: param.Name, - FieldPath: "", - InstancePath: []string{}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedSchema, - }}, - } -} - -func CookieParameterMissing(param *v3.Parameter, pathTemplate string, operation string, renderedSchema string) *ValidationError { - keywordLocation := helpers.ConstructParameterJSONPointer(pathTemplate, operation, param.Name, "required") - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationCookie, - Message: fmt.Sprintf("Cookie parameter '%s' is missing", param.Name), - Reason: fmt.Sprintf("The cookie parameter '%s' is defined as being required, "+ - "however it's missing from the request", param.Name), - SpecLine: param.GoLow().Required.KeyNode.Line, - SpecCol: param.GoLow().Required.KeyNode.Column, - ParameterName: param.Name, - HowToFix: HowToFixMissingValue, - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Required cookie parameter '%s' is missing", param.Name), - FieldName: param.Name, - FieldPath: "", - InstancePath: []string{}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedSchema, - }}, - } -} - -func HeaderParameterCannotBeDecoded(param *v3.Parameter, val string, pathTemplate string, operation string, renderedSchema string) *ValidationError { - keywordLocation := helpers.ConstructParameterJSONPointer(pathTemplate, operation, param.Name, "type") - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationHeader, - Message: fmt.Sprintf("Header parameter '%s' cannot be decoded", param.Name), - Reason: fmt.Sprintf("The header parameter '%s' cannot be "+ - "extracted into an object, '%s' is malformed", param.Name, val), - SpecLine: 
param.GoLow().Schema.Value.Schema().Type.KeyNode.Line, - SpecCol: param.GoLow().Schema.Value.Schema().Type.KeyNode.Line, - ParameterName: param.Name, - HowToFix: HowToFixInvalidEncoding, - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Header value '%s' cannot be decoded as object (malformed encoding)", val), - FieldName: param.Name, - InstancePath: []string{param.Name}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedSchema, - }}, - } -} - -func IncorrectHeaderParamEnum(param *v3.Parameter, ef string, sch *base.Schema, pathTemplate string, operation string, renderedSchema string) *ValidationError { - var enums []string - for i := range sch.Enum { - enums = append(enums, fmt.Sprint(sch.Enum[i].Value)) - } - validEnums := strings.Join(enums, ", ") - - keywordLocation := helpers.ConstructParameterJSONPointer(pathTemplate, operation, param.Name, "enum") - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationHeader, - Message: fmt.Sprintf("Header parameter '%s' does not match allowed values", param.Name), - Reason: fmt.Sprintf("The header parameter '%s' has pre-defined "+ - "values set via an enum. 
The value '%s' is not one of those values.", param.Name, ef), - SpecLine: param.GoLow().Schema.Value.Schema().Enum.KeyNode.Line, - SpecCol: param.GoLow().Schema.Value.Schema().Enum.KeyNode.Column, - ParameterName: param.Name, - Context: sch, - HowToFix: fmt.Sprintf(HowToFixParamInvalidEnum, ef, validEnums), - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Value '%s' does not match any enum values: [%s]", ef, validEnums), - FieldName: param.Name, - InstancePath: []string{param.Name}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedSchema, - }}, - } -} - -func IncorrectQueryParamArrayBoolean( - param *v3.Parameter, item string, sch *base.Schema, itemsSchema *base.Schema, pathTemplate string, operation string, renderedItemsSchema string, -) *ValidationError { - keywordLocation := helpers.ConstructParameterJSONPointer(pathTemplate, operation, param.Name, "items/type") - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationQuery, - Message: fmt.Sprintf("Query array parameter '%s' is not a valid boolean", param.Name), - Reason: fmt.Sprintf("The query parameter (which is an array) '%s' is defined as being a boolean, "+ - "however the value '%s' is not a valid true/false value", param.Name, item), - SpecLine: sch.Items.A.GoLow().Schema().Type.KeyNode.Line, - SpecCol: sch.Items.A.GoLow().Schema().Type.KeyNode.Column, - ParameterName: param.Name, - Context: itemsSchema, - HowToFix: fmt.Sprintf(HowToFixParamInvalidBoolean, item), - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Array item '%s' is not a valid boolean", item), - FieldName: param.Name, - InstancePath: []string{param.Name, "[item]"}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedItemsSchema, - }}, - } -} - -func IncorrectParamArrayMaxNumItems(param *v3.Parameter, sch *base.Schema, expected, actual int64, pathTemplate string, operation string, renderedSchema 
string) *ValidationError { - keywordLocation := helpers.ConstructParameterJSONPointer(pathTemplate, operation, param.Name, "maxItems") - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationQuery, - Message: fmt.Sprintf("Query array parameter '%s' has too many items", param.Name), - Reason: fmt.Sprintf("The query parameter (which is an array) '%s' has a maximum item length of %d, "+ - "however the request provided %d items", param.Name, expected, actual), - SpecLine: sch.Items.A.GoLow().Schema().Type.KeyNode.Line, - SpecCol: sch.Items.A.GoLow().Schema().Type.KeyNode.Column, - ParameterName: param.Name, - Context: sch, - HowToFix: fmt.Sprintf(HowToFixInvalidMaxItems, expected), - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Array has %d items, but maximum is %d", actual, expected), - FieldName: param.Name, - InstancePath: []string{param.Name}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedSchema, - }}, - } -} - -func IncorrectParamArrayMinNumItems(param *v3.Parameter, sch *base.Schema, expected, actual int64, pathTemplate string, operation string, renderedSchema string) *ValidationError { - keywordLocation := helpers.ConstructParameterJSONPointer(pathTemplate, operation, param.Name, "minItems") - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationQuery, - Message: fmt.Sprintf("Query array parameter '%s' does not have enough items", param.Name), - Reason: fmt.Sprintf("The query parameter (which is an array) '%s' has a minimum items length of %d, "+ - "however the request provided %d items", param.Name, expected, actual), - SpecLine: sch.Items.A.GoLow().Schema().Type.KeyNode.Line, - SpecCol: sch.Items.A.GoLow().Schema().Type.KeyNode.Column, - ParameterName: param.Name, - Context: sch, - HowToFix: fmt.Sprintf(HowToFixInvalidMinItems, expected), - SchemaValidationErrors: 
[]*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Array has %d items, but minimum is %d", actual, expected), - FieldName: param.Name, - InstancePath: []string{param.Name}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedSchema, - }}, - } -} - -func IncorrectParamArrayUniqueItems(param *v3.Parameter, sch *base.Schema, duplicates string, pathTemplate string, operation string, renderedSchema string) *ValidationError { - keywordLocation := helpers.ConstructParameterJSONPointer(pathTemplate, operation, param.Name, "uniqueItems") - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationQuery, - Message: fmt.Sprintf("Query array parameter '%s' contains non-unique items", param.Name), - Reason: fmt.Sprintf("The query parameter (which is an array) '%s' contains the following duplicates: '%s'", param.Name, duplicates), - SpecLine: sch.Items.A.GoLow().Schema().Type.KeyNode.Line, - SpecCol: sch.Items.A.GoLow().Schema().Type.KeyNode.Column, - ParameterName: param.Name, - Context: sch, - HowToFix: "Ensure the array values are all unique", - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Array contains duplicate values: %s", duplicates), - FieldName: param.Name, - InstancePath: []string{param.Name}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedSchema, - }}, - } -} - -func IncorrectCookieParamArrayBoolean( - param *v3.Parameter, item string, sch *base.Schema, itemsSchema *base.Schema, pathTemplate string, operation string, renderedItemsSchema string, -) *ValidationError { - keywordLocation := helpers.ConstructParameterJSONPointer(pathTemplate, operation, param.Name, "items/type") - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationCookie, - Message: fmt.Sprintf("Cookie array parameter '%s' is not a valid boolean", param.Name), - Reason: fmt.Sprintf("The cookie parameter (which is 
an array) '%s' is defined as being a boolean, "+ - "however the value '%s' is not a valid true/false value", param.Name, item), - SpecLine: sch.Items.A.GoLow().Schema().Type.KeyNode.Line, - SpecCol: sch.Items.A.GoLow().Schema().Type.KeyNode.Column, - ParameterName: param.Name, - Context: itemsSchema, - HowToFix: fmt.Sprintf(HowToFixParamInvalidBoolean, item), - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Array item '%s' is not a valid boolean", item), - FieldName: param.Name, - InstancePath: []string{param.Name, "[item]"}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedItemsSchema, - }}, - } -} - -func IncorrectQueryParamArrayInteger( - param *v3.Parameter, item string, sch *base.Schema, itemsSchema *base.Schema, pathTemplate string, operation string, renderedItemsSchema string, -) *ValidationError { - keywordLocation := helpers.ConstructParameterJSONPointer(pathTemplate, operation, param.Name, "items/type") - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationQuery, - Message: fmt.Sprintf("Query array parameter '%s' is not a valid integer", param.Name), - Reason: fmt.Sprintf("The query parameter (which is an array) '%s' is defined as being an integer, "+ - "however the value '%s' is not a valid integer", param.Name, item), - SpecLine: sch.Items.A.GoLow().Schema().Type.KeyNode.Line, - SpecCol: sch.Items.A.GoLow().Schema().Type.KeyNode.Column, - ParameterName: param.Name, - Context: itemsSchema, - HowToFix: fmt.Sprintf(HowToFixParamInvalidInteger, item), - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Array item '%s' is not a valid integer", item), - FieldName: param.Name, - InstancePath: []string{param.Name, "[item]"}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedItemsSchema, - }}, - } -} - -func IncorrectQueryParamArrayNumber( - param *v3.Parameter, item string, sch *base.Schema, itemsSchema 
*base.Schema, pathTemplate string, operation string, renderedItemsSchema string, -) *ValidationError { - keywordLocation := helpers.ConstructParameterJSONPointer(pathTemplate, operation, param.Name, "items/type") - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationQuery, - Message: fmt.Sprintf("Query array parameter '%s' is not a valid number", param.Name), - Reason: fmt.Sprintf("The query parameter (which is an array) '%s' is defined as being a number, "+ - "however the value '%s' is not a valid number", param.Name, item), - SpecLine: sch.Items.A.GoLow().Schema().Type.KeyNode.Line, - SpecCol: sch.Items.A.GoLow().Schema().Type.KeyNode.Column, - ParameterName: param.Name, - Context: itemsSchema, - HowToFix: fmt.Sprintf(HowToFixParamInvalidNumber, item), - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Array item '%s' is not a valid number", item), - FieldName: param.Name, - InstancePath: []string{param.Name, "[item]"}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedItemsSchema, - }}, - } -} - -func IncorrectCookieParamArrayNumber( - param *v3.Parameter, item string, sch *base.Schema, itemsSchema *base.Schema, pathTemplate string, operation string, renderedItemsSchema string, -) *ValidationError { - keywordLocation := helpers.ConstructParameterJSONPointer(pathTemplate, operation, param.Name, "items/type") - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationCookie, - Message: fmt.Sprintf("Cookie array parameter '%s' is not a valid number", param.Name), - Reason: fmt.Sprintf("The cookie parameter (which is an array) '%s' is defined as being a number, "+ - "however the value '%s' is not a valid number", param.Name, item), - SpecLine: sch.Items.A.GoLow().Schema().Type.KeyNode.Line, - SpecCol: sch.Items.A.GoLow().Schema().Type.KeyNode.Column, - ParameterName: param.Name, - Context: 
itemsSchema, - HowToFix: fmt.Sprintf(HowToFixParamInvalidNumber, item), - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Array item '%s' is not a valid number", item), - FieldName: param.Name, - InstancePath: []string{param.Name, "[item]"}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedItemsSchema, - }}, - } -} - -func IncorrectParamEncodingJSON(param *v3.Parameter, ef string, sch *base.Schema, pathTemplate string, operation string, renderedSchema string) *ValidationError { - escapedPath := strings.ReplaceAll(pathTemplate, "~", "~0") - escapedPath = strings.ReplaceAll(escapedPath, "/", "~1") - escapedPath = strings.TrimPrefix(escapedPath, "~1") - keywordLocation := fmt.Sprintf("/paths/%s/%s/parameters/%s/content/application~1json/schema", escapedPath, strings.ToLower(operation), param.Name) - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationQuery, - Message: fmt.Sprintf("Query parameter '%s' is not valid JSON", param.Name), - Reason: fmt.Sprintf("The query parameter '%s' is defined as being a JSON object, "+ - "however the value '%s' is not valid JSON", param.Name, ef), - SpecLine: param.GoLow().FindContent(helpers.JSONContentType).ValueNode.Line, - SpecCol: param.GoLow().FindContent(helpers.JSONContentType).ValueNode.Column, - ParameterName: param.Name, - Context: sch, - HowToFix: HowToFixInvalidJSON, - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Value '%s' is not valid JSON", ef), - FieldName: param.Name, - InstancePath: []string{param.Name}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedSchema, - }}, - } -} - -func IncorrectQueryParamBool(param *v3.Parameter, ef string, sch *base.Schema, pathTemplate string, operation string, renderedSchema string) *ValidationError { - keywordLocation := helpers.ConstructParameterJSONPointer(pathTemplate, operation, param.Name, "type") - - return &ValidationError{ 
- ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationQuery, - Message: fmt.Sprintf("Query parameter '%s' is not a valid boolean", param.Name), - Reason: fmt.Sprintf("The query parameter '%s' is defined as being a boolean, "+ - "however the value '%s' is not a valid boolean", param.Name, ef), - SpecLine: param.GoLow().Schema.KeyNode.Line, - SpecCol: param.GoLow().Schema.KeyNode.Column, - ParameterName: param.Name, - Context: sch, - HowToFix: fmt.Sprintf(HowToFixParamInvalidBoolean, ef), - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Value '%s' is not a valid boolean", ef), - FieldName: param.Name, - InstancePath: []string{param.Name}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedSchema, - }}, - } -} - -func InvalidQueryParamInteger(param *v3.Parameter, ef string, sch *base.Schema, pathTemplate string, operation string, renderedSchema string) *ValidationError { - keywordLocation := helpers.ConstructParameterJSONPointer(pathTemplate, operation, param.Name, "type") - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationQuery, - Message: fmt.Sprintf("Query parameter '%s' is not a valid integer", param.Name), - Reason: fmt.Sprintf("The query parameter '%s' is defined as being an integer, "+ - "however the value '%s' is not a valid integer", param.Name, ef), - SpecLine: param.GoLow().Schema.KeyNode.Line, - SpecCol: param.GoLow().Schema.KeyNode.Column, - ParameterName: param.Name, - Context: sch, - HowToFix: fmt.Sprintf(HowToFixParamInvalidInteger, ef), - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Value '%s' is not a valid integer", ef), - FieldName: param.Name, - InstancePath: []string{param.Name}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedSchema, - }}, - } -} - -func InvalidQueryParamNumber(param *v3.Parameter, ef string, sch *base.Schema, pathTemplate string, 
operation string, renderedSchema string) *ValidationError { - keywordLocation := helpers.ConstructParameterJSONPointer(pathTemplate, operation, param.Name, "type") - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationQuery, - Message: fmt.Sprintf("Query parameter '%s' is not a valid number", param.Name), - Reason: fmt.Sprintf("The query parameter '%s' is defined as being a number, "+ - "however the value '%s' is not a valid number", param.Name, ef), - SpecLine: param.GoLow().Schema.KeyNode.Line, - SpecCol: param.GoLow().Schema.KeyNode.Column, - ParameterName: param.Name, - Context: sch, - HowToFix: fmt.Sprintf(HowToFixParamInvalidNumber, ef), - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Value '%s' is not a valid number", ef), - FieldName: param.Name, - InstancePath: []string{param.Name}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedSchema, - }}, - } -} - -func IncorrectQueryParamEnum(param *v3.Parameter, ef string, sch *base.Schema, pathTemplate string, operation string, renderedSchema string) *ValidationError { - var enums []string - for i := range sch.Enum { - enums = append(enums, fmt.Sprint(sch.Enum[i].Value)) - } - validEnums := strings.Join(enums, ", ") - - keywordLocation := helpers.ConstructParameterJSONPointer(pathTemplate, operation, param.Name, "enum") - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationQuery, - Message: fmt.Sprintf("Query parameter '%s' does not match allowed values", param.Name), - Reason: fmt.Sprintf("The query parameter '%s' has pre-defined "+ - "values set via an enum. 
The value '%s' is not one of those values.", param.Name, ef), - SpecLine: param.GoLow().Schema.Value.Schema().Enum.KeyNode.Line, - SpecCol: param.GoLow().Schema.Value.Schema().Enum.KeyNode.Column, - ParameterName: param.Name, - Context: sch, - HowToFix: fmt.Sprintf(HowToFixParamInvalidEnum, ef, validEnums), - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Value '%s' does not match any enum values: [%s]", ef, validEnums), - FieldName: param.Name, - InstancePath: []string{param.Name}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedSchema, - }}, - } -} - -func IncorrectQueryParamEnumArray(param *v3.Parameter, ef string, sch *base.Schema, pathTemplate string, operation string, renderedItemsSchema string) *ValidationError { - var enums []string - // look at that model fly! - for i := range param.GoLow().Schema.Value.Schema().Items.Value.A.Schema().Enum.Value { - enums = append(enums, - fmt.Sprint(param.GoLow().Schema.Value.Schema().Items.Value.A.Schema().Enum.Value[i].Value.Value)) - } - validEnums := strings.Join(enums, ", ") - - keywordLocation := helpers.ConstructParameterJSONPointer(pathTemplate, operation, param.Name, "items/enum") - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationQuery, - Message: fmt.Sprintf("Query array parameter '%s' does not match allowed values", param.Name), - Reason: fmt.Sprintf("The query array parameter '%s' has pre-defined "+ - "values set via an enum. 
The value '%s' is not one of those values.", param.Name, ef), - SpecLine: param.GoLow().Schema.Value.Schema().Items.Value.A.Schema().Enum.KeyNode.Line, - SpecCol: param.GoLow().Schema.Value.Schema().Items.Value.A.Schema().Enum.KeyNode.Line, - ParameterName: param.Name, - Context: sch, - HowToFix: fmt.Sprintf(HowToFixParamInvalidEnum, ef, validEnums), - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Array item '%s' does not match any enum values: [%s]", ef, validEnums), - FieldName: param.Name, - InstancePath: []string{param.Name, "[item]"}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedItemsSchema, - }}, - } -} - -func IncorrectReservedValues(param *v3.Parameter, ef string, sch *base.Schema, pathTemplate string, operation string, renderedSchema string) *ValidationError { - escapedPath := strings.ReplaceAll(pathTemplate, "~", "~0") - escapedPath = strings.ReplaceAll(escapedPath, "/", "~1") - escapedPath = strings.TrimPrefix(escapedPath, "~1") - keywordLocation := fmt.Sprintf("/paths/%s/%s/parameters/%s/allowReserved", escapedPath, strings.ToLower(operation), param.Name) - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationQuery, - Message: fmt.Sprintf("Query parameter '%s' value contains reserved values", param.Name), - Reason: fmt.Sprintf("The query parameter '%s' has 'allowReserved' set to false, "+ - "however the value '%s' contains one of the following characters: :/?#[]@!$&'()*+,;=", param.Name, ef), - SpecLine: param.GoLow().Schema.KeyNode.Line, - SpecCol: param.GoLow().Schema.KeyNode.Column, - ParameterName: param.Name, - Context: sch, - HowToFix: fmt.Sprintf(HowToFixReservedValues, url.QueryEscape(ef)), - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Value '%s' contains reserved characters but allowReserved is false", ef), - FieldName: param.Name, - InstancePath: []string{param.Name}, - KeywordLocation: 
keywordLocation, - ReferenceSchema: renderedSchema, - }}, - } -} - -func InvalidHeaderParamInteger(param *v3.Parameter, ef string, sch *base.Schema, pathTemplate string, operation string, renderedSchema string) *ValidationError { - keywordLocation := helpers.ConstructParameterJSONPointer(pathTemplate, operation, param.Name, "type") - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationHeader, - Message: fmt.Sprintf("Header parameter '%s' is not a valid integer", param.Name), - Reason: fmt.Sprintf("The header parameter '%s' is defined as being an integer, "+ - "however the value '%s' is not a valid integer", param.Name, ef), - SpecLine: param.GoLow().Schema.KeyNode.Line, - SpecCol: param.GoLow().Schema.KeyNode.Column, - ParameterName: param.Name, - Context: sch, - HowToFix: fmt.Sprintf(HowToFixParamInvalidInteger, ef), - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Value '%s' is not a valid integer", ef), - FieldName: param.Name, - InstancePath: []string{param.Name}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedSchema, - }}, - } -} - -func InvalidHeaderParamNumber(param *v3.Parameter, ef string, sch *base.Schema, pathTemplate string, operation string, renderedSchema string) *ValidationError { - keywordLocation := helpers.ConstructParameterJSONPointer(pathTemplate, operation, param.Name, "type") - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationHeader, - Message: fmt.Sprintf("Header parameter '%s' is not a valid number", param.Name), - Reason: fmt.Sprintf("The header parameter '%s' is defined as being a number, "+ - "however the value '%s' is not a valid number", param.Name, ef), - SpecLine: param.GoLow().Schema.KeyNode.Line, - SpecCol: param.GoLow().Schema.KeyNode.Column, - ParameterName: param.Name, - Context: sch, - HowToFix: fmt.Sprintf(HowToFixParamInvalidNumber, ef), - 
SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Value '%s' is not a valid number", ef), - FieldName: param.Name, - InstancePath: []string{param.Name}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedSchema, - }}, - } -} - -func InvalidCookieParamInteger(param *v3.Parameter, ef string, sch *base.Schema, pathTemplate string, operation string, renderedSchema string) *ValidationError { - keywordLocation := helpers.ConstructParameterJSONPointer(pathTemplate, operation, param.Name, "type") - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationCookie, - Message: fmt.Sprintf("Cookie parameter '%s' is not a valid integer", param.Name), - Reason: fmt.Sprintf("The cookie parameter '%s' is defined as being an integer, "+ - "however the value '%s' is not a valid integer", param.Name, ef), - SpecLine: param.GoLow().Schema.KeyNode.Line, - SpecCol: param.GoLow().Schema.KeyNode.Column, - ParameterName: param.Name, - Context: sch, - HowToFix: fmt.Sprintf(HowToFixParamInvalidInteger, ef), - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Value '%s' is not a valid integer", ef), - FieldName: param.Name, - InstancePath: []string{param.Name}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedSchema, - }}, - } -} - -func InvalidCookieParamNumber(param *v3.Parameter, ef string, sch *base.Schema, pathTemplate string, operation string, renderedSchema string) *ValidationError { - keywordLocation := helpers.ConstructParameterJSONPointer(pathTemplate, operation, param.Name, "type") - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationCookie, - Message: fmt.Sprintf("Cookie parameter '%s' is not a valid number", param.Name), - Reason: fmt.Sprintf("The cookie parameter '%s' is defined as being a number, "+ - "however the value '%s' is not a valid number", param.Name, ef), - 
SpecLine: param.GoLow().Schema.KeyNode.Line, - SpecCol: param.GoLow().Schema.KeyNode.Column, - ParameterName: param.Name, - Context: sch, - HowToFix: fmt.Sprintf(HowToFixParamInvalidNumber, ef), - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Value '%s' is not a valid number", ef), - FieldName: param.Name, - InstancePath: []string{param.Name}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedSchema, - }}, - } -} - -func IncorrectHeaderParamBool(param *v3.Parameter, ef string, sch *base.Schema, pathTemplate string, operation string, renderedSchema string) *ValidationError { - keywordLocation := helpers.ConstructParameterJSONPointer(pathTemplate, operation, param.Name, "type") - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationHeader, - Message: fmt.Sprintf("Header parameter '%s' is not a valid boolean", param.Name), - Reason: fmt.Sprintf("The header parameter '%s' is defined as being a boolean, "+ - "however the value '%s' is not a valid boolean", param.Name, ef), - SpecLine: param.GoLow().Schema.KeyNode.Line, - SpecCol: param.GoLow().Schema.KeyNode.Column, - ParameterName: param.Name, - Context: sch, - HowToFix: fmt.Sprintf(HowToFixParamInvalidBoolean, ef), - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Value '%s' is not a valid boolean", ef), - FieldName: param.Name, - InstancePath: []string{param.Name}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedSchema, - }}, - } -} - -func IncorrectCookieParamBool(param *v3.Parameter, ef string, sch *base.Schema, pathTemplate string, operation string, renderedSchema string) *ValidationError { - keywordLocation := helpers.ConstructParameterJSONPointer(pathTemplate, operation, param.Name, "type") - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationCookie, - Message: fmt.Sprintf("Cookie parameter 
'%s' is not a valid boolean", param.Name), - Reason: fmt.Sprintf("The cookie parameter '%s' is defined as being a boolean, "+ - "however the value '%s' is not a valid boolean", param.Name, ef), - SpecLine: param.GoLow().Schema.KeyNode.Line, - SpecCol: param.GoLow().Schema.KeyNode.Column, - ParameterName: param.Name, - Context: sch, - HowToFix: fmt.Sprintf(HowToFixParamInvalidBoolean, ef), - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Value '%s' is not a valid boolean", ef), - FieldName: param.Name, - InstancePath: []string{param.Name}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedSchema, - }}, - } -} - -func IncorrectCookieParamEnum(param *v3.Parameter, ef string, sch *base.Schema, pathTemplate string, operation string, renderedSchema string) *ValidationError { - var enums []string - for i := range sch.Enum { - enums = append(enums, fmt.Sprint(sch.Enum[i].Value)) - } - validEnums := strings.Join(enums, ", ") - - keywordLocation := helpers.ConstructParameterJSONPointer(pathTemplate, operation, param.Name, "enum") - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationCookie, - Message: fmt.Sprintf("Cookie parameter '%s' does not match allowed values", param.Name), - Reason: fmt.Sprintf("The cookie parameter '%s' has pre-defined "+ - "values set via an enum. 
The value '%s' is not one of those values.", param.Name, ef), - SpecLine: param.GoLow().Schema.Value.Schema().Enum.KeyNode.Line, - SpecCol: param.GoLow().Schema.Value.Schema().Enum.KeyNode.Column, - ParameterName: param.Name, - Context: sch, - HowToFix: fmt.Sprintf(HowToFixParamInvalidEnum, ef, validEnums), - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Value '%s' does not match any enum values: [%s]", ef, validEnums), - FieldName: param.Name, - InstancePath: []string{param.Name}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedSchema, - }}, - } -} - -func IncorrectHeaderParamArrayBoolean( - param *v3.Parameter, item string, sch *base.Schema, itemsSchema *base.Schema, pathTemplate string, operation string, renderedItemsSchema string, -) *ValidationError { - keywordLocation := helpers.ConstructParameterJSONPointer(pathTemplate, operation, param.Name, "items/type") - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationHeader, - Message: fmt.Sprintf("Header array parameter '%s' is not a valid boolean", param.Name), - Reason: fmt.Sprintf("The header parameter (which is an array) '%s' is defined as being a boolean, "+ - "however the value '%s' is not a valid true/false value", param.Name, item), - SpecLine: sch.Items.A.GoLow().Schema().Type.KeyNode.Line, - SpecCol: sch.Items.A.GoLow().Schema().Type.KeyNode.Column, - ParameterName: param.Name, - Context: itemsSchema, - HowToFix: fmt.Sprintf(HowToFixParamInvalidBoolean, item), - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Array item '%s' is not a valid boolean", item), - FieldName: param.Name, - InstancePath: []string{param.Name, "[item]"}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedItemsSchema, - }}, - } -} - -func IncorrectHeaderParamArrayNumber( - param *v3.Parameter, item string, sch *base.Schema, itemsSchema *base.Schema, pathTemplate string, operation 
string, renderedItemsSchema string, -) *ValidationError { - keywordLocation := helpers.ConstructParameterJSONPointer(pathTemplate, operation, param.Name, "items/type") - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationHeader, - Message: fmt.Sprintf("Header array parameter '%s' is not a valid number", param.Name), - Reason: fmt.Sprintf("The header parameter (which is an array) '%s' is defined as being a number, "+ - "however the value '%s' is not a valid number", param.Name, item), - SpecLine: sch.Items.A.GoLow().Schema().Type.KeyNode.Line, - SpecCol: sch.Items.A.GoLow().Schema().Type.KeyNode.Column, - ParameterName: param.Name, - Context: itemsSchema, - HowToFix: fmt.Sprintf(HowToFixParamInvalidNumber, item), - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Array item '%s' is not a valid number", item), - FieldName: param.Name, - InstancePath: []string{param.Name, "[item]"}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedItemsSchema, - }}, - } -} - -func IncorrectPathParamBool(param *v3.Parameter, item string, sch *base.Schema, pathTemplate string, renderedSchema string) *ValidationError { - escapedPath := strings.ReplaceAll(pathTemplate, "~", "~0") - escapedPath = strings.ReplaceAll(escapedPath, "/", "~1") - keywordLocation := fmt.Sprintf("/paths/%s/parameters/%s/schema/type", escapedPath, param.Name) - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationPath, - Message: fmt.Sprintf("Path parameter '%s' is not a valid boolean", param.Name), - Reason: fmt.Sprintf("The path parameter '%s' is defined as being a boolean, "+ - "however the value '%s' is not a valid boolean", param.Name, item), - SpecLine: param.GoLow().Schema.KeyNode.Line, - SpecCol: param.GoLow().Schema.KeyNode.Column, - ParameterName: param.Name, - Context: sch, - HowToFix: fmt.Sprintf(HowToFixParamInvalidBoolean, 
item), - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Value '%s' is not a valid boolean", item), - FieldName: param.Name, - InstancePath: []string{param.Name}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedSchema, - }}, - } -} - -func IncorrectPathParamEnum(param *v3.Parameter, ef string, sch *base.Schema, pathTemplate string, renderedSchema string) *ValidationError { - escapedPath := strings.ReplaceAll(pathTemplate, "~", "~0") - escapedPath = strings.ReplaceAll(escapedPath, "/", "~1") - keywordLocation := fmt.Sprintf("/paths/%s/parameters/%s/schema/enum", escapedPath, param.Name) - - var enums []string - for i := range sch.Enum { - enums = append(enums, fmt.Sprint(sch.Enum[i].Value)) - } - validEnums := strings.Join(enums, ", ") - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationPath, - ParameterName: param.Name, - Message: fmt.Sprintf("Path parameter '%s' does not match allowed values", param.Name), - Reason: fmt.Sprintf("The path parameter '%s' has pre-defined "+ - "values set via an enum. 
The value '%s' is not one of those values.", param.Name, ef), - SpecLine: param.GoLow().Schema.Value.Schema().Enum.KeyNode.Line, - SpecCol: param.GoLow().Schema.Value.Schema().Enum.KeyNode.Column, - Context: sch, - HowToFix: fmt.Sprintf(HowToFixParamInvalidEnum, ef, validEnums), - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Value '%s' does not match any enum values: [%s]", ef, validEnums), - FieldName: param.Name, - InstancePath: []string{param.Name}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedSchema, - }}, - } -} - -func IncorrectPathParamInteger(param *v3.Parameter, item string, sch *base.Schema, pathTemplate string, renderedSchema string) *ValidationError { - escapedPath := strings.ReplaceAll(pathTemplate, "~", "~0") - escapedPath = strings.ReplaceAll(escapedPath, "/", "~1") - keywordLocation := fmt.Sprintf("/paths/%s/parameters/%s/schema/type", escapedPath, param.Name) - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationPath, - Message: fmt.Sprintf("Path parameter '%s' is not a valid integer", param.Name), - Reason: fmt.Sprintf("The path parameter '%s' is defined as being an integer, "+ - "however the value '%s' is not a valid integer", param.Name, item), - SpecLine: param.GoLow().Schema.KeyNode.Line, - SpecCol: param.GoLow().Schema.KeyNode.Column, - ParameterName: param.Name, - Context: sch, - HowToFix: fmt.Sprintf(HowToFixParamInvalidInteger, item), - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Value '%s' is not a valid integer", item), - FieldName: param.Name, - InstancePath: []string{param.Name}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedSchema, - }}, - } -} - -func IncorrectPathParamNumber(param *v3.Parameter, item string, sch *base.Schema, pathTemplate string, renderedSchema string) *ValidationError { - escapedPath := strings.ReplaceAll(pathTemplate, "~", "~0") - escapedPath = 
strings.ReplaceAll(escapedPath, "/", "~1") - keywordLocation := fmt.Sprintf("/paths/%s/parameters/%s/schema/type", escapedPath, param.Name) - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationPath, - Message: fmt.Sprintf("Path parameter '%s' is not a valid number", param.Name), - Reason: fmt.Sprintf("The path parameter '%s' is defined as being a number, "+ - "however the value '%s' is not a valid number", param.Name, item), - SpecLine: param.GoLow().Schema.KeyNode.Line, - SpecCol: param.GoLow().Schema.KeyNode.Column, - ParameterName: param.Name, - Context: sch, - HowToFix: fmt.Sprintf(HowToFixParamInvalidNumber, item), - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Value '%s' is not a valid number", item), - FieldName: param.Name, - InstancePath: []string{param.Name}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedSchema, - }}, - } -} - -func IncorrectPathParamArrayNumber( - param *v3.Parameter, item string, sch *base.Schema, itemsSchema *base.Schema, pathTemplate string, renderedSchema string, -) *ValidationError { - escapedPath := strings.ReplaceAll(pathTemplate, "~", "~0") - escapedPath = strings.ReplaceAll(escapedPath, "/", "~1") - keywordLocation := fmt.Sprintf("/paths/%s/parameters/%s/schema/items/type", escapedPath, param.Name) - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationPath, - Message: fmt.Sprintf("Path array parameter '%s' is not a valid number", param.Name), - Reason: fmt.Sprintf("The path parameter (which is an array) '%s' is defined as being a number, "+ - "however the value '%s' is not a valid number", param.Name, item), - SpecLine: sch.Items.A.GoLow().Schema().Type.KeyNode.Line, - SpecCol: sch.Items.A.GoLow().Schema().Type.KeyNode.Column, - ParameterName: param.Name, - Context: itemsSchema, - HowToFix: fmt.Sprintf(HowToFixParamInvalidNumber, item), - 
SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Array item '%s' is not a valid number", item), - FieldName: param.Name, - InstancePath: []string{param.Name, "[item]"}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedSchema, - }}, - } -} - -func IncorrectPathParamArrayInteger( - param *v3.Parameter, item string, sch *base.Schema, itemsSchema *base.Schema, pathTemplate string, renderedSchema string, -) *ValidationError { - escapedPath := strings.ReplaceAll(pathTemplate, "~", "~0") - escapedPath = strings.ReplaceAll(escapedPath, "/", "~1") - keywordLocation := fmt.Sprintf("/paths/%s/parameters/%s/schema/items/type", escapedPath, param.Name) - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationPath, - Message: fmt.Sprintf("Path array parameter '%s' is not a valid integer", param.Name), - Reason: fmt.Sprintf("The path parameter (which is an array) '%s' is defined as being an integer, "+ - "however the value '%s' is not a valid integer", param.Name, item), - SpecLine: sch.Items.A.GoLow().Schema().Type.KeyNode.Line, - SpecCol: sch.Items.A.GoLow().Schema().Type.KeyNode.Column, - ParameterName: param.Name, - Context: itemsSchema, - HowToFix: fmt.Sprintf(HowToFixParamInvalidNumber, item), - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Array item '%s' is not a valid integer", item), - FieldName: param.Name, - InstancePath: []string{param.Name, "[item]"}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedSchema, - }}, - } -} - -func IncorrectPathParamArrayBoolean( - param *v3.Parameter, item string, sch *base.Schema, itemsSchema *base.Schema, pathTemplate string, renderedSchema string, -) *ValidationError { - escapedPath := strings.ReplaceAll(pathTemplate, "~", "~0") - escapedPath = strings.ReplaceAll(escapedPath, "/", "~1") - keywordLocation := fmt.Sprintf("/paths/%s/parameters/%s/schema/items/type", escapedPath, 
param.Name) - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationPath, - Message: fmt.Sprintf("Path array parameter '%s' is not a valid boolean", param.Name), - Reason: fmt.Sprintf("The path parameter (which is an array) '%s' is defined as being a boolean, "+ - "however the value '%s' is not a valid boolean", param.Name, item), - SpecLine: sch.Items.A.GoLow().Schema().Type.KeyNode.Line, - SpecCol: sch.Items.A.GoLow().Schema().Type.KeyNode.Column, - ParameterName: param.Name, - Context: itemsSchema, - HowToFix: fmt.Sprintf(HowToFixParamInvalidBoolean, item), - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Array item '%s' is not a valid boolean", item), - FieldName: param.Name, - InstancePath: []string{param.Name, "[item]"}, - KeywordLocation: keywordLocation, - ReferenceSchema: renderedSchema, - }}, - } -} - -func PathParameterMissing(param *v3.Parameter, pathTemplate string, actualPath string) *ValidationError { - actualSegments := strings.Split(strings.Trim(actualPath, "/"), "/") - - encodedPath := strings.ReplaceAll(pathTemplate, "~", "~0") - encodedPath = strings.ReplaceAll(encodedPath, "/", "~1") - encodedPath = strings.TrimPrefix(encodedPath, "~1") - keywordLoc := fmt.Sprintf("/paths/%s/parameters/%s/required", encodedPath, param.Name) - - return &ValidationError{ - ValidationType: helpers.ParameterValidation, - ValidationSubType: helpers.ParameterValidationPath, - Message: fmt.Sprintf("Path parameter '%s' is missing", param.Name), - Reason: fmt.Sprintf("The path parameter '%s' is defined as being required, "+ - "however it's missing from the requests", param.Name), - SpecLine: param.GoLow().Required.KeyNode.Line, - SpecCol: param.GoLow().Required.KeyNode.Column, - ParameterName: param.Name, - HowToFix: HowToFixMissingValue, - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: fmt.Sprintf("Required path parameter '%s' is missing from path '%s'", 
param.Name, actualPath), - FieldName: param.Name, - FieldPath: "", - InstancePath: actualSegments, - KeywordLocation: keywordLoc, - }}, - } -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/errors/parameters_howtofix.go b/vendor/github.com/pb33f/libopenapi-validator/errors/parameters_howtofix.go deleted file mode 100644 index b884700f0bb..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/errors/parameters_howtofix.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package errors - -const ( - HowToFixReservedValues string = "parameter values need to URL Encoded to ensure reserved " + - "values are correctly encoded, for example: '%s'" - HowToFixParamInvalidInteger string = "Convert the value '%s' into an integer" - HowToFixParamInvalidNumber string = "Convert the value '%s' into a number" - HowToFixParamInvalidString string = "Convert the value '%s' into a string (cannot start with a number, or be a floating point)" - HowToFixParamInvalidBoolean string = "Convert the value '%s' into a true/false value" - HowToFixParamInvalidEnum string = "Instead of '%s', use one of the allowed values: '%s'" - HowToFixParamInvalidFormEncode string = "Use a form style encoding for parameter values, for example: '%s'" - HowToFixInvalidXml string = "Ensure xml is well-formed and matches schema structure" - HowToFixXmlPrefix string = "Make sure to prepend the correct prefix '%s' to the declared fields" - HowToFixXmlNamespace string = "Make sure to declare the 'xmlns:%s' with the correct namespace URI" - HowToFixFormDataReservedCharacters string = "Make sure to correcly encode specials characters to percent encoding, or set allowReserved to true" - HowToFixInvalidSchema string = "Ensure that the object being submitted, matches the schema correctly" - HowToFixInvalidTypeEncoding string = "Ensure that the object being submitted matches the property encoding Content-Type" - 
HowToFixParamInvalidSpaceDelimitedObjectExplode string = "When using 'explode' with space delimited parameters, " + - "they should be separated by spaces. For example: '%s'" - HowToFixParamInvalidPipeDelimitedObjectExplode string = "When using 'explode' with pipe delimited parameters, " + - "they should be separated by pipes '|'. For example: '%s'" - HowToFixParamInvalidDeepObjectMultipleValues string = "There can only be a single value per property name, " + - "deepObject parameters should contain the property key in square brackets next to the parameter name. For example: '%s'" - HowToFixInvalidJSON string = "The JSON submitted is invalid, please check the syntax" - HowToFixInvalidUrlEncoded string = "Ensure URL Encoded submitted is well-formed and matches schema structure" - HowToFixDecodingError string = "The object can't be decoded, so make sure it's being encoded correctly according to the spec." - HowToFixInvalidContentType string = "The content type is invalid, Use one of the %d supported types for this operation: %s" - HowToFixInvalidResponseCode string = "The service is responding with a code that is not defined in the spec, fix the service or add the code to the specification" - HowToFixInvalidEncoding string = "Ensure the correct encoding has been used on the object" - HowToFixMissingValue string = "Ensure the value has been set" - HowToFixPath string = "Check the path is correct, and check that the correct HTTP method has been used (e.g. 
GET, POST, PUT, DELETE)" - HowToFixPathMethod string = "Add the missing operation to the contract for the path" - HowToFixInvalidMaxItems string = "Reduce the number of items in the array to %d or less" - HowToFixInvalidMinItems string = "Increase the number of items in the array to %d or more" - HowToFixMissingHeader string = "Make sure the service responding sets the required headers with this response code" - HowToFixInvalidRenderedSchema string = "Check the request schema for circular references or invalid structures" - HowToFixInvalidJsonSchema string = "Check the request schema for invalid JSON Schema syntax, complex regex patterns, or unsupported schema constructs" -) diff --git a/vendor/github.com/pb33f/libopenapi-validator/errors/request_errors.go b/vendor/github.com/pb33f/libopenapi-validator/errors/request_errors.go deleted file mode 100644 index f53c2b8ed7c..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/errors/request_errors.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package errors - -import ( - "fmt" - "net/http" - "strings" - - "github.com/pb33f/libopenapi/orderedmap" - - v3 "github.com/pb33f/libopenapi/datamodel/high/v3" - - "github.com/pb33f/libopenapi-validator/helpers" -) - -func RequestContentTypeNotFound(op *v3.Operation, request *http.Request, specPath string) *ValidationError { - ct := request.Header.Get(helpers.ContentTypeHeader) - var ctypes []string - for pair := orderedmap.First(op.RequestBody.Content); pair != nil; pair = pair.Next() { - ctypes = append(ctypes, pair.Key()) - } - return &ValidationError{ - ValidationType: helpers.RequestBodyValidation, - ValidationSubType: helpers.RequestBodyContentType, - Message: fmt.Sprintf("%s operation request content type '%s' does not exist", - request.Method, ct), - Reason: fmt.Sprintf("The content type '%s' of the %s request submitted has not "+ - "been defined, it's an unknown type", ct, 
request.Method), - SpecLine: op.RequestBody.GoLow().Content.KeyNode.Line, - SpecCol: op.RequestBody.GoLow().Content.KeyNode.Column, - Context: op, - HowToFix: fmt.Sprintf(HowToFixInvalidContentType, orderedmap.Len(op.RequestBody.Content), strings.Join(ctypes, ", ")), - RequestPath: request.URL.Path, - RequestMethod: request.Method, - SpecPath: specPath, - } -} - -func OperationNotFound(pathItem *v3.PathItem, request *http.Request, method string, specPath string) *ValidationError { - return &ValidationError{ - ValidationType: helpers.RequestValidation, - ValidationSubType: helpers.ValidationMissingOperation, - Message: fmt.Sprintf("%s operation request content type '%s' does not exist", - request.Method, method), - Reason: fmt.Sprintf("The path was found, but there was no '%s' method found in the spec", request.Method), - SpecLine: pathItem.GoLow().KeyNode.Line, - SpecCol: pathItem.GoLow().KeyNode.Column, - Context: pathItem, - HowToFix: HowToFixPathMethod, - RequestPath: request.URL.Path, - RequestMethod: request.Method, - SpecPath: specPath, - } -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/errors/response_errors.go b/vendor/github.com/pb33f/libopenapi-validator/errors/response_errors.go deleted file mode 100644 index a519e8db8c6..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/errors/response_errors.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package errors - -import ( - "fmt" - "net/http" - "strings" - - "github.com/pb33f/libopenapi/orderedmap" - - v3 "github.com/pb33f/libopenapi/datamodel/high/v3" - - "github.com/pb33f/libopenapi-validator/helpers" -) - -func ResponseContentTypeNotFound(op *v3.Operation, - request *http.Request, - response *http.Response, - code string, - isDefault bool, -) *ValidationError { - ct := response.Header.Get(helpers.ContentTypeHeader) - mediaTypeString, _, _ := helpers.ExtractContentType(ct) - var ctypes []string 
- var specLine, specCol int - var contentMap *orderedmap.Map[string, *v3.MediaType] - - // check for a default type (applies to all codes without a match) - if !isDefault { - for pair := orderedmap.First(op.Responses.Codes.GetOrZero(code).Content); pair != nil; pair = pair.Next() { - ctypes = append(ctypes, pair.Key()) - } - specLine = op.Responses.Codes.GetOrZero(code).GoLow().Content.KeyNode.Line - specCol = op.Responses.Codes.GetOrZero(code).GoLow().Content.KeyNode.Column - contentMap = op.Responses.Codes.GetOrZero(code).Content - } else { - for pair := orderedmap.First(op.Responses.Default.Content); pair != nil; pair = pair.Next() { - ctypes = append(ctypes, pair.Key()) - } - specLine = op.Responses.Default.GoLow().Content.KeyNode.Line - specCol = op.Responses.Default.GoLow().Content.KeyNode.Column - contentMap = op.Responses.Default.Content - } - return &ValidationError{ - ValidationType: helpers.ResponseBodyValidation, - ValidationSubType: helpers.RequestBodyContentType, - Message: fmt.Sprintf("%s / %s operation response content type '%s' does not exist", - request.Method, code, mediaTypeString), - Reason: fmt.Sprintf("The content type '%s' of the %s response received has not "+ - "been defined, it's an unknown type", mediaTypeString, request.Method), - SpecLine: specLine, - SpecCol: specCol, - Context: op, - HowToFix: fmt.Sprintf(HowToFixInvalidContentType, - orderedmap.Len(contentMap), strings.Join(ctypes, ", ")), - } -} - -func ResponseCodeNotFound(op *v3.Operation, request *http.Request, code int) *ValidationError { - return &ValidationError{ - ValidationType: helpers.ResponseBodyValidation, - ValidationSubType: helpers.ResponseBodyResponseCode, - Message: fmt.Sprintf("%s operation request response code '%d' does not exist", - request.Method, code), - Reason: fmt.Sprintf("The response code '%d' of the %s request submitted has not "+ - "been defined, it's an unknown type", code, request.Method), - SpecLine: op.GoLow().Responses.KeyNode.Line, - SpecCol: 
op.GoLow().Responses.KeyNode.Column, - Context: op, - HowToFix: HowToFixInvalidResponseCode, - } -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/errors/strict_errors.go b/vendor/github.com/pb33f/libopenapi-validator/errors/strict_errors.go deleted file mode 100644 index aac6e3c576c..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/errors/strict_errors.go +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright 2023-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// SPDX-License-Identifier: MIT - -package errors - -import ( - "fmt" - "strings" -) - -// StrictValidationType is the validation type for strict mode errors. -const StrictValidationType = "strict" - -// StrictValidationSubTypes for different kinds of undeclared values. -const ( - StrictSubTypeProperty = "undeclared-property" - StrictSubTypeHeader = "undeclared-header" - StrictSubTypeQuery = "undeclared-query-param" - StrictSubTypeCookie = "undeclared-cookie" -) - -// UndeclaredPropertyError creates a ValidationError for an undeclared property. -func UndeclaredPropertyError( - path string, - name string, - value any, - declaredProperties []string, - direction string, - requestPath string, - requestMethod string, - specLine int, - specCol int, -) *ValidationError { - dirStr := direction - if dirStr == "" { - dirStr = "request" - } - - return &ValidationError{ - ValidationType: StrictValidationType, - ValidationSubType: StrictSubTypeProperty, - Message: fmt.Sprintf("%s property '%s' at '%s' is not declared in schema", - dirStr, name, path), - Reason: fmt.Sprintf("Strict mode: found property not in schema. 
"+ - "Declared properties: [%s]", strings.Join(declaredProperties, ", ")), - HowToFix: fmt.Sprintf("Add '%s' to the schema, remove it from the %s, "+ - "or add '%s' to StrictIgnorePaths", name, dirStr, path), - RequestPath: requestPath, - RequestMethod: requestMethod, - ParameterName: name, - Context: truncateForContext(value), - SpecLine: specLine, - SpecCol: specCol, - } -} - -// UndeclaredHeaderError creates a ValidationError for an undeclared header. -func UndeclaredHeaderError( - name string, - value string, - declaredHeaders []string, - direction string, - requestPath string, - requestMethod string, -) *ValidationError { - dirStr := direction - if dirStr == "" { - dirStr = "request" - } - - return &ValidationError{ - ValidationType: StrictValidationType, - ValidationSubType: StrictSubTypeHeader, - Message: fmt.Sprintf("%s header '%s' is not declared in specification", - dirStr, name), - Reason: fmt.Sprintf("Strict mode: found header not in spec. "+ - "Declared headers: [%s]", strings.Join(declaredHeaders, ", ")), - HowToFix: fmt.Sprintf("Add '%s' to the operation's parameters, remove it from the %s, "+ - "or add it to StrictIgnoredHeaders", name, dirStr), - RequestPath: requestPath, - RequestMethod: requestMethod, - ParameterName: name, - Context: value, - } -} - -// UndeclaredQueryParamError creates a ValidationError for an undeclared query parameter. -func UndeclaredQueryParamError( - path string, - name string, - value any, - declaredParams []string, - requestPath string, - requestMethod string, -) *ValidationError { - return &ValidationError{ - ValidationType: StrictValidationType, - ValidationSubType: StrictSubTypeQuery, - Message: fmt.Sprintf("query parameter '%s' at '%s' is not declared in specification", name, path), - Reason: fmt.Sprintf("Strict mode: found query parameter not in spec. 
"+ - "Declared parameters: [%s]", strings.Join(declaredParams, ", ")), - HowToFix: fmt.Sprintf("Add '%s' to the operation's query parameters, remove it from the request, "+ - "or add '%s' to StrictIgnorePaths", name, path), - RequestPath: requestPath, - RequestMethod: requestMethod, - ParameterName: name, - Context: truncateForContext(value), - } -} - -// UndeclaredCookieError creates a ValidationError for an undeclared cookie. -func UndeclaredCookieError( - path string, - name string, - value any, - declaredCookies []string, - requestPath string, - requestMethod string, -) *ValidationError { - return &ValidationError{ - ValidationType: StrictValidationType, - ValidationSubType: StrictSubTypeCookie, - Message: fmt.Sprintf("cookie '%s' at '%s' is not declared in specification", name, path), - Reason: fmt.Sprintf("Strict mode: found cookie not in spec. "+ - "Declared cookies: [%s]", strings.Join(declaredCookies, ", ")), - HowToFix: fmt.Sprintf("Add '%s' to the operation's cookie parameters, remove it from the request, "+ - "or add '%s' to StrictIgnorePaths", name, path), - RequestPath: requestPath, - RequestMethod: requestMethod, - ParameterName: name, - Context: truncateForContext(value), - } -} - -// truncateForContext creates a truncated string representation for error context. -func truncateForContext(v any) string { - switch val := v.(type) { - case string: - if len(val) > 50 { - return val[:47] + "..." - } - return val - case map[string]any: - return "{...}" - case []any: - return "[...]" - default: - s := fmt.Sprintf("%v", v) - if len(s) > 50 { - return s[:47] + "..." 
- } - return s - } -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/errors/urlencoded_errors.go b/vendor/github.com/pb33f/libopenapi-validator/errors/urlencoded_errors.go deleted file mode 100644 index 7a4c8917e69..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/errors/urlencoded_errors.go +++ /dev/null @@ -1,65 +0,0 @@ -package errors - -import ( - "fmt" - - "github.com/pb33f/libopenapi-validator/helpers" - "github.com/pb33f/libopenapi/datamodel/high/base" -) - -func InvalidURLEncodedParsing(reason, referenceObject string) *ValidationError { - return &ValidationError{ - ValidationType: helpers.URLEncodedValidation, - ValidationSubType: helpers.Schema, - Message: "Unable to parse form-urlencoded body", - Reason: fmt.Sprintf("failed to parse form-urlencoded: %s", reason), - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: reason, - ReferenceSchema: "", - ReferenceObject: referenceObject, - }}, - HowToFix: HowToFixInvalidUrlEncoded, - } -} - -func InvalidTypeEncoding(schema *base.Schema, name, contentType string) *ValidationError { - line := 1 - col := 0 - if low := schema.GoLow(); low != nil && low.Type.KeyNode != nil { - line = low.Type.KeyNode.Line - col = low.Type.KeyNode.Column - } - - return &ValidationError{ - ValidationType: helpers.URLEncodedValidation, - ValidationSubType: helpers.InvalidTypeEncoding, - Message: fmt.Sprintf("The value '%s' could not be parsed to the defined encoding", name), - Reason: fmt.Sprintf("The value '%s' is encoded as '%s' in the schema, however the value could not be parsed", name, contentType), - SpecLine: line, - SpecCol: col, - Context: schema, - HowToFix: HowToFixInvalidTypeEncoding, - } -} - -func ReservedURLEncodedValue(schema *base.Schema, name, value string) *ValidationError { - line := 1 - col := 0 - if schema != nil { - if low := schema.GoLow(); low != nil && low.Type.KeyNode != nil { - line = low.Type.KeyNode.Line - col = low.Type.KeyNode.Column - } - } - - return 
&ValidationError{ - ValidationType: helpers.URLEncodedValidation, - ValidationSubType: helpers.ReservedValues, - Message: fmt.Sprintf("Form value '%s' contains reserved characters", name), - Reason: fmt.Sprintf("The form value '%s' contains reserved characters but allowReserved is false. Value: '%s'", name, value), - SpecLine: line, - SpecCol: col, - Context: schema, - HowToFix: HowToFixFormDataReservedCharacters, - } -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/errors/validation_error.go b/vendor/github.com/pb33f/libopenapi-validator/errors/validation_error.go deleted file mode 100644 index 38ac82f5c35..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/errors/validation_error.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package errors - -import ( - "fmt" - - "github.com/pb33f/libopenapi-validator/helpers" - "github.com/santhosh-tekuri/jsonschema/v6" -) - -// SchemaValidationFailure describes any failure that occurs when validating data -// against either an OpenAPI or JSON Schema. It aims to be a more user-friendly -// representation of the error than what is provided by the jsonschema library. -type SchemaValidationFailure struct { - // Reason is a human-readable message describing the reason for the error. 
- Reason string `json:"reason,omitempty" yaml:"reason,omitempty"` - - // InstancePath is the raw path segments from the root to the failing field - InstancePath []string `json:"instancePath,omitempty" yaml:"instancePath,omitempty"` - - // FieldName is the name of the specific field that failed validation (last segment of the path) - FieldName string `json:"fieldName,omitempty" yaml:"fieldName,omitempty"` - - // FieldPath is the JSONPath representation of the field location that failed validation (e.g., "$.user.email") - FieldPath string `json:"fieldPath,omitempty" yaml:"fieldPath,omitempty"` - - // KeywordLocation is the JSON Pointer (RFC 6901) path to the schema keyword that failed validation - // (e.g., "/properties/age/minimum") - KeywordLocation string `json:"keywordLocation,omitempty" yaml:"keywordLocation,omitempty"` - - // Line is the line number where the violation occurred. This may a local line number - // if the validation is a schema (only schemas are validated locally, so the line number will be relative to - // the Context object held by the ValidationError object). - Line int `json:"line,omitempty" yaml:"line,omitempty"` - - // Column is the column number where the violation occurred. This may a local column number - // if the validation is a schema (only schemas are validated locally, so the column number will be relative to - // the Context object held by the ValidationError object). - Column int `json:"column,omitempty" yaml:"column,omitempty"` - - // ReferenceSchema is the schema that was referenced in the validation failure. - ReferenceSchema string `json:"referenceSchema,omitempty" yaml:"referenceSchema,omitempty"` - - // ReferenceObject is the object that failed schema validation - ReferenceObject string `json:"referenceObject,omitempty" yaml:"referenceObject,omitempty"` - - // ReferenceExample is an example object generated from the schema that was referenced in the validation failure. 
- ReferenceExample string `json:"referenceExample,omitempty" yaml:"referenceExample,omitempty"` - - // The original jsonschema.ValidationError object, if the schema failure originated from the jsonschema library. - OriginalJsonSchemaError *jsonschema.ValidationError `json:"-" yaml:"-"` - - // Context is the raw schema object that failed validation (for programmatic access) - Context interface{} `json:"-" yaml:"-"` -} - -// Error returns a string representation of the error -func (s *SchemaValidationFailure) Error() string { - if s.FieldPath != "" { - return fmt.Sprintf("Reason: %s, FieldPath: %s", s.Reason, s.FieldPath) - } - return fmt.Sprintf("Reason: %s", s.Reason) -} - -// ValidationError is a struct that contains all the information about a validation error. -type ValidationError struct { - // Message is a human-readable message describing the error. - Message string `json:"message" yaml:"message"` - - // Reason is a human-readable message describing the reason for the error. - Reason string `json:"reason" yaml:"reason"` - - // ValidationType is a string that describes the type of validation that failed. - ValidationType string `json:"validationType" yaml:"validationType"` - - // ValidationSubType is a string that describes the subtype of validation that failed. - ValidationSubType string `json:"validationSubType" yaml:"validationSubType"` - - // SpecLine is the line number in the spec where the error occurred. - SpecLine int `json:"specLine" yaml:"specLine"` - - // SpecCol is the column number in the spec where the error occurred. - SpecCol int `json:"specColumn" yaml:"specColumn"` - - // HowToFix is a human-readable message describing how to fix the error. 
- HowToFix string `json:"howToFix" yaml:"howToFix"` - - // RequestPath is the path of the request - RequestPath string `json:"requestPath" yaml:"requestPath"` - - // SpecPath is the path from the specification that corresponds to the request - SpecPath string `json:"specPath" yaml:"specPath"` - - // RequestMethod is the HTTP method of the request - RequestMethod string `json:"requestMethod" yaml:"requestMethod"` - - // ParameterName is the name of the parameter that failed validation (for parameter validation errors) - ParameterName string `json:"parameterName,omitempty" yaml:"parameterName,omitempty"` - - // SchemaValidationErrors is a slice of SchemaValidationFailure objects that describe the validation errors - // This is only populated when the validation type is against a schema. - SchemaValidationErrors []*SchemaValidationFailure `json:"validationErrors,omitempty" yaml:"validationErrors,omitempty"` - - // Context is the object that the validation error occurred on. This is usually a pointer to a schema - // or a parameter object. 
- Context interface{} `json:"-" yaml:"-"` -} - -// Error returns a string representation of the error -func (v *ValidationError) Error() string { - if v.SchemaValidationErrors != nil { - if v.SpecLine > 0 && v.SpecCol > 0 { - return fmt.Sprintf("Error: %s, Reason: %s, Validation Errors: %s, Line: %d, Column: %d", - v.Message, v.Reason, v.SchemaValidationErrors, v.SpecLine, v.SpecCol) - } else { - return fmt.Sprintf("Error: %s, Reason: %s, Validation Errors: %s", - v.Message, v.Reason, v.SchemaValidationErrors) - } - } else { - if v.SpecLine > 0 && v.SpecCol > 0 { - return fmt.Sprintf("Error: %s, Reason: %s, Line: %d, Column: %d", - v.Message, v.Reason, v.SpecLine, v.SpecCol) - } else { - return fmt.Sprintf("Error: %s, Reason: %s", - v.Message, v.Reason) - } - } -} - -// IsPathMissingError returns true if the error has a ValidationType of "path" and a ValidationSubType of "missing" -func (v *ValidationError) IsPathMissingError() bool { - return v.ValidationType == helpers.PathValidation && v.ValidationSubType == helpers.ValidationMissing -} - -// IsOperationMissingError returns true if the error has a ValidationType of "request" and a ValidationSubType of "missingOperation" -func (v *ValidationError) IsOperationMissingError() bool { - return v.ValidationType == helpers.PathValidation && v.ValidationSubType == helpers.ValidationMissingOperation -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/errors/xml_errors.go b/vendor/github.com/pb33f/libopenapi-validator/errors/xml_errors.go deleted file mode 100644 index 6566615d5c8..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/errors/xml_errors.go +++ /dev/null @@ -1,104 +0,0 @@ -package errors - -import ( - "fmt" - - "github.com/pb33f/libopenapi-validator/helpers" - "github.com/pb33f/libopenapi/datamodel/high/base" -) - -func MissingPrefix(schema *base.Schema, prefix string) *ValidationError { - line := 1 - col := 0 - if low := schema.GoLow(); low != nil && low.Type.KeyNode != nil { - line = 
low.Type.KeyNode.Line - col = low.Type.KeyNode.Column - } - - return &ValidationError{ - ValidationType: helpers.XmlValidation, - ValidationSubType: helpers.XmlValidationPrefix, - Message: fmt.Sprintf("The prefix '%s' is defined in the schema, however it's missing from the xml", prefix), - Reason: fmt.Sprintf("The prefix '%s' is defined in the schema, however it's missing from the xml content", prefix), - SpecLine: line, - SpecCol: col, - Context: schema, - HowToFix: fmt.Sprintf(HowToFixXmlPrefix, prefix), - } -} - -func InvalidPrefix(schema *base.Schema, prefix string) *ValidationError { - line := 1 - col := 0 - if low := schema.GoLow(); low != nil && low.Type.KeyNode != nil { - line = low.Type.KeyNode.Line - col = low.Type.KeyNode.Column - } - - return &ValidationError{ - ValidationType: helpers.XmlValidation, - ValidationSubType: helpers.XmlValidationPrefix, - Message: fmt.Sprintf("The prefix '%s' defined in the schema differs from the xml", prefix), - Reason: fmt.Sprintf("The prefix '%s' is defined in the schema, however the xml sent and invalid prefix", prefix), - SpecCol: col, - SpecLine: line, - Context: schema, - HowToFix: fmt.Sprintf(HowToFixXmlPrefix, prefix), - } -} - -func MissingNamespace(schema *base.Schema, namespace string) *ValidationError { - line := 1 - col := 0 - if low := schema.GoLow(); low != nil && low.Type.KeyNode != nil { - line = low.Type.KeyNode.Line - col = low.Type.KeyNode.Column - } - - return &ValidationError{ - ValidationType: helpers.XmlValidation, - ValidationSubType: helpers.XmlValidationNamespace, - Message: fmt.Sprintf("The namespace '%s' is defined in the schema, however it's missing from the xml", namespace), - Reason: fmt.Sprintf("The namespace '%s' is defined in the schema, however it's missing from the xml content", namespace), - SpecLine: line, - SpecCol: col, - Context: schema, - HowToFix: fmt.Sprintf(HowToFixXmlNamespace, namespace), - } -} - -func InvalidNamespace(schema *base.Schema, namespace, expectedNamespace, 
prefix string) *ValidationError { - line := 1 - col := 0 - if low := schema.GoLow(); low != nil && low.Type.KeyNode != nil { - line = low.Type.KeyNode.Line - col = low.Type.KeyNode.Column - } - - return &ValidationError{ - ValidationType: helpers.XmlValidation, - ValidationSubType: helpers.XmlValidationNamespace, - Message: fmt.Sprintf("The namespace from prefix '%s' differs from the xml", prefix), - Reason: fmt.Sprintf("The namespace from prefix '%s' is declared as '%s' in the schema, however in xml is declared as '%s'", - prefix, expectedNamespace, namespace), - SpecLine: line, - SpecCol: col, - Context: schema, - HowToFix: fmt.Sprintf(HowToFixXmlNamespace, namespace), - } -} - -func InvalidXMLParsing(reason, referenceObject string) *ValidationError { - return &ValidationError{ - ValidationType: helpers.XmlValidation, - ValidationSubType: helpers.Schema, - Message: "xml example is malformed", - Reason: fmt.Sprintf("failed to parse xml: %s", reason), - SchemaValidationErrors: []*SchemaValidationFailure{{ - Reason: reason, - ReferenceSchema: "", - ReferenceObject: referenceObject, - }}, - HowToFix: HowToFixInvalidXml, - } -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/helpers/constants.go b/vendor/github.com/pb33f/libopenapi-validator/helpers/constants.go deleted file mode 100644 index f1e1f3b654c..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/helpers/constants.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package helpers - -const ( - ParameterValidation = "parameter" - ParameterValidationPath = "path" - ParameterValidationQuery = "query" - ParameterValidationHeader = "header" - ParameterValidationCookie = "cookie" - RequestValidation = "request" - RequestBodyValidation = "requestBody" - XmlValidation = "xmlValidation" - XmlValidationPrefix = "prefix" - XmlValidationNamespace = "namespace" - URLEncodedValidation = "urlEncodedValidation" - 
InvalidTypeEncoding = "invalidTypeEncoding" - ReservedValues = "reservedValues" - Schema = "schema" - ResponseBodyValidation = "response" - RequestBodyContentType = "contentType" - // Deprecated: use ValidationMissingOperation - RequestMissingOperation = "missingOperation" - PathValidation = "path" - ValidationMissing = "missing" - ValidationMissingOperation = "missingOperation" - ResponseBodyResponseCode = "statusCode" - SecurityValidation = "security" - DocumentValidation = "document" - SpaceDelimited = "spaceDelimited" - PipeDelimited = "pipeDelimited" - DefaultDelimited = "default" - MatrixStyle = "matrix" - LabelStyle = "label" - Pipe = "|" - Comma = "," - Space = " " - SemiColon = ";" - Asterisk = "*" - Period = "." - Equals = "=" - Integer = "integer" - Number = "number" - Slash = "/" - Object = "object" - String = "string" - Array = "array" - Boolean = "boolean" - DeepObject = "deepObject" - Header = "header" - Cookie = "cookie" - Path = "path" - Form = "form" - Query = "query" - JSONContentType = "application/json" - URLEncodedContentType = "application/x-www-form-urlencoded" - JSONType = "json" - ContentTypeHeader = "Content-Type" - AuthorizationHeader = "Authorization" - Charset = "charset" - Boundary = "boundary" - Preferred = "preferred" - FailSegment = "**&&FAIL&&**" -) diff --git a/vendor/github.com/pb33f/libopenapi-validator/helpers/ignore_regex.go b/vendor/github.com/pb33f/libopenapi-validator/helpers/ignore_regex.go deleted file mode 100644 index 183b000d0f2..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/helpers/ignore_regex.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2023-2024 Princess Beef Heavy Industries, LLC / Dave Shanley -// https://pb33f.io - -package helpers - -import "regexp" - -var ( - // Ignore generic poly errors that just say "none matched" since we get specific errors - // But keep errors that say which subschemas matched (for multiple match scenarios) - IgnorePattern = `^'?(anyOf|allOf|oneOf|validation)'? 
failed(, none matched)?$` - IgnorePolyPattern = `^'?(anyOf|allOf|oneOf)'? failed(, none matched)?$` -) - -// IgnoreRegex is a regular expression that matches the IgnorePattern -var IgnoreRegex = regexp.MustCompile(IgnorePattern) - -// IgnorePolyRegex is a regular expression that matches the IgnorePattern -var IgnorePolyRegex = regexp.MustCompile(IgnorePolyPattern) diff --git a/vendor/github.com/pb33f/libopenapi-validator/helpers/json_pointer.go b/vendor/github.com/pb33f/libopenapi-validator/helpers/json_pointer.go deleted file mode 100644 index e7cda0e71db..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/helpers/json_pointer.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2026 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package helpers - -import ( - "fmt" - "strings" - - "github.com/go-openapi/jsonpointer" -) - -// EscapeJSONPointerSegment escapes a single segment for use in a JSON Pointer (RFC 6901). -// It replaces '~' with '~0' and '/' with '~1'. -func EscapeJSONPointerSegment(segment string) string { - return jsonpointer.Escape(segment) -} - -// ConstructParameterJSONPointer constructs a full JSON Pointer path for a parameter -// in the OpenAPI specification. -// Format: /paths/{path}/{method}/parameters/{paramName}/schema/{keyword} -// The path segment is automatically escaped according to RFC 6901. -// The keyword can be a simple keyword like "type" or a nested path like "items/type". -func ConstructParameterJSONPointer(pathTemplate, method, paramName, keyword string) string { - escapedPath := EscapeJSONPointerSegment(pathTemplate) - escapedPath = strings.TrimPrefix(escapedPath, "~1") // Remove leading slash encoding - method = strings.ToLower(method) - return fmt.Sprintf("/paths/%s/%s/parameters/%s/schema/%s", escapedPath, method, paramName, keyword) -} - -// ConstructResponseHeaderJSONPointer constructs a full JSON Pointer path for a response header -// in the OpenAPI specification. 
-// Format: /paths/{path}/{method}/responses/{statusCode}/headers/{headerName}/{keyword} -// The path segment is automatically escaped according to RFC 6901. -func ConstructResponseHeaderJSONPointer(pathTemplate, method, statusCode, headerName, keyword string) string { - escapedPath := EscapeJSONPointerSegment(pathTemplate) - escapedPath = strings.TrimPrefix(escapedPath, "~1") // Remove leading slash encoding - method = strings.ToLower(method) - return fmt.Sprintf("/paths/%s/%s/responses/%s/headers/%s/%s", escapedPath, method, statusCode, headerName, keyword) -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/helpers/operation_utilities.go b/vendor/github.com/pb33f/libopenapi-validator/helpers/operation_utilities.go deleted file mode 100644 index b66030a5b46..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/helpers/operation_utilities.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package helpers - -import ( - "mime" - "net/http" - - "github.com/pb33f/libopenapi/datamodel/high/v3" -) - -// ExtractOperation extracts the operation from the path item based on the request method. If there is no -// matching operation found, then nil is returned. -func ExtractOperation(request *http.Request, item *v3.PathItem) *v3.Operation { - switch request.Method { - case http.MethodGet: - return item.Get - case http.MethodPost: - return item.Post - case http.MethodPut: - return item.Put - case http.MethodDelete: - return item.Delete - case http.MethodOptions: - return item.Options - case http.MethodHead: - if item.Head != nil { - return item.Head - } - return item.Get - case http.MethodPatch: - return item.Patch - case http.MethodTrace: - return item.Trace - } - return nil -} - -// ExtractContentType extracts the content type from the request header. First return argument is the content type -// of the request.The second (optional) argument is the charset of the request. 
The third (optional) -// argument is the boundary of the type (only used with forms really). -func ExtractContentType(contentType string) (string, string, string) { - // mime.ParseMediaType: "If there is an error parsing the optional parameter, - // the media type will be returned along with the error ErrInvalidMediaParameter." - ct, params, _ := mime.ParseMediaType(contentType) - return ct, params["charset"], params["boundary"] -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/helpers/package.go b/vendor/github.com/pb33f/libopenapi-validator/helpers/package.go deleted file mode 100644 index 4a28fd4474f..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/helpers/package.go +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -// Package helpers contains helper and utility functions used by the validator. Trying to avoid using the package -// name utils anymore, as it's too generic and can cause conflicts with other packages - however I feel this pattern -// will suffer the exact same fate with time. -package helpers diff --git a/vendor/github.com/pb33f/libopenapi-validator/helpers/parameter_utilities.go b/vendor/github.com/pb33f/libopenapi-validator/helpers/parameter_utilities.go deleted file mode 100644 index 69d29158877..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/helpers/parameter_utilities.go +++ /dev/null @@ -1,393 +0,0 @@ -// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package helpers - -import ( - "fmt" - "net/http" - "slices" - "strconv" - "strings" - - "github.com/pb33f/libopenapi/datamodel/high/base" - - v3 "github.com/pb33f/libopenapi/datamodel/high/v3" -) - -// QueryParam is a struct that holds the key, values and property name for a query parameter -// it's used for complex query types that need to be parsed and tracked differently depending -// on the encoding styles used. 
-type QueryParam struct { - Key string - Values []string - Property string -} - -// ExtractParamsForOperation will extract the parameters for the operation based on the request method. -// Both the path level params and the method level params will be returned. -func ExtractParamsForOperation(request *http.Request, item *v3.PathItem) []*v3.Parameter { - params := item.Parameters - switch request.Method { - case http.MethodGet: - if item.Get != nil { - params = append(params, item.Get.Parameters...) - } - case http.MethodPost: - if item.Post != nil { - params = append(params, item.Post.Parameters...) - } - case http.MethodPut: - if item.Put != nil { - params = append(params, item.Put.Parameters...) - } - case http.MethodDelete: - if item.Delete != nil { - params = append(params, item.Delete.Parameters...) - } - case http.MethodOptions: - if item.Options != nil { - params = append(params, item.Options.Parameters...) - } - case http.MethodHead: - if item.Head != nil { - params = append(params, item.Head.Parameters...) - } - case http.MethodPatch: - if item.Patch != nil { - params = append(params, item.Patch.Parameters...) - } - case http.MethodTrace: - if item.Trace != nil { - params = append(params, item.Trace.Parameters...) - } - } - return params -} - -// ExtractSecurityForOperation will extract the security requirements for the operation based on the request method. -func ExtractSecurityForOperation(request *http.Request, item *v3.PathItem) []*base.SecurityRequirement { - var schemes []*base.SecurityRequirement - switch request.Method { - case http.MethodGet: - if item.Get != nil { - schemes = append(schemes, item.Get.Security...) - } - case http.MethodPost: - if item.Post != nil { - schemes = append(schemes, item.Post.Security...) - } - case http.MethodPut: - if item.Put != nil { - schemes = append(schemes, item.Put.Security...) - } - case http.MethodDelete: - if item.Delete != nil { - schemes = append(schemes, item.Delete.Security...) 
- } - case http.MethodOptions: - if item.Options != nil { - schemes = append(schemes, item.Options.Security...) - } - case http.MethodHead: - if item.Head != nil { - schemes = append(schemes, item.Head.Security...) - } - case http.MethodPatch: - if item.Patch != nil { - schemes = append(schemes, item.Patch.Security...) - } - case http.MethodTrace: - if item.Trace != nil { - schemes = append(schemes, item.Trace.Security...) - } - } - return schemes -} - -// ExtractSecurityHeaderNames extracts header names from applicable security schemes. -// Returns header names from apiKey schemes with in:"header", plus "Authorization" -// for http/oauth2/openIdConnect schemes. -// -// This function is used by strict mode validation to recognize security headers -// as "declared" headers that should not trigger undeclared header errors. -func ExtractSecurityHeaderNames( - security []*base.SecurityRequirement, - securitySchemes map[string]*v3.SecurityScheme, -) []string { - if security == nil || securitySchemes == nil { - return nil - } - - seen := make(map[string]bool) - var headers []string - - for _, sec := range security { - if sec == nil || sec.ContainsEmptyRequirement { - continue // No security required for this option - } - - if sec.Requirements == nil { - continue - } - - for pair := sec.Requirements.First(); pair != nil; pair = pair.Next() { - schemeName := pair.Key() - scheme, ok := securitySchemes[schemeName] - if !ok || scheme == nil { - continue - } - - var headerName string - switch strings.ToLower(scheme.Type) { - case "apikey": - if strings.ToLower(scheme.In) == Header { - headerName = scheme.Name - } - case "http", "oauth2", "openidconnect": - headerName = "Authorization" - } - - if headerName != "" && !seen[strings.ToLower(headerName)] { - seen[strings.ToLower(headerName)] = true - headers = append(headers, headerName) - } - } - } - - return headers -} - -func cast(v string) any { - if v == "true" || v == "false" { - b, _ := strconv.ParseBool(v) - return b - } - 
if i, err := strconv.ParseFloat(v, 64); err == nil { - // check if this is an int or not - if !strings.Contains(v, Period) { - iv, _ := strconv.ParseInt(v, 10, 64) - return iv - } - return i - } - return v -} - -// ConstructParamMapFromDeepObjectEncoding will construct a map from the query parameters that are encoded as -// deep objects. It's kind of a crazy way to do things, but hey, each to their own. -func ConstructParamMapFromDeepObjectEncoding(values []*QueryParam, sch *base.Schema) map[string]interface{} { - // deepObject encoding is a technique used to encode objects into query parameters. Kinda nuts. - decoded := make(map[string]interface{}) - for _, v := range values { - if decoded[v.Key] == nil { - - props := make(map[string]interface{}) - rawValues := make([]interface{}, len(v.Values)) - for i := range v.Values { - rawValues[i] = cast(v.Values[i]) - } - // check if the schema for the param is an array - if sch != nil && slices.Contains(sch.Type, Array) { - props[v.Property] = rawValues - } - // check if schema has additional properties defined as an array - if sch != nil && sch.AdditionalProperties != nil && - sch.AdditionalProperties.IsA() { - s := sch.AdditionalProperties.A.Schema() - if s != nil && - slices.Contains(s.Type, Array) { - props[v.Property] = rawValues - } - } - - if len(props) == 0 { - props[v.Property] = cast(v.Values[0]) - } - decoded[v.Key] = props - } else { - - added := false - rawValues := make([]interface{}, len(v.Values)) - for i := range v.Values { - rawValues[i] = cast(v.Values[i]) - } - // check if the schema for the param is an array - if sch != nil && slices.Contains(sch.Type, Array) { - decoded[v.Key].(map[string]interface{})[v.Property] = rawValues - added = true - } - // check if schema has additional properties defined as an array - if sch != nil && sch.AdditionalProperties != nil && - sch.AdditionalProperties.IsA() && - slices.Contains(sch.AdditionalProperties.A.Schema().Type, Array) { - 
decoded[v.Key].(map[string]interface{})[v.Property] = rawValues - added = true - } - if !added { - decoded[v.Key].(map[string]interface{})[v.Property] = cast(v.Values[0]) - } - - } - } - return decoded -} - -// ConstructParamMapFromQueryParamInput will construct a param map from an existing map of *QueryParam slices. -func ConstructParamMapFromQueryParamInput(values map[string][]*QueryParam) map[string]interface{} { - decoded := make(map[string]interface{}) - for _, q := range values { - for _, v := range q { - decoded[v.Key] = cast(v.Values[0]) - } - } - return decoded -} - -// ConstructParamMapFromPipeEncoding will construct a map from the query parameters that are encoded as -// pipe separated values. Perhaps the most sane way to delimit/encode properties. -func ConstructParamMapFromPipeEncoding(values []*QueryParam) map[string]interface{} { - // Pipes are always a good alternative to commas, personally I think they're better, if I were encoding, I would - // use pipes instead of commas, so much can go wrong with a comma, but a pipe? hardly ever. - decoded := make(map[string]interface{}) - for _, v := range values { - props := make(map[string]interface{}) - // explode PSV into array - exploded := strings.Split(v.Values[0], Pipe) - for i := range exploded { - if i%2 == 0 { - props[exploded[i]] = cast(exploded[i+1]) - } - } - decoded[v.Key] = props - } - return decoded -} - -// ConstructParamMapFromSpaceEncoding will construct a map from the query parameters that are encoded as -// space delimited values. This is perhaps the worst way to delimit anything other than a paragraph of text. -func ConstructParamMapFromSpaceEncoding(values []*QueryParam) map[string]interface{} { - // Don't use spaces to delimit anything unless you really know what the hell you're doing. Perhaps the - // easiest way to blow something up, unless you're tokenizing strings... don't do this. 
- decoded := make(map[string]interface{}) - for _, v := range values { - props := make(map[string]interface{}) - // explode SSV into array - exploded := strings.Split(v.Values[0], Space) - for i := range exploded { - if i%2 == 0 { - props[exploded[i]] = cast(exploded[i+1]) - } - } - decoded[v.Key] = props - } - return decoded -} - -// ConstructMapFromCSV will construct a map from a comma separated value string. -func ConstructMapFromCSV(csv string) map[string]interface{} { - decoded := make(map[string]interface{}) - // explode SSV into array - exploded := strings.Split(csv, Comma) - for i := range exploded { - if i%2 == 0 { - if len(exploded) == i+1 { - break - } - decoded[exploded[i]] = cast(exploded[i+1]) - } - } - return decoded -} - -// ConstructKVFromCSV will construct a map from a comma separated value string that denotes key value pairs. -func ConstructKVFromCSV(values string) map[string]interface{} { - props := make(map[string]interface{}) - exploded := strings.Split(values, Comma) - for i := range exploded { - obK := strings.Split(exploded[i], Equals) - if len(obK) == 2 { - props[obK[0]] = cast(obK[1]) - } - } - return props -} - -// ConstructKVFromLabelEncoding will construct a map from a comma separated value string that denotes key value pairs. -func ConstructKVFromLabelEncoding(values string) map[string]interface{} { - props := make(map[string]interface{}) - exploded := strings.Split(values, Period) - for i := range exploded { - obK := strings.Split(exploded[i], Equals) - if len(obK) == 2 { - props[obK[0]] = cast(obK[1]) - } - } - return props -} - -// ConstructKVFromMatrixCSV will construct a map from a comma separated value string that denotes key value pairs. 
-func ConstructKVFromMatrixCSV(values string) map[string]interface{} { - props := make(map[string]interface{}) - exploded := strings.Split(values, SemiColon) - for i := range exploded { - obK := strings.Split(exploded[i], Equals) - if len(obK) == 2 { - props[obK[0]] = cast(obK[1]) - } - } - return props -} - -// ConstructParamMapFromFormEncodingArray will construct a map from the query parameters that are encoded as -// form encoded values. -func ConstructParamMapFromFormEncodingArray(values []*QueryParam) map[string]interface{} { - decoded := make(map[string]interface{}) - for _, v := range values { - props := make(map[string]interface{}) - // explode SSV into array - exploded := strings.Split(v.Values[0], Comma) - for i := range exploded { - if i%2 == 0 { - if len(exploded) > i+1 { - props[exploded[i]] = cast(exploded[i+1]) - } - } - } - decoded[v.Key] = props - } - return decoded -} - -// DoesFormParamContainDelimiter will determine if a form parameter contains a delimiter. -func DoesFormParamContainDelimiter(value, style string) bool { - if strings.Contains(value, Comma) && (style == "" || style == Form) { - return true - } - return false -} - -// ExplodeQueryValue will explode a query value based on the style (space, pipe, or form/default). 
-func ExplodeQueryValue(value, style string) []string { - switch style { - case SpaceDelimited: - return strings.Split(value, Space) - case PipeDelimited: - return strings.Split(value, Pipe) - default: - return strings.Split(value, Comma) - } -} - -func CollapseCSVIntoFormStyle(key string, value string) string { - return fmt.Sprintf("&%s=%s", key, - strings.Join(strings.Split(value, ","), fmt.Sprintf("&%s=", key))) -} - -func CollapseCSVIntoSpaceDelimitedStyle(key string, values []string) string { - return fmt.Sprintf("%s=%s", key, strings.Join(values, "%20")) -} - -func CollapseCSVIntoPipeDelimitedStyle(key string, values []string) string { - return fmt.Sprintf("%s=%s", key, strings.Join(values, Pipe)) -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/helpers/path_finder.go b/vendor/github.com/pb33f/libopenapi-validator/helpers/path_finder.go deleted file mode 100644 index 370dd634323..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/helpers/path_finder.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2023-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// https://pb33f.io - -package helpers - -import ( - "fmt" - "strings" - "unicode" - - "github.com/santhosh-tekuri/jsonschema/v6" -) - -// ExtractJSONPathFromValidationError traverses and processes a ValidationError to construct a JSONPath string representation of its instance location. 
-func ExtractJSONPathFromValidationError(e *jsonschema.ValidationError) string { - if len(e.Causes) > 0 { - for _, cause := range e.Causes { - ExtractJSONPathFromValidationError(cause) - } - } - - if len(e.InstanceLocation) > 0 { - - var b strings.Builder - b.WriteString("$") - - for _, seg := range e.InstanceLocation { - switch { - case isNumeric(seg): - b.WriteString(fmt.Sprintf("[%s]", seg)) - - case isSimpleIdentifier(seg): - b.WriteByte('.') - b.WriteString(seg) - - default: - esc := escapeBracketString(seg) - b.WriteString("['") - b.WriteString(esc) - b.WriteString("']") - } - } - return b.String() - } - return "" -} - -// isNumeric returns true if s is a non‐empty string of digits. -func isNumeric(s string) bool { - if s == "" { - return false - } - for _, r := range s { - if r < '0' || r > '9' { - return false - } - } - return true -} - -// isSimpleIdentifier returns true if s matches [A-Za-z_][A-Za-z0-9_]*. -func isSimpleIdentifier(s string) bool { - for i, r := range s { - if i == 0 { - if !unicode.IsLetter(r) && r != '_' { - return false - } - } else { - if !unicode.IsLetter(r) && !unicode.IsDigit(r) && r != '_' { - return false - } - } - } - return len(s) > 0 -} - -// escapeBracketString escapes backslashes and single‐quotes for inside ['...'] -func escapeBracketString(s string) string { - s = strings.ReplaceAll(s, `\`, `\\`) - s = strings.ReplaceAll(s, `'`, `\'`) - return s -} - -// ExtractJSONPathsFromValidationErrors takes a slice of ValidationError pointers and returns a slice of JSONPath strings -func ExtractJSONPathsFromValidationErrors(errors []*jsonschema.ValidationError) []string { - var paths []string - for _, err := range errors { - path := ExtractJSONPathFromValidationError(err) - if path != "" { - paths = append(paths, path) - } - } - return paths -} - -// ExtractFieldNameFromInstanceLocation returns the last segment of the instance location as the field name -func ExtractFieldNameFromInstanceLocation(instanceLocation []string) string { - 
if len(instanceLocation) == 0 { - return "" - } - return instanceLocation[len(instanceLocation)-1] -} - -// ExtractFieldNameFromStringLocation returns the last segment of the instance location as the field name -// when the location is provided as a string path -func ExtractFieldNameFromStringLocation(instanceLocation string) string { - if instanceLocation == "" { - return "" - } - - // Handle string format like "/properties/email" or "/0/name" - segments := strings.Split(strings.Trim(instanceLocation, "/"), "/") - if len(segments) == 0 || (len(segments) == 1 && segments[0] == "") { - return "" - } - - return segments[len(segments)-1] -} - -// ExtractJSONPathFromInstanceLocation creates a JSONPath string from instance location segments -func ExtractJSONPathFromInstanceLocation(instanceLocation []string) string { - if len(instanceLocation) == 0 { - return "" - } - - var b strings.Builder - b.WriteString("$") - - for _, seg := range instanceLocation { - switch { - case isNumeric(seg): - b.WriteString(fmt.Sprintf("[%s]", seg)) - - case isSimpleIdentifier(seg): - b.WriteByte('.') - b.WriteString(seg) - - default: - esc := escapeBracketString(seg) - b.WriteString("['") - b.WriteString(esc) - b.WriteString("']") - } - } - return b.String() -} - -// ExtractJSONPathFromStringLocation creates a JSONPath string from string-based instance location -func ExtractJSONPathFromStringLocation(instanceLocation string) string { - if instanceLocation == "" { - return "" - } - - // Convert string format like "/properties/email" to array format - segments := strings.Split(strings.Trim(instanceLocation, "/"), "/") - if len(segments) == 0 || (len(segments) == 1 && segments[0] == "") { - return "" - } - - return ExtractJSONPathFromInstanceLocation(segments) -} - -// ConvertStringLocationToPathSegments converts a string-based instance location to path segments array -// Handles edge cases like empty strings and root-only paths -func ConvertStringLocationToPathSegments(instanceLocation 
string) []string { - if instanceLocation == "" { - return []string{} - } - - segments := strings.Split(strings.Trim(instanceLocation, "/"), "/") - if len(segments) == 1 && segments[0] == "" { - return []string{} - } - - return segments -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/helpers/regex_maker.go b/vendor/github.com/pb33f/libopenapi-validator/helpers/regex_maker.go deleted file mode 100644 index 3fcbc2eae5c..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/helpers/regex_maker.go +++ /dev/null @@ -1,148 +0,0 @@ -package helpers - -import ( - "bytes" - "fmt" - "regexp" - "strings" -) - -var ( - baseDefaultPattern = "[^/]*" - DefaultPatternRegex = regexp.MustCompile("^([^/]*)$") - DefaultPatternRegexString = DefaultPatternRegex.String() -) - -// GetRegexForPath returns a compiled regular expression for the given path template. -// -// This function takes a path template string `tpl` and generates a regular expression -// that matches the structure of the template. The template can include placeholders -// enclosed in braces `{}` with optional custom patterns. -// -// Placeholders in the template can be defined as: -// - `{name}`: Matches any sequence of characters except '/' -// - `{name:pattern}`: Matches the specified custom pattern -// -// The function ensures that the template is well-formed, with balanced and properly -// nested braces. If the template is invalid, an error is returned. -// -// Parameters: -// - tpl: The path template string to convert into a regular expression. -// -// Returns: -// - *regexp.Regexp: A compiled regular expression that matches the template. -// - error: An error if the template is invalid or the regular expression cannot be compiled. -// -// Example: -// -// regex, err := GetRegexForPath("/orders/{id:[0-9]+}/items/{itemId}") -// // regex: ^/orders/([0-9]+)/items/([^/]+)$ -// // err: nil -func GetRegexForPath(tpl string) (*regexp.Regexp, error) { - // Check if it is well-formed. 
- idxs, errBraces := BraceIndices(tpl) - if errBraces != nil { - return nil, errBraces - } - - // Backup the original. - template := tpl - - pattern := bytes.NewBufferString("^") - var end int - - for i := 0; i < len(idxs); i += 2 { - - // Set all values we are interested in. - raw := tpl[end:idxs[i]] - end = idxs[i+1] - parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2) - name := parts[0] - patt := baseDefaultPattern - if len(parts) == 2 { - patt = parts[1] - } - - // Name or pattern can't be empty. - if name == "" || patt == "" { - return nil, fmt.Errorf("missing name or pattern in %q", tpl[idxs[i]:end]) - } - - // Build the regexp pattern. - _, err := fmt.Fprintf(pattern, "%s(%s)", regexp.QuoteMeta(raw), patt) - if err != nil { - return nil, err - } - - } - - // Add the remaining. - raw := tpl[end:] - pattern.WriteString(regexp.QuoteMeta(raw)) - - pattern.WriteByte('$') - - patternString := pattern.String() - - if patternString == DefaultPatternRegexString { - return DefaultPatternRegex, nil - } - - // Compile full regexp. - reg, errCompile := regexp.Compile(patternString) - if errCompile != nil { - return nil, errCompile - } - - // Check for capturing groups which used to work in older versions - if reg.NumSubexp() != len(idxs)/2 { - return nil, fmt.Errorf("route %s contains capture groups in its regexp. Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)", template) - } - - // Done! - return reg, nil -} - -// BraceIndices returns the indices of the opening and closing braces in a string. -// -// It scans the input string `s` and identifies the positions of matching pairs -// of braces ('{' and '}'). The function ensures that the braces are balanced -// and properly nested. -// -// If the braces are unbalanced or improperly nested, an error is returned. -// -// Parameters: -// - s: The input string to scan for braces. 
-// -// Returns: -// - []int: A slice of integers where each pair of indices represents the -// start and end positions of a matching pair of braces. -// - error: An error if the braces are unbalanced or improperly nested. -// -// Example: -// -// indices, err := BraceIndices("/orders/{id}/items/{itemId}") -// // indices: [8, 12, 19, 26] -// // err: nil -func BraceIndices(s string) ([]int, error) { - var level, idx int - var idxs []int - for i := 0; i < len(s); i++ { - switch s[i] { - case '{': - if level++; level == 1 { - idx = i - } - case '}': - if level--; level == 0 { - idxs = append(idxs, idx, i+1) - } else if level < 0 { - return nil, fmt.Errorf("unbalanced braces in %q", s) - } - } - } - if level != 0 { - return nil, fmt.Errorf("unbalanced braces in %q", s) - } - return idxs, nil -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/helpers/schema_compiler.go b/vendor/github.com/pb33f/libopenapi-validator/helpers/schema_compiler.go deleted file mode 100644 index 91e4ddaacc8..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/helpers/schema_compiler.go +++ /dev/null @@ -1,319 +0,0 @@ -package helpers - -import ( - "bytes" - "encoding/json" - "fmt" - - "github.com/santhosh-tekuri/jsonschema/v6" - - "github.com/pb33f/libopenapi-validator/config" - "github.com/pb33f/libopenapi-validator/openapi_vocabulary" -) - -// ConfigureCompiler configures a JSON Schema compiler with the desired behavior. -func ConfigureCompiler(c *jsonschema.Compiler, o *config.ValidationOptions) { - if o == nil { - // Sanity - return - } - - // nil is the default so this is OK. - c.UseRegexpEngine(o.RegexEngine) - - if o.FormatAssertions { - c.AssertFormat() - } - - if o.ContentAssertions { - c.AssertContent() - } - - for n, v := range o.Formats { - c.RegisterFormat(&jsonschema.Format{ - Name: n, - Validate: v, - }) - } -} - -// NewCompilerWithOptions mints a new JSON schema compiler with custom configuration. 
-func NewCompilerWithOptions(o *config.ValidationOptions) *jsonschema.Compiler { - c := jsonschema.NewCompiler() - ConfigureCompiler(c, o) - return c -} - -// NewCompiledSchema establishes a programmatic representation of a JSON Schema document that is used for validation. -// Defaults to OpenAPI 3.1+ behavior (strict JSON Schema compliance). -func NewCompiledSchema(name string, jsonSchema []byte, o *config.ValidationOptions) (*jsonschema.Schema, error) { - return NewCompiledSchemaWithVersion(name, jsonSchema, o, 3.1) -} - -// NewCompiledSchemaWithVersion establishes a programmatic representation of a JSON Schema document that is used for validation. -// The version parameter determines which OpenAPI keywords are allowed: -// - version 3.0: Allows OpenAPI 3.0 keywords like 'nullable' -// - version 3.1+: Rejects OpenAPI 3.0 keywords like 'nullable' (strict JSON Schema compliance) -func NewCompiledSchemaWithVersion(name string, jsonSchema []byte, options *config.ValidationOptions, version float32) (*jsonschema.Schema, error) { - compiler := NewCompilerWithOptions(options) - compiler.UseLoader(NewCompilerLoader()) - - // register OpenAPI vocabulary with appropriate version and coercion settings - if options != nil && options.OpenAPIMode { - var vocabVersion openapi_vocabulary.VersionType - if version >= 3.15 { // use 3.15 to avoid floating point precision issues (3.2+) - vocabVersion = openapi_vocabulary.Version32 - } else if version >= 3.05 { // use 3.05 to avoid floating point precision issues (3.1) - vocabVersion = openapi_vocabulary.Version31 - } else { - vocabVersion = openapi_vocabulary.Version30 - } - - vocab := openapi_vocabulary.NewOpenAPIVocabularyWithCoercion(vocabVersion, options.AllowScalarCoercion) - compiler.RegisterVocabulary(vocab) - compiler.AssertVocabs() - - if version < 3.05 { - jsonSchema = transformOpenAPI30Schema(jsonSchema) - } - - if options.AllowScalarCoercion { - jsonSchema = transformSchemaForCoercion(jsonSchema) - } - } - - decodedSchema, 
err := jsonschema.UnmarshalJSON(bytes.NewReader(jsonSchema)) - if err != nil { - return nil, fmt.Errorf("failed to unmarshal JSON schema: %w", err) - } - - if err = compiler.AddResource(name, decodedSchema); err != nil { - return nil, fmt.Errorf("failed to add resource to schema compiler: %w", err) - } - - jsch, err := compiler.Compile(name) - if err != nil { - return nil, fmt.Errorf("JSON schema compile failed: %s", err.Error()) - } - - return jsch, nil -} - -// transformOpenAPI30Schema transforms OpenAPI 3.0 schemas to JSON Schema compatible format -// This specifically handles the nullable keyword by converting it to proper type arrays -func transformOpenAPI30Schema(jsonSchema []byte) []byte { - var schema map[string]interface{} - if err := json.Unmarshal(jsonSchema, &schema); err != nil { - // If we can't parse it, return as-is - return jsonSchema - } - - transformed := transformNullableInSchema(schema) - - result, err := json.Marshal(transformed) - if err != nil { - // If we can't marshal the result, return original - return jsonSchema - } - - return result -} - -// transformNullableInSchema recursively transforms nullable keywords in a schema object -func transformNullableInSchema(schema interface{}) interface{} { - switch s := schema.(type) { - case map[string]interface{}: - result := make(map[string]interface{}) - - // copy all properties first - for key, value := range s { - result[key] = transformNullableInSchema(value) - } - - // check if this schema has nullable keyword - if nullable, ok := s["nullable"]; ok { - if nullableBool, ok := nullable.(bool); ok { - if nullableBool { - // Transform the schema to support null values - return transformNullableSchema(result) - } else { - // nullable: false - just remove the nullable keyword - delete(result, "nullable") - } - } - } - - return result - - case []interface{}: - result := make([]interface{}, len(s)) - for i, item := range s { - result[i] = transformNullableInSchema(item) - } - return result - - 
default: - return schema - } -} - -// transformNullableSchema transforms a schema with nullable: true to JSON Schema compatible format -func transformNullableSchema(schema map[string]interface{}) map[string]interface{} { - delete(schema, "nullable") - - // get the current type - currentType, hasType := schema["type"] - - if hasType { - // if there's already a type, convert it to include null - switch t := currentType.(type) { - case string: - // convert "string" to ["string", "null"] - schema["type"] = []interface{}{t, "null"} - case []interface{}: - // if it's already an array, add null if not present - found := false - for _, item := range t { - if str, ok := item.(string); ok && str == "null" { - found = true - break - } - } - if !found { - newTypes := make([]interface{}, len(t)+1) - copy(newTypes, t) - newTypes[len(t)] = "null" - schema["type"] = newTypes - } - } - } - allOf, hasAllOf := schema["allOf"] - if hasAllOf { - delete(schema, "allOf") - oneOfAdditions := []interface{}{ - map[string]interface{}{ - "allOf": allOf, - }, - map[string]interface{}{ - "type": "null", - }, - } - var oneOfSlice []interface{} - oneOf, hasOneOf := schema["oneOf"] - if hasOneOf { - oneOfSlice, _ = oneOf.([]interface{}) - } - oneOfSlice = append(oneOfSlice, oneOfAdditions...) 
- schema["oneOf"] = oneOfSlice - } - - // Handle enum values - add null if nullable but not already in enum - enum, hasEnum := schema["enum"] - if hasEnum { - if enumSlice, ok := enum.([]interface{}); ok { - // Check if null is already in enum - hasNull := false - for _, v := range enumSlice { - if v == nil { - hasNull = true - break - } - } - // Add null if not present - if !hasNull { - enumSlice = append(enumSlice, nil) - schema["enum"] = enumSlice - } - } - } - - return schema -} - -// transformSchemaForCoercion transforms schemas to allow scalar coercion (string->boolean/number) -func transformSchemaForCoercion(jsonSchema []byte) []byte { - var schema map[string]interface{} - if err := json.Unmarshal(jsonSchema, &schema); err != nil { - // If we can't parse it, return as-is - return jsonSchema - } - - transformed := transformCoercionInSchema(schema) - - result, err := json.Marshal(transformed) - if err != nil { - return jsonSchema - } - - return result -} - -// transformCoercionInSchema recursively transforms schemas to support scalar coercion -func transformCoercionInSchema(schema interface{}) interface{} { - switch s := schema.(type) { - case map[string]interface{}: - result := make(map[string]interface{}) - - // copy all properties first - for key, value := range s { - result[key] = transformCoercionInSchema(value) - } - - // transform type to allow string coercion for coercible types - if schemaType, hasType := s["type"]; hasType { - result["type"] = transformTypeForCoercion(schemaType) - } - - return result - - case []interface{}: - result := make([]interface{}, len(s)) - for i, item := range s { - result[i] = transformCoercionInSchema(item) - } - return result - - default: - return schema - } -} - -// transformTypeForCoercion transforms type fields to allow string coercion -func transformTypeForCoercion(schemaType interface{}) interface{} { - switch t := schemaType.(type) { - case string: - // transform scalar types to include string for coercion - if t 
== "boolean" || t == "number" || t == "integer" { - return []interface{}{t, "string"} - } - return t - - case []interface{}: - // if already an array, add string if it contains coercible types and doesn't already have string - hasCoercibleType := false - hasString := false - - for _, item := range t { - if str, ok := item.(string); ok { - if str == "boolean" || str == "number" || str == "integer" { - hasCoercibleType = true - } - if str == "string" { - hasString = true - } - } - } - - if hasCoercibleType && !hasString { - newTypes := make([]interface{}, len(t)+1) - copy(newTypes, t) - newTypes[len(t)] = "string" - return newTypes - } - - return t - - default: - return schemaType - } -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/helpers/url_loader.go b/vendor/github.com/pb33f/libopenapi-validator/helpers/url_loader.go deleted file mode 100644 index a5cd66b6a39..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/helpers/url_loader.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2023-2024 Princess Beef Heavy Industries, LLC / Dave Shanley -// https://pb33f.io - -package helpers - -import ( - "crypto/tls" - "fmt" - "net/http" - "time" - - "github.com/santhosh-tekuri/jsonschema/v6" -) - -// HTTPURLLoader is a type that implements the Loader interface for loading schemas from HTTP URLs. -// this change was made in jsonschema v6. The httploader package was removed and the HTTPURLLoader -// type was introduced. -// https://github.com/santhosh-tekuri/jsonschema/blob/boon/example_http_test.go -// TODO: make all this stuff configurable, right now it's all hard wired and not very flexible. -// -// use interfaces and abstractions on all this. 
-type HTTPURLLoader http.Client - -func (l *HTTPURLLoader) Load(url string) (any, error) { - client := (*http.Client)(l) - resp, err := client.Get(url) - if err != nil { - return nil, err - } - if resp.StatusCode != http.StatusOK { - _ = resp.Body.Close() - return nil, fmt.Errorf("%s returned status code %d", url, resp.StatusCode) - } - defer resp.Body.Close() - - return jsonschema.UnmarshalJSON(resp.Body) -} - -func NewHTTPURLLoader(insecure bool) *HTTPURLLoader { - httpLoader := HTTPURLLoader(http.Client{ - Timeout: 15 * time.Second, - }) - if insecure { - httpLoader.Transport = &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - } - } - return &httpLoader -} - -func NewCompilerLoader() jsonschema.SchemeURLLoader { - return jsonschema.SchemeURLLoader{ - "file": jsonschema.FileLoader{}, - "http": NewHTTPURLLoader(false), - "https": NewHTTPURLLoader(false), - } -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/helpers/version.go b/vendor/github.com/pb33f/libopenapi-validator/helpers/version.go deleted file mode 100644 index d59e50992d7..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/helpers/version.go +++ /dev/null @@ -1,15 +0,0 @@ -package helpers - -import ( - "strings" -) - -// VersionToFloat converts a version string to a float32 for easier comparison. 
-func VersionToFloat(version string) float32 { - switch { - case strings.HasPrefix(version, "3.0"): - return 3.0 - default: - return 3.1 - } -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/libopenapi-logo.png b/vendor/github.com/pb33f/libopenapi-validator/libopenapi-logo.png deleted file mode 100644 index 8336dc511c4..00000000000 Binary files a/vendor/github.com/pb33f/libopenapi-validator/libopenapi-logo.png and /dev/null differ diff --git a/vendor/github.com/pb33f/libopenapi-validator/openapi_vocabulary/coercion.go b/vendor/github.com/pb33f/libopenapi-validator/openapi_vocabulary/coercion.go deleted file mode 100644 index 3ba87b84137..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/openapi_vocabulary/coercion.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2025 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package openapi_vocabulary - -import ( - "regexp" - "strconv" - - "github.com/santhosh-tekuri/jsonschema/v6" -) - -// coercionExtension handles Jackson-style scalar coercion (string->boolean/number) -type coercionExtension struct { - schemaType any // string, []string, or nil - allowCoercion bool -} - -var ( - booleanRegex = regexp.MustCompile(`^(true|false)$`) - numberRegex = regexp.MustCompile(`^-?(?:0|[1-9]\d*)(?:\.\d+)?(?:[eE][+-]?\d+)?$`) - integerRegex = regexp.MustCompile(`^-?(?:0|[1-9]\d*)$`) -) - -func (c *coercionExtension) Validate(ctx *jsonschema.ValidatorContext, v any) { - if !c.allowCoercion { - return // Coercion disabled - let normal validation handle it - } - - str, ok := v.(string) - if !ok { - return // Not a string - let normal validation handle it - } - - // check if we should coerce and validate the string format - if c.shouldCoerceToBoolean() { - if !c.isValidBooleanString(str) { - ctx.AddError(&CoercionError{ - SourceType: "string", - TargetType: "boolean", - Value: str, - Message: "string value cannot be coerced to boolean, must be 'true' or 'false'", - }) - } - return - } 
- - if c.shouldCoerceToNumber() { - if !c.isValidNumberString(str) { - ctx.AddError(&CoercionError{ - SourceType: "string", - TargetType: "number", - Value: str, - Message: "string value cannot be coerced to number, must be a valid numeric string", - }) - } - return - } - - if c.shouldCoerceToInteger() { - if !c.isValidIntegerString(str) { - ctx.AddError(&CoercionError{ - SourceType: "string", - TargetType: "integer", - Value: str, - Message: "string value cannot be coerced to integer, must be a valid integer string", - }) - } - return - } -} - -func (c *coercionExtension) shouldCoerceToBoolean() bool { - return c.hasType("boolean") -} - -func (c *coercionExtension) shouldCoerceToNumber() bool { - return c.hasType("number") -} - -func (c *coercionExtension) shouldCoerceToInteger() bool { - return c.hasType("integer") -} - -func (c *coercionExtension) hasType(targetType string) bool { - switch t := c.schemaType.(type) { - case string: - return t == targetType - case []any: - for _, item := range t { - if str, ok := item.(string); ok && str == targetType { - return true - } - } - } - return false -} - -func (c *coercionExtension) isValidBooleanString(s string) bool { - return booleanRegex.MatchString(s) -} - -func (c *coercionExtension) isValidNumberString(s string) bool { - if !numberRegex.MatchString(s) { - return false - } - // Additional validation using strconv - _, err := strconv.ParseFloat(s, 64) - return err == nil -} - -func (c *coercionExtension) isValidIntegerString(s string) bool { - if !integerRegex.MatchString(s) { - return false - } - // Additional validation using strconv - _, err := strconv.ParseInt(s, 10, 64) - return err == nil -} - -// CompileCoercion compiles the coercion extension if coercion is allowed and applicable -func CompileCoercion(ctx *jsonschema.CompilerContext, obj map[string]any, allowCoercion bool) (jsonschema.SchemaExt, error) { - if !allowCoercion { - return nil, nil // Coercion disabled - } - - // Get the type from the schema - 
schemaType, hasType := obj["type"] - if !hasType { - return nil, nil // No type specified - no coercion needed - } - - // Only apply coercion to scalar types - if !IsCoercibleType(schemaType) { - return nil, nil - } - - return &coercionExtension{ - schemaType: schemaType, - allowCoercion: true, - }, nil -} - -// IsCoercibleType checks if the schema type is one that supports coercion -func IsCoercibleType(schemaType any) bool { - switch t := schemaType.(type) { - case string: - return t == "boolean" || t == "number" || t == "integer" - case []any: - // for type arrays, check if any coercible type is present - for _, item := range t { - if str, ok := item.(string); ok { - if str == "boolean" || str == "number" || str == "integer" { - return true - } - } - } - } - return false -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/openapi_vocabulary/discriminator.go b/vendor/github.com/pb33f/libopenapi-validator/openapi_vocabulary/discriminator.go deleted file mode 100644 index b88ddbba300..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/openapi_vocabulary/discriminator.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2025 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package openapi_vocabulary - -import ( - "github.com/santhosh-tekuri/jsonschema/v6" -) - -// discriminatorExtension handles the OpenAPI discriminator keyword -type discriminatorExtension struct { - propertyName string - mapping map[string]string // value -> schema reference -} - -// Validate validates the discriminator property exists in the instance -func (d *discriminatorExtension) Validate(ctx *jsonschema.ValidatorContext, v any) { - obj, _ := v.(map[string]any) - - // check if discriminator property exists in the object - if d.propertyName != "" { - if _, exists := obj[d.propertyName]; !exists { - ctx.AddError(&DiscriminatorPropertyMissingError{ - PropertyName: d.propertyName, - }) - } - } -} - -// CompileDiscriminator compiles the OpenAPI 
discriminator keyword -func CompileDiscriminator(_ *jsonschema.CompilerContext, obj map[string]any, _ VersionType) (jsonschema.SchemaExt, error) { - v, exists := obj["discriminator"] - if !exists { - return nil, nil - } - - discriminator, ok := v.(map[string]any) - if !ok { - return nil, &OpenAPIKeywordError{ - Keyword: "discriminator", - Message: "discriminator must be an object", - } - } - - propertyNameValue, exists := discriminator["propertyName"] - if !exists { - return nil, &OpenAPIKeywordError{ - Keyword: "discriminator", - Message: "discriminator must have a propertyName field", - } - } - - propertyName, ok := propertyNameValue.(string) - if !ok { - return nil, &OpenAPIKeywordError{ - Keyword: "discriminator", - Message: "discriminator propertyName must be a string", - } - } - - var mapping map[string]string - if mappingValue, exists := discriminator["mapping"]; exists { - mappingObj, ok := mappingValue.(map[string]any) - if !ok { - return nil, &OpenAPIKeywordError{ - Keyword: "discriminator", - Message: "discriminator mapping must be an object", - } - } - - mapping = make(map[string]string) - for key, value := range mappingObj { - if strValue, ok := value.(string); ok { - mapping[key] = strValue - } else { - return nil, &OpenAPIKeywordError{ - Keyword: "discriminator", - Message: "discriminator mapping values must be strings", - } - } - } - } - - return &discriminatorExtension{ - propertyName: propertyName, - mapping: mapping, - }, nil -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/openapi_vocabulary/errors.go b/vendor/github.com/pb33f/libopenapi-validator/openapi_vocabulary/errors.go deleted file mode 100644 index 521a5f4f8d4..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/openapi_vocabulary/errors.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2025 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package openapi_vocabulary - -import ( - "fmt" - - "golang.org/x/text/message" -) - -// 
OpenAPIKeywordError represents an error with an OpenAPI-specific keyword -type OpenAPIKeywordError struct { - Keyword string - Message string -} - -func (e *OpenAPIKeywordError) Error() string { - return fmt.Sprintf("OpenAPI keyword '%s': %s", e.Keyword, e.Message) -} - -// DiscriminatorPropertyMissingError represents an error when discriminator property is missing -type DiscriminatorPropertyMissingError struct { - PropertyName string -} - -func (e *DiscriminatorPropertyMissingError) KeywordPath() []string { - return []string{"discriminator"} -} - -func (e *DiscriminatorPropertyMissingError) LocalizedString(printer *message.Printer) string { - return fmt.Sprintf("discriminator property '%s' is missing", e.PropertyName) -} - -func (e *DiscriminatorPropertyMissingError) Error() string { - return fmt.Sprintf("discriminator property '%s' is missing", e.PropertyName) -} - -// CoercionError represents an error during scalar type coercion -type CoercionError struct { - SourceType string - TargetType string - Value string - Message string -} - -func (e *CoercionError) KeywordPath() []string { - return []string{"type"} -} - -func (e *CoercionError) LocalizedString(printer *message.Printer) string { - return fmt.Sprintf("cannot coerce %s '%s' to %s: %s", e.SourceType, e.Value, e.TargetType, e.Message) -} - -func (e *CoercionError) Error() string { - return fmt.Sprintf("cannot coerce %s '%s' to %s: %s", e.SourceType, e.Value, e.TargetType, e.Message) -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/openapi_vocabulary/metadata.go b/vendor/github.com/pb33f/libopenapi-validator/openapi_vocabulary/metadata.go deleted file mode 100644 index 04bd0b2ddf5..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/openapi_vocabulary/metadata.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2025 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package openapi_vocabulary - -import ( - "github.com/santhosh-tekuri/jsonschema/v6" -) - -// 
exampleExtension handles the OpenAPI example keyword (metadata only) -type exampleExtension struct { - example any -} - -func (e *exampleExtension) Validate(ctx *jsonschema.ValidatorContext, v any) { - // Example keyword is metadata only - no validation needed during runtime -} - -// deprecatedExtension handles the OpenAPI deprecated keyword (metadata only) -type deprecatedExtension struct { - deprecated bool -} - -func (d *deprecatedExtension) Validate(ctx *jsonschema.ValidatorContext, v any) { - // Deprecated keyword is metadata only - no validation needed during runtime -} - -// compileExample compiles the example keyword -func CompileExample(ctx *jsonschema.CompilerContext, obj map[string]any, version VersionType) (jsonschema.SchemaExt, error) { - v, exists := obj["example"] - if !exists { - return nil, nil - } - - // Example can be any valid JSON value, so we just store it - // The main validation is that it exists and is parseable (which it is if we got here) - return &exampleExtension{example: v}, nil -} - -// compileDeprecated compiles the deprecated keyword -func CompileDeprecated(ctx *jsonschema.CompilerContext, obj map[string]any, version VersionType) (jsonschema.SchemaExt, error) { - v, exists := obj["deprecated"] - if !exists { - return nil, nil - } - - // Validate that deprecated is a boolean - deprecated, ok := v.(bool) - if !ok { - return nil, &OpenAPIKeywordError{ - Keyword: "deprecated", - Message: "deprecated must be a boolean value", - } - } - - return &deprecatedExtension{deprecated: deprecated}, nil -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/openapi_vocabulary/nullable.go b/vendor/github.com/pb33f/libopenapi-validator/openapi_vocabulary/nullable.go deleted file mode 100644 index bb63c1f64ca..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/openapi_vocabulary/nullable.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2025 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package 
openapi_vocabulary - -import ( - "github.com/santhosh-tekuri/jsonschema/v6" -) - -// compileNullable compiles the nullable keyword based on OpenAPI version -func CompileNullable(_ *jsonschema.CompilerContext, obj map[string]any, version VersionType) (jsonschema.SchemaExt, error) { - v, exists := obj["nullable"] - if !exists { - return nil, nil - } - - // check if nullable is used in OpenAPI 3.1+ (not allowed) - if version == Version31 || version == Version32 { - return nil, &OpenAPIKeywordError{ - Keyword: "nullable", - Message: "The `nullable` keyword is not supported in OpenAPI 3.1+. Use `type: ['string', 'null']` instead", - } - } - - // validate that nullable is a boolean - _, ok := v.(bool) - if !ok { - return nil, &OpenAPIKeywordError{ - Keyword: "nullable", - Message: "nullable must be a boolean value", - } - } - return nil, nil -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/openapi_vocabulary/vocabulary.go b/vendor/github.com/pb33f/libopenapi-validator/openapi_vocabulary/vocabulary.go deleted file mode 100644 index 452d82a7c16..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/openapi_vocabulary/vocabulary.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2025 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package openapi_vocabulary - -import ( - "github.com/santhosh-tekuri/jsonschema/v6" -) - -// OpenAPIVocabularyURL is the vocabulary URL for OpenAPI-specific keywords -const OpenAPIVocabularyURL = "https://pb33f.io/openapi-validator/vocabulary" - -// VersionType represents OpenAPI specification versions -type VersionType int - -const ( - // Version30 represents OpenAPI 3.0.x - Version30 VersionType = iota - Version31 - Version32 -) - -// NewOpenAPIVocabulary creates a vocabulary for OpenAPI-specific keywords -// version determines which keywords are allowed/forbidden -func NewOpenAPIVocabulary(version VersionType) *jsonschema.Vocabulary { - return NewOpenAPIVocabularyWithCoercion(version, false) 
-} - -// NewOpenAPIVocabularyWithCoercion creates a vocabulary with optional scalar coercion -func NewOpenAPIVocabularyWithCoercion(version VersionType, allowCoercion bool) *jsonschema.Vocabulary { - return &jsonschema.Vocabulary{ - URL: OpenAPIVocabularyURL, - Schema: nil, // We don't validate the vocabulary schema itself - Compile: func(ctx *jsonschema.CompilerContext, obj map[string]any) (jsonschema.SchemaExt, error) { - return compileOpenAPIKeywords(ctx, obj, version, allowCoercion) - }, - } -} - -// compileOpenAPIKeywords compiles all OpenAPI-specific keywords found in the schema object -func compileOpenAPIKeywords(ctx *jsonschema.CompilerContext, - obj map[string]any, - version VersionType, - allowCoercion bool, -) (jsonschema.SchemaExt, error) { - var extensions []jsonschema.SchemaExt - - if ext, err := CompileNullable(ctx, obj, version); err != nil { - return nil, err - } else if ext != nil { - extensions = append(extensions, ext) - } - - if ext, err := CompileDiscriminator(ctx, obj, version); err != nil { - return nil, err - } else if ext != nil { - extensions = append(extensions, ext) - } - - if ext, err := CompileExample(ctx, obj, version); err != nil { - return nil, err - } else if ext != nil { - extensions = append(extensions, ext) - } - - if ext, err := CompileDeprecated(ctx, obj, version); err != nil { - return nil, err - } else if ext != nil { - extensions = append(extensions, ext) - } - - if ext, err := CompileCoercion(ctx, obj, allowCoercion); err != nil { - return nil, err - } else if ext != nil { - extensions = append(extensions, ext) - } - - if len(extensions) == 0 { - return nil, nil - } - - return &combinedExtension{extensions: extensions}, nil -} - -// combinedExtension combines multiple OpenAPI extensions into one -type combinedExtension struct { - extensions []jsonschema.SchemaExt -} - -func (c *combinedExtension) Validate(ctx *jsonschema.ValidatorContext, v any) { - for _, ext := range c.extensions { - ext.Validate(ctx, v) - } -} diff 
--git a/vendor/github.com/pb33f/libopenapi-validator/parameters/cookie_parameters.go b/vendor/github.com/pb33f/libopenapi-validator/parameters/cookie_parameters.go deleted file mode 100644 index e08e3cf9167..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/parameters/cookie_parameters.go +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2023-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// SPDX-License-Identifier: MIT - -package parameters - -import ( - "encoding/json" - "fmt" - "net/http" - "strconv" - "strings" - - "github.com/pb33f/libopenapi/datamodel/high/base" - - v3 "github.com/pb33f/libopenapi/datamodel/high/v3" - - "github.com/pb33f/libopenapi-validator/errors" - "github.com/pb33f/libopenapi-validator/helpers" - "github.com/pb33f/libopenapi-validator/paths" - "github.com/pb33f/libopenapi-validator/strict" -) - -func (v *paramValidator) ValidateCookieParams(request *http.Request) (bool, []*errors.ValidationError) { - pathItem, errs, foundPath := paths.FindPath(request, v.document, v.options) - if len(errs) > 0 { - return false, errs - } - return v.ValidateCookieParamsWithPathItem(request, pathItem, foundPath) -} - -func (v *paramValidator) ValidateCookieParamsWithPathItem(request *http.Request, pathItem *v3.PathItem, pathValue string) (bool, []*errors.ValidationError) { - if pathItem == nil { - return false, []*errors.ValidationError{{ - ValidationType: helpers.PathValidation, - ValidationSubType: helpers.ValidationMissing, - Message: fmt.Sprintf("%s Path '%s' not found", request.Method, request.URL.Path), - Reason: fmt.Sprintf("The %s request contains a path of '%s' "+ - "however that path, or the %s method for that path does not exist in the specification", - request.Method, request.URL.Path, request.Method), - SpecLine: -1, - SpecCol: -1, - HowToFix: errors.HowToFixPath, - }} - } - // extract params for the operation - params := helpers.ExtractParamsForOperation(request, pathItem) - var validationErrors []*errors.ValidationError - 
operation := strings.ToLower(request.Method) - - // build a map of cookies from the request for efficient lookup - cookieMap := make(map[string]*http.Cookie) - for _, cookie := range request.Cookies() { - cookieMap[cookie.Name] = cookie - } - - for _, p := range params { - if p.In == helpers.Cookie { - // look up the cookie by name (cookies are case-sensitive) - cookie, found := cookieMap[p.Name] - if !found { - // cookie not present in request - check if required - if p.Required != nil && *p.Required { - validationErrors = append(validationErrors, errors.CookieParameterMissing(p, pathValue, operation, "")) - } - continue - } - - var sch *base.Schema - if p.Schema != nil { - sch = p.Schema.Schema() - } - - // Render schema once for ReferenceSchema field in errors - var renderedSchema string - if sch != nil { - rendered, _ := sch.RenderInline() - schemaBytes, _ := json.Marshal(rendered) - renderedSchema = string(schemaBytes) - } - - pType := sch.Type - - for _, ty := range pType { - switch ty { - case helpers.Integer: - if _, err := strconv.ParseInt(cookie.Value, 10, 64); err != nil { - validationErrors = append(validationErrors, - errors.InvalidCookieParamInteger(p, strings.ToLower(cookie.Value), sch, pathValue, operation, renderedSchema)) - break - } - // validate value matches allowed enum values - if sch.Enum != nil { - matchFound := false - for _, enumVal := range sch.Enum { - if strings.TrimSpace(cookie.Value) == fmt.Sprint(enumVal.Value) { - matchFound = true - break - } - } - if !matchFound { - validationErrors = append(validationErrors, - errors.IncorrectCookieParamEnum(p, strings.ToLower(cookie.Value), sch, pathValue, operation, renderedSchema)) - } - } - case helpers.Number: - if _, err := strconv.ParseFloat(cookie.Value, 64); err != nil { - validationErrors = append(validationErrors, - errors.InvalidCookieParamNumber(p, strings.ToLower(cookie.Value), sch, pathValue, operation, renderedSchema)) - break - } - // validate value matches allowed enum values - 
if sch.Enum != nil { - matchFound := false - for _, enumVal := range sch.Enum { - if strings.TrimSpace(cookie.Value) == fmt.Sprint(enumVal.Value) { - matchFound = true - break - } - } - if !matchFound { - validationErrors = append(validationErrors, - errors.IncorrectCookieParamEnum(p, strings.ToLower(cookie.Value), sch, pathValue, operation, renderedSchema)) - } - } - case helpers.Boolean: - if _, err := strconv.ParseBool(cookie.Value); err != nil { - validationErrors = append(validationErrors, - errors.IncorrectCookieParamBool(p, strings.ToLower(cookie.Value), sch, pathValue, operation, renderedSchema)) - } - case helpers.Object: - if !p.IsExploded() { - encodedObj := helpers.ConstructMapFromCSV(cookie.Value) - - // if a schema was extracted - if sch != nil { - validationErrors = append(validationErrors, - ValidateParameterSchema(sch, encodedObj, "", - "Cookie parameter", - "The cookie parameter", - p.Name, - helpers.ParameterValidation, - helpers.ParameterValidationQuery, - v.options)...) - } - } - case helpers.Array: - - if !p.IsExploded() { - // well we're already in an array, so we need to check the items schema - // to ensure this array items matches the type - // only check if items is a schema, not a boolean - if sch.Items.IsA() { - validationErrors = append(validationErrors, - ValidateCookieArray(sch, p, cookie.Value, pathValue, operation, renderedSchema)...) - } - } - - case helpers.String: - - // check if the schema has an enum, and if so, match the value against one of - // the defined enum values. 
- if sch.Enum != nil { - matchFound := false - for _, enumVal := range sch.Enum { - if strings.TrimSpace(cookie.Value) == fmt.Sprint(enumVal.Value) { - matchFound = true - break - } - } - if !matchFound { - validationErrors = append(validationErrors, - errors.IncorrectCookieParamEnum(p, strings.ToLower(cookie.Value), sch, pathValue, operation, renderedSchema)) - break - } - } - validationErrors = append(validationErrors, - ValidateSingleParameterSchema( - sch, - cookie.Value, - "Cookie parameter", - "The cookie parameter", - p.Name, - helpers.ParameterValidation, - helpers.ParameterValidationCookie, - v.options, - pathValue, - operation, - )...) - } - } - } - } - - errors.PopulateValidationErrors(validationErrors, request, pathValue) - - if len(validationErrors) > 0 { - return false, validationErrors - } - - // strict mode: check for undeclared cookies - if v.options.StrictMode { - undeclaredCookies := strict.ValidateCookies(request, params, v.options) - for _, undeclared := range undeclaredCookies { - validationErrors = append(validationErrors, - errors.UndeclaredCookieError( - undeclared.Path, - undeclared.Name, - undeclared.Value, - undeclared.DeclaredProperties, - request.URL.Path, - request.Method, - )) - } - } - - if len(validationErrors) > 0 { - return false, validationErrors - } - return true, nil -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/parameters/header_parameters.go b/vendor/github.com/pb33f/libopenapi-validator/parameters/header_parameters.go deleted file mode 100644 index a13c660a70e..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/parameters/header_parameters.go +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package parameters - -import ( - "encoding/json" - "fmt" - "net/http" - "strconv" - "strings" - - "github.com/pb33f/libopenapi/datamodel/high/base" - - v3 "github.com/pb33f/libopenapi/datamodel/high/v3" - lowbase 
"github.com/pb33f/libopenapi/datamodel/low/base" - - "github.com/pb33f/libopenapi-validator/errors" - "github.com/pb33f/libopenapi-validator/helpers" - "github.com/pb33f/libopenapi-validator/paths" - "github.com/pb33f/libopenapi-validator/strict" -) - -func (v *paramValidator) ValidateHeaderParams(request *http.Request) (bool, []*errors.ValidationError) { - pathItem, errs, foundPath := paths.FindPath(request, v.document, v.options) - if len(errs) > 0 { - return false, errs - } - return v.ValidateHeaderParamsWithPathItem(request, pathItem, foundPath) -} - -func (v *paramValidator) ValidateHeaderParamsWithPathItem(request *http.Request, pathItem *v3.PathItem, pathValue string) (bool, []*errors.ValidationError) { - if pathItem == nil { - return false, []*errors.ValidationError{{ - ValidationType: helpers.PathValidation, - ValidationSubType: helpers.ValidationMissing, - Message: fmt.Sprintf("%s Path '%s' not found", request.Method, request.URL.Path), - Reason: fmt.Sprintf("The %s request contains a path of '%s' "+ - "however that path, or the %s method for that path does not exist in the specification", - request.Method, request.URL.Path, request.Method), - SpecLine: -1, - SpecCol: -1, - HowToFix: errors.HowToFixPath, - }} - } - // extract params for the operation - params := helpers.ExtractParamsForOperation(request, pathItem) - - var validationErrors []*errors.ValidationError - seenHeaders := make(map[string]bool) - operation := strings.ToLower(request.Method) - for _, p := range params { - if p.In == helpers.Header { - - seenHeaders[strings.ToLower(p.Name)] = true - if param := request.Header.Get(p.Name); param != "" { - - var sch *base.Schema - if p.Schema != nil { - sch = p.Schema.Schema() - } - - // Render schema once for ReferenceSchema field in errors - var renderedSchema string - if sch != nil { - rendered, _ := sch.RenderInline() - schemaBytes, _ := json.Marshal(rendered) - renderedSchema = string(schemaBytes) - } - - pType := sch.Type - - for _, ty := range 
pType { - switch ty { - case helpers.Integer: - if _, err := strconv.ParseInt(param, 10, 64); err != nil { - validationErrors = append(validationErrors, - errors.InvalidHeaderParamInteger(p, strings.ToLower(param), sch, pathValue, operation, renderedSchema)) - break - } - // check if the param is within the enum - if sch.Enum != nil { - matchFound := false - for _, enumVal := range sch.Enum { - if strings.TrimSpace(param) == fmt.Sprint(enumVal.Value) { - matchFound = true - break - } - } - if !matchFound { - validationErrors = append(validationErrors, - errors.IncorrectHeaderParamEnum(p, strings.ToLower(param), sch, pathValue, operation, renderedSchema)) - } - } - - case helpers.Number: - if _, err := strconv.ParseFloat(param, 64); err != nil { - validationErrors = append(validationErrors, - errors.InvalidHeaderParamNumber(p, strings.ToLower(param), sch, pathValue, operation, renderedSchema)) - break - } - // check if the param is within the enum - if sch.Enum != nil { - matchFound := false - for _, enumVal := range sch.Enum { - if strings.TrimSpace(param) == fmt.Sprint(enumVal.Value) { - matchFound = true - break - } - } - if !matchFound { - validationErrors = append(validationErrors, - errors.IncorrectHeaderParamEnum(p, strings.ToLower(param), sch, pathValue, operation, renderedSchema)) - } - } - - case helpers.Boolean: - if _, err := strconv.ParseBool(param); err != nil { - validationErrors = append(validationErrors, - errors.IncorrectHeaderParamBool(p, strings.ToLower(param), sch, pathValue, operation, renderedSchema)) - } - - case helpers.Object: - - // check if the header is default encoded or not - var encodedObj map[string]interface{} - // we have found our header, check the explode type. - if p.IsDefaultHeaderEncoding() { - encodedObj = helpers.ConstructMapFromCSV(param) - } else { - if p.IsExploded() { // only option is to be exploded for KV extraction. 
- encodedObj = helpers.ConstructKVFromCSV(param) - } - } - - if len(encodedObj) == 0 { - validationErrors = append(validationErrors, - errors.HeaderParameterCannotBeDecoded(p, strings.ToLower(param), pathValue, operation, renderedSchema)) - break - } - - // if a schema was extracted - if sch != nil { - validationErrors = append(validationErrors, - ValidateParameterSchema(sch, - encodedObj, - "", - "Header parameter", - "The header parameter", - p.Name, - helpers.ParameterValidation, - helpers.ParameterValidationQuery, v.options)...) - } - - case helpers.Array: - if !p.IsExploded() { // only unexploded arrays are supported for cookie params - if sch.Items.IsA() { - validationErrors = append(validationErrors, - ValidateHeaderArray(sch, p, param, pathValue, operation, renderedSchema)...) - } - } - - case helpers.String: - - // check if the schema has an enum, and if so, match the value against one of - // the defined enum values. - if sch.Enum != nil { - matchFound := false - for _, enumVal := range sch.Enum { - if strings.TrimSpace(param) == fmt.Sprint(enumVal.Value) { - matchFound = true - break - } - } - if !matchFound { - validationErrors = append(validationErrors, - errors.IncorrectHeaderParamEnum(p, strings.ToLower(param), sch, pathValue, operation, renderedSchema)) - break - } - } - validationErrors = append(validationErrors, - ValidateSingleParameterSchema( - sch, - param, - "Header parameter", - "The header parameter", - p.Name, - helpers.ParameterValidation, - helpers.ParameterValidationHeader, - v.options, - pathValue, - operation, - )...) - } - } - if len(pType) == 0 { - // validate schema as there is no type information. - validationErrors = append(validationErrors, ValidateSingleParameterSchema(sch, - param, - p.Name, - lowbase.SchemaLabel, p.Name, helpers.ParameterValidation, helpers.ParameterValidationHeader, v.options, - pathValue, - operation)...) 
- } - } else { - if p.Required != nil && *p.Required { - // Render schema for missing required parameter - var renderedSchema string - if p.Schema != nil { - sch := p.Schema.Schema() - if sch != nil { - rendered, _ := sch.RenderInline() - schemaBytes, _ := json.Marshal(rendered) - renderedSchema = string(schemaBytes) - } - } - validationErrors = append(validationErrors, errors.HeaderParameterMissing(p, pathValue, operation, renderedSchema)) - } - } - } - } - - errors.PopulateValidationErrors(validationErrors, request, pathValue) - - if len(validationErrors) > 0 { - return false, validationErrors - } - - // strict mode: check for undeclared headers - if v.options.StrictMode { - // Extract security headers applicable to this operation - var securityHeaders []string - if v.document.Components != nil && v.document.Components.SecuritySchemes != nil { - security := helpers.ExtractSecurityForOperation(request, pathItem) - // Convert orderedmap to regular map for the helper - schemesMap := make(map[string]*v3.SecurityScheme) - for pair := v.document.Components.SecuritySchemes.First(); pair != nil; pair = pair.Next() { - schemesMap[pair.Key()] = pair.Value() - } - securityHeaders = helpers.ExtractSecurityHeaderNames(security, schemesMap) - } - - undeclaredHeaders := strict.ValidateRequestHeaders(request.Header, params, securityHeaders, v.options) - for _, undeclared := range undeclaredHeaders { - validationErrors = append(validationErrors, - errors.UndeclaredHeaderError( - undeclared.Name, - undeclared.Value.(string), - undeclared.DeclaredProperties, - undeclared.Direction.String(), - request.URL.Path, - request.Method, - )) - } - } - - if len(validationErrors) > 0 { - return false, validationErrors - } - return true, nil -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/parameters/package.go b/vendor/github.com/pb33f/libopenapi-validator/parameters/package.go deleted file mode 100644 index b339aa77546..00000000000 --- 
a/vendor/github.com/pb33f/libopenapi-validator/parameters/package.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -// Package parameters contains all the logic, models and interfaces for validating OpenAPI 3+ Parameters. -// Cookie, Header, Path and Query parameters are all validated. -package parameters diff --git a/vendor/github.com/pb33f/libopenapi-validator/parameters/parameters.go b/vendor/github.com/pb33f/libopenapi-validator/parameters/parameters.go deleted file mode 100644 index 2fa168a149e..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/parameters/parameters.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package parameters - -import ( - "net/http" - - v3 "github.com/pb33f/libopenapi/datamodel/high/v3" - - "github.com/pb33f/libopenapi-validator/config" - "github.com/pb33f/libopenapi-validator/errors" -) - -// ParameterValidator is an interface that defines the methods for validating parameters -// There are 4 types of parameters: query, header, cookie and path. -// -// ValidateQueryParams will validate the query parameters for the request -// ValidateHeaderParams will validate the header parameters for the request -// ValidateCookieParamsWithPathItem will validate the cookie parameters for the request -// ValidatePathParams will validate the path parameters for the request -// -// Each method accepts an *http.Request and returns true if validation passed, -// false if validation failed and a slice of ValidationError pointers. -type ParameterValidator interface { - // ValidateQueryParams accepts an *http.Request and validates the query parameters against the OpenAPI specification. - // The method will locate the correct path, and operation, based on the verb. 
The parameters for the operation - // will be matched and validated against what has been supplied in the http.Request query string. - ValidateQueryParams(request *http.Request) (bool, []*errors.ValidationError) - - // ValidateQueryParamsWithPathItem accepts an *http.Request and validates the query parameters against the OpenAPI specification. - // The method will locate the correct path, and operation, based on the verb. The parameters for the operation - // will be matched and validated against what has been supplied in the http.Request query string. - ValidateQueryParamsWithPathItem(request *http.Request, pathItem *v3.PathItem, pathValue string) (bool, []*errors.ValidationError) - - // ValidateHeaderParams validates the header parameters contained within *http.Request. It returns a boolean - // stating true if validation passed (false for failed), and a slice of errors if validation failed. - ValidateHeaderParams(request *http.Request) (bool, []*errors.ValidationError) - - // ValidateHeaderParamsWithPathItem validates the header parameters contained within *http.Request. It returns a boolean - // stating true if validation passed (false for failed), and a slice of errors if validation failed. - ValidateHeaderParamsWithPathItem(request *http.Request, pathItem *v3.PathItem, pathValue string) (bool, []*errors.ValidationError) - - // ValidateCookieParams validates the cookie parameters contained within *http.Request. - // It returns a boolean stating true if validation passed (false for failed), and a slice of errors if validation failed. - ValidateCookieParams(request *http.Request) (bool, []*errors.ValidationError) - - // ValidateCookieParamsWithPathItem validates the cookie parameters contained within *http.Request. - // It returns a boolean stating true if validation passed (false for failed), and a slice of errors if validation failed. 
- ValidateCookieParamsWithPathItem(request *http.Request, pathItem *v3.PathItem, pathValue string) (bool, []*errors.ValidationError) - - // ValidatePathParams validates the path parameters contained within *http.Request. It returns a boolean stating true - // if validation passed (false for failed), and a slice of errors if validation failed. - ValidatePathParams(request *http.Request) (bool, []*errors.ValidationError) - - // ValidatePathParamsWithPathItem validates the path parameters contained within *http.Request. It returns a boolean stating true - // if validation passed (false for failed), and a slice of errors if validation failed. - ValidatePathParamsWithPathItem(request *http.Request, pathItem *v3.PathItem, pathValue string) (bool, []*errors.ValidationError) - - // ValidateSecurity validates the security requirements for the operation. It returns a boolean stating true - // if validation passed (false for failed), and a slice of errors if validation failed. - ValidateSecurity(request *http.Request) (bool, []*errors.ValidationError) - - // ValidateSecurityWithPathItem validates the security requirements for the operation. It returns a boolean stating true - // if validation passed (false for failed), and a slice of errors if validation failed. - ValidateSecurityWithPathItem(request *http.Request, pathItem *v3.PathItem, pathValue string) (bool, []*errors.ValidationError) -} - -// NewParameterValidator will create a new ParameterValidator from an OpenAPI 3+ document -func NewParameterValidator(document *v3.Document, opts ...config.Option) ParameterValidator { - options := config.NewValidationOptions(opts...) 
- - return ¶mValidator{options: options, document: document} -} - -type paramValidator struct { - options *config.ValidationOptions - document *v3.Document -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/parameters/path_parameters.go b/vendor/github.com/pb33f/libopenapi-validator/parameters/path_parameters.go deleted file mode 100644 index d18bced1f71..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/parameters/path_parameters.go +++ /dev/null @@ -1,440 +0,0 @@ -// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package parameters - -import ( - "encoding/json" - "fmt" - "net/http" - "net/url" - "regexp" - "strconv" - "strings" - - "github.com/pb33f/libopenapi/datamodel/high/base" - - v3 "github.com/pb33f/libopenapi/datamodel/high/v3" - - "github.com/pb33f/libopenapi-validator/errors" - "github.com/pb33f/libopenapi-validator/helpers" - "github.com/pb33f/libopenapi-validator/paths" -) - -func (v *paramValidator) ValidatePathParams(request *http.Request) (bool, []*errors.ValidationError) { - pathItem, errs, foundPath := paths.FindPath(request, v.document, v.options) - if len(errs) > 0 { - return false, errs - } - return v.ValidatePathParamsWithPathItem(request, pathItem, foundPath) -} - -func (v *paramValidator) ValidatePathParamsWithPathItem(request *http.Request, pathItem *v3.PathItem, pathValue string) (bool, []*errors.ValidationError) { - if pathItem == nil { - return false, []*errors.ValidationError{{ - ValidationType: helpers.PathValidation, - ValidationSubType: helpers.ValidationMissing, - Message: fmt.Sprintf("%s Path '%s' not found", request.Method, request.URL.Path), - Reason: fmt.Sprintf("The %s request contains a path of '%s' "+ - "however that path, or the %s method for that path does not exist in the specification", - request.Method, request.URL.Path, request.Method), - SpecLine: -1, - SpecCol: -1, - HowToFix: errors.HowToFixPath, - }} - } - // split the path into segments - 
submittedSegments := strings.Split(paths.StripRequestPath(request, v.document), helpers.Slash) - pathSegments := strings.Split(pathValue, helpers.Slash) - - // get the operation method for error reporting - operation := strings.ToLower(request.Method) - - // extract params for the operation - params := helpers.ExtractParamsForOperation(request, pathItem) - var validationErrors []*errors.ValidationError - for _, p := range params { - if p.In == helpers.Path { - // var paramTemplate string - for x := range pathSegments { - if pathSegments[x] == "" { // skip empty segments - continue - } - - var rgx *regexp.Regexp - - if v.options.RegexCache != nil { - if cachedRegex, found := v.options.RegexCache.Load(pathSegments[x]); found { - rgx = cachedRegex.(*regexp.Regexp) - } - } - - if rgx == nil { - - r, err := helpers.GetRegexForPath(pathSegments[x]) - if err != nil { - continue - } - - rgx = r - - if v.options.RegexCache != nil { - v.options.RegexCache.Store(pathSegments[x], r) - } - } - - matches := rgx.FindStringSubmatch(submittedSegments[x]) - matches = matches[1:] - - // Check if it is well-formed. 
- idxs, errBraces := helpers.BraceIndices(pathSegments[x]) - if errBraces != nil { - continue - } - - idx := 0 - - for _, match := range matches { - - isMatrix := false - isLabel := false - // isExplode := false - isSimple := true - paramTemplate := pathSegments[x][idxs[idx]+1 : idxs[idx+1]-1] - idx += 2 // move to the next brace pair - paramName := paramTemplate - - // check for an asterisk on the end of the parameter (explode) - if strings.HasSuffix(paramTemplate, helpers.Asterisk) { - // isExplode = true - paramName = paramTemplate[:len(paramTemplate)-1] - } - if strings.HasPrefix(paramTemplate, helpers.Period) { - isLabel = true - isSimple = false - paramName = paramName[1:] - } - if strings.HasPrefix(paramTemplate, helpers.SemiColon) { - isMatrix = true - isSimple = false - paramName = paramName[1:] - } - - // does this param name match the current path segment param name - if paramName != p.Name { - continue - } - - paramValue := match - - // URL decode the parameter value before validation - decodedParamValue, _ := url.PathUnescape(paramValue) - - if decodedParamValue == "" { - // Mandatory path parameter cannot be empty - if p.Required != nil && *p.Required { - validationErrors = append(validationErrors, errors.PathParameterMissing(p, pathValue, request.URL.Path)) - break - } - continue - } - - // extract the schema from the parameter - sch := p.Schema.Schema() - - // Render schema once for ReferenceSchema field in errors - var renderedSchema string - if sch != nil { - rendered, _ := sch.RenderInline() - schemaBytes, _ := json.Marshal(rendered) - renderedSchema = string(schemaBytes) - } - - // check enum (if present) - enumCheck := func(decodedValue string) { - matchFound := false - for _, enumVal := range sch.Enum { - if strings.TrimSpace(decodedValue) == fmt.Sprint(enumVal.Value) { - matchFound = true - break - } - } - if !matchFound { - validationErrors = append(validationErrors, - errors.IncorrectPathParamEnum(p, strings.ToLower(decodedValue), sch, 
pathValue, renderedSchema)) - } - } - - // for each type, check the value. - if sch != nil && sch.Type != nil { - for typ := range sch.Type { - switch sch.Type[typ] { - case helpers.String: - - // TODO: label and matrix style validation - - // check if the param is within the enum - if sch.Enum != nil { - enumCheck(decodedParamValue) - break - } - validationErrors = append(validationErrors, - ValidateSingleParameterSchema( - sch, - decodedParamValue, - "Path parameter", - "The path parameter", - p.Name, - helpers.ParameterValidation, - helpers.ParameterValidationPath, - v.options, - pathValue, - operation, - )...) - - case helpers.Integer: - // simple use case is already handled in find param. - rawParamValue, paramValueParsed, err := v.resolveInteger(sch, p, isLabel, isMatrix, decodedParamValue, pathValue, renderedSchema) - if err != nil { - validationErrors = append(validationErrors, err...) - break - } - // check if the param is within the enum - if sch.Enum != nil { - enumCheck(rawParamValue) - break - } - validationErrors = append(validationErrors, ValidateSingleParameterSchema( - sch, - paramValueParsed, - "Path parameter", - "The path parameter", - p.Name, - helpers.ParameterValidation, - helpers.ParameterValidationPath, - v.options, - pathValue, - operation, - )...) - - case helpers.Number: - // simple use case is already handled in find param. - rawParamValue, paramValueParsed, err := v.resolveNumber(sch, p, isLabel, isMatrix, decodedParamValue, pathValue, renderedSchema) - if err != nil { - validationErrors = append(validationErrors, err...) - break - } - // check if the param is within the enum - if sch.Enum != nil { - enumCheck(rawParamValue) - break - } - validationErrors = append(validationErrors, ValidateSingleParameterSchema( - sch, - paramValueParsed, - "Path parameter", - "The path parameter", - p.Name, - helpers.ParameterValidation, - helpers.ParameterValidationPath, - v.options, - pathValue, - operation, - )...) 
- - case helpers.Boolean: - if isLabel && p.Style == helpers.LabelStyle { - if _, err := strconv.ParseBool(decodedParamValue[1:]); err != nil { - validationErrors = append(validationErrors, - errors.IncorrectPathParamBool(p, decodedParamValue[1:], sch, pathValue, renderedSchema)) - } - } - if isSimple { - if _, err := strconv.ParseBool(decodedParamValue); err != nil { - validationErrors = append(validationErrors, - errors.IncorrectPathParamBool(p, decodedParamValue, sch, pathValue, renderedSchema)) - } - } - if isMatrix && p.Style == helpers.MatrixStyle { - // strip off the colon and the parameter name - decodedForMatrix := strings.Replace(decodedParamValue[1:], fmt.Sprintf("%s=", p.Name), "", 1) - if _, err := strconv.ParseBool(decodedForMatrix); err != nil { - validationErrors = append(validationErrors, - errors.IncorrectPathParamBool(p, decodedForMatrix, sch, pathValue, renderedSchema)) - } - } - case helpers.Object: - var encodedObject interface{} - - if p.IsDefaultPathEncoding() { - encodedObject = helpers.ConstructMapFromCSV(decodedParamValue) - } else { - switch p.Style { - case helpers.LabelStyle: - if !p.IsExploded() { - encodedObject = helpers.ConstructMapFromCSV(decodedParamValue[1:]) - } else { - encodedObject = helpers.ConstructKVFromLabelEncoding(decodedParamValue) - } - case helpers.MatrixStyle: - if !p.IsExploded() { - decodedForMatrix := strings.Replace(decodedParamValue[1:], fmt.Sprintf("%s=", p.Name), "", 1) - encodedObject = helpers.ConstructMapFromCSV(decodedForMatrix) - } else { - decodedForMatrix := strings.Replace(decodedParamValue[1:], fmt.Sprintf("%s=", p.Name), "", 1) - encodedObject = helpers.ConstructKVFromMatrixCSV(decodedForMatrix) - } - default: - if p.IsExploded() { - encodedObject = helpers.ConstructKVFromCSV(decodedParamValue) - } - } - } - // if a schema was extracted - if sch != nil { - validationErrors = append(validationErrors, - ValidateParameterSchema(sch, - encodedObject, - "", - "Path parameter", - "The path parameter", - 
p.Name, - helpers.ParameterValidation, - helpers.ParameterValidationPath, v.options)...) - } - - case helpers.Array: - - // extract the items schema in order to validate the array items. - if sch.Items != nil && sch.Items.IsA() { - iSch := sch.Items.A.Schema() - - // Render items schema once for ReferenceSchema field in array errors - var renderedItemsSchema string - if iSch != nil { - rendered, _ := iSch.RenderInline() - schemaBytes, _ := json.Marshal(rendered) - renderedItemsSchema = string(schemaBytes) - } - - for n := range iSch.Type { - // determine how to explode the array - var arrayValues []string - if isSimple { - arrayValues = strings.Split(decodedParamValue, helpers.Comma) - } - if isLabel { - if !p.IsExploded() { - arrayValues = strings.Split(decodedParamValue[1:], helpers.Comma) - } else { - arrayValues = strings.Split(decodedParamValue[1:], helpers.Period) - } - } - if isMatrix { - if !p.IsExploded() { - decodedForMatrix := strings.Replace(decodedParamValue[1:], fmt.Sprintf("%s=", p.Name), "", 1) - arrayValues = strings.Split(decodedForMatrix, helpers.Comma) - } else { - decodedForMatrix := strings.ReplaceAll(decodedParamValue[1:], fmt.Sprintf("%s=", p.Name), "") - arrayValues = strings.Split(decodedForMatrix, helpers.SemiColon) - } - } - switch iSch.Type[n] { - case helpers.Integer: - for pv := range arrayValues { - if _, err := strconv.ParseInt(arrayValues[pv], 10, 64); err != nil { - validationErrors = append(validationErrors, - errors.IncorrectPathParamArrayInteger(p, arrayValues[pv], sch, iSch, pathValue, renderedItemsSchema)) - } - } - case helpers.Number: - for pv := range arrayValues { - if _, err := strconv.ParseFloat(arrayValues[pv], 64); err != nil { - validationErrors = append(validationErrors, - errors.IncorrectPathParamArrayNumber(p, arrayValues[pv], sch, iSch, pathValue, renderedItemsSchema)) - } - } - case helpers.Boolean: - for pv := range arrayValues { - bc := len(validationErrors) - if _, err := strconv.ParseBool(arrayValues[pv]); 
err != nil { - validationErrors = append(validationErrors, - errors.IncorrectPathParamArrayBoolean(p, arrayValues[pv], sch, iSch, pathValue, renderedItemsSchema)) - continue - } - if len(validationErrors) == bc { - // ParseBool will parse 0 or 1 as false/true to we - // need to catch this edge case. - if arrayValues[pv] == "0" || arrayValues[pv] == "1" { - validationErrors = append(validationErrors, - errors.IncorrectPathParamArrayBoolean(p, arrayValues[pv], sch, iSch, pathValue, renderedItemsSchema)) - continue - } - } - } - } - } - } - } - } - } - } - } - } - } - - errors.PopulateValidationErrors(validationErrors, request, pathValue) - - if len(validationErrors) > 0 { - return false, validationErrors - } - return true, nil -} - -func (v *paramValidator) resolveNumber(sch *base.Schema, p *v3.Parameter, isLabel bool, isMatrix bool, paramValue string, pathValue string, renderedSchema string) (string, float64, []*errors.ValidationError) { - if isLabel && p.Style == helpers.LabelStyle { - paramValueParsed, err := strconv.ParseFloat(paramValue[1:], 64) - if err != nil { - return "", 0, []*errors.ValidationError{errors.IncorrectPathParamNumber(p, paramValue[1:], sch, pathValue, renderedSchema)} - } - return paramValue[1:], paramValueParsed, nil - } - if isMatrix && p.Style == helpers.MatrixStyle { - // strip off the colon and the parameter name - paramValue = strings.Replace(paramValue[1:], fmt.Sprintf("%s=", p.Name), "", 1) - paramValueParsed, err := strconv.ParseFloat(paramValue, 64) - if err != nil { - return "", 0, []*errors.ValidationError{errors.IncorrectPathParamNumber(p, paramValue[1:], sch, pathValue, renderedSchema)} - } - return paramValue, paramValueParsed, nil - } - paramValueParsed, err := strconv.ParseFloat(paramValue, 64) - if err != nil { - return "", 0, []*errors.ValidationError{errors.IncorrectPathParamNumber(p, paramValue, sch, pathValue, renderedSchema)} - } - return paramValue, paramValueParsed, nil -} - -func (v *paramValidator) resolveInteger(sch 
*base.Schema, p *v3.Parameter, isLabel bool, isMatrix bool, paramValue string, pathValue string, renderedSchema string) (string, int64, []*errors.ValidationError) { - if isLabel && p.Style == helpers.LabelStyle { - paramValueParsed, err := strconv.ParseInt(paramValue[1:], 10, 64) - if err != nil { - return "", 0, []*errors.ValidationError{errors.IncorrectPathParamInteger(p, paramValue[1:], sch, pathValue, renderedSchema)} - } - return paramValue[1:], paramValueParsed, nil - } - if isMatrix && p.Style == helpers.MatrixStyle { - // strip off the colon and the parameter name - paramValue = strings.Replace(paramValue[1:], fmt.Sprintf("%s=", p.Name), "", 1) - paramValueParsed, err := strconv.ParseInt(paramValue, 10, 64) - if err != nil { - return "", 0, []*errors.ValidationError{errors.IncorrectPathParamInteger(p, paramValue[1:], sch, pathValue, renderedSchema)} - } - return paramValue, paramValueParsed, nil - } - paramValueParsed, err := strconv.ParseInt(paramValue, 10, 64) - if err != nil { - return "", 0, []*errors.ValidationError{errors.IncorrectPathParamInteger(p, paramValue, sch, pathValue, renderedSchema)} - } - return paramValue, paramValueParsed, nil -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/parameters/query_parameters.go b/vendor/github.com/pb33f/libopenapi-validator/parameters/query_parameters.go deleted file mode 100644 index 8dd9b1e92b0..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/parameters/query_parameters.go +++ /dev/null @@ -1,333 +0,0 @@ -// Copyright 2023-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// SPDX-License-Identifier: MIT - -package parameters - -import ( - "encoding/json" - "fmt" - "net/http" - "regexp" - "strconv" - "strings" - - "github.com/pb33f/libopenapi/datamodel/high/base" - "github.com/pb33f/libopenapi/orderedmap" - - v3 "github.com/pb33f/libopenapi/datamodel/high/v3" - - "github.com/pb33f/libopenapi-validator/errors" - "github.com/pb33f/libopenapi-validator/helpers" - 
"github.com/pb33f/libopenapi-validator/paths" - "github.com/pb33f/libopenapi-validator/strict" -) - -const rx = `[:\/\?#\[\]\@!\$&'\(\)\*\+,;=]` - -var rxRxp = regexp.MustCompile(rx) - -func (v *paramValidator) ValidateQueryParams(request *http.Request) (bool, []*errors.ValidationError) { - pathItem, errs, foundPath := paths.FindPath(request, v.document, v.options) - if len(errs) > 0 { - return false, errs - } - return v.ValidateQueryParamsWithPathItem(request, pathItem, foundPath) -} - -func (v *paramValidator) ValidateQueryParamsWithPathItem(request *http.Request, pathItem *v3.PathItem, pathValue string) (bool, []*errors.ValidationError) { - if pathItem == nil { - return false, []*errors.ValidationError{{ - ValidationType: helpers.PathValidation, - ValidationSubType: helpers.ValidationMissing, - Message: fmt.Sprintf("%s Path '%s' not found", request.Method, request.URL.Path), - Reason: fmt.Sprintf("The %s request contains a path of '%s' "+ - "however that path, or the %s method for that path does not exist in the specification", - request.Method, request.URL.Path, request.Method), - SpecLine: -1, - SpecCol: -1, - HowToFix: errors.HowToFixPath, - }} - } - // extract params for the operation - params := helpers.ExtractParamsForOperation(request, pathItem) - queryParams := make(map[string][]*helpers.QueryParam) - var validationErrors []*errors.ValidationError - - // build a set of spec parameter names for exact matching - specParamNames := make(map[string]bool) - for _, p := range params { - if p.In == helpers.Query { - specParamNames[p.Name] = true - } - } - - for qKey, qVal := range request.URL.Query() { - // check if the query key exactly matches a spec parameter name (e.g., "match[]") - // if so, store it literally without deepObject stripping - if specParamNames[qKey] { - queryParams[qKey] = append(queryParams[qKey], &helpers.QueryParam{ - Key: qKey, - Values: qVal, - }) - } else if strings.IndexRune(qKey, '[') > 0 && strings.IndexRune(qKey, ']') > 0 { - // 
check if the param is encoded as a property / deepObject - stripped := qKey[:strings.IndexRune(qKey, '[')] - value := qKey[strings.IndexRune(qKey, '[')+1 : strings.IndexRune(qKey, ']')] - queryParams[stripped] = append(queryParams[stripped], &helpers.QueryParam{ - Key: stripped, - Values: qVal, - Property: value, - }) - } else { - queryParams[qKey] = append(queryParams[qKey], &helpers.QueryParam{ - Key: qKey, - Values: qVal, - }) - } - } - - // Get operation from request method (lowercase for JSON Pointer) - operation := strings.ToLower(request.Method) - - // look through the params for the query key -doneLooking: - for p := range params { - if params[p].In == helpers.Query { - - contentWrapped := false - var contentType string - // check if this param is found as a set of query strings - if jk, ok := queryParams[params[p].Name]; ok { - skipValues: - for _, fp := range jk { - // let's check styles first. - validationErrors = append(validationErrors, ValidateQueryParamStyle(params[p], jk)...) - - // there is a match, is the type correct - // this context is extracted from the 3.1 spec to explain what is going on here: - // For more complex scenarios, the content property can define the media type and schema of the - // parameter. A parameter MUST contain either a schema property, or a content property, but not both. - // The map MUST only contain one entry. 
(for content) - var sch *base.Schema - if params[p].Schema != nil { - sch = params[p].Schema.Schema() - } else { - // ok, no schema, check for a content type - for pair := orderedmap.First(params[p].Content); pair != nil; pair = pair.Next() { - sch = pair.Value().Schema.Schema() - contentWrapped = true - contentType = pair.Key() - break - } - } - - // Render schema once for ReferenceSchema field in errors - var renderedSchema string - if sch != nil { - rendered, _ := sch.RenderInline() - schemaBytes, _ := json.Marshal(rendered) - renderedSchema = string(schemaBytes) - } - - pType := sch.Type - - // for each param, check each type - for _, ef := range fp.Values { - - // check allowReserved values. If this is set to true, then we can allow the - // following characters - // :/?#[]@!$&'()*+,;= - // to be present as they are, without being URLEncoded. - if !params[p].AllowReserved { - if rxRxp.MatchString(ef) && params[p].IsExploded() { - validationErrors = append(validationErrors, - errors.IncorrectReservedValues(params[p], ef, sch, pathValue, operation, renderedSchema)) - } - } - for _, ty := range pType { - switch ty { - - case helpers.String: - validationErrors = append(validationErrors, v.validateSimpleParam(sch, ef, ef, params[p], pathValue, operation, renderedSchema)...) - case helpers.Integer: - efF, err := strconv.ParseInt(ef, 10, 64) - if err != nil { - validationErrors = append(validationErrors, - errors.InvalidQueryParamInteger(params[p], ef, sch, pathValue, operation, renderedSchema)) - break - } - validationErrors = append(validationErrors, v.validateSimpleParam(sch, ef, efF, params[p], pathValue, operation, renderedSchema)...) 
- case helpers.Number: - efF, err := strconv.ParseFloat(ef, 64) - if err != nil { - validationErrors = append(validationErrors, - errors.InvalidQueryParamNumber(params[p], ef, sch, pathValue, operation, renderedSchema)) - break - } - validationErrors = append(validationErrors, v.validateSimpleParam(sch, ef, efF, params[p], pathValue, operation, renderedSchema)...) - case helpers.Boolean: - if _, err := strconv.ParseBool(ef); err != nil { - validationErrors = append(validationErrors, - errors.IncorrectQueryParamBool(params[p], ef, sch, pathValue, operation, renderedSchema)) - } - case helpers.Object: - - // check what style of encoding was used and then construct a map[string]interface{} - // and pass that in as encoded JSON. - var encodedObj map[string]interface{} - - switch params[p].Style { - case helpers.DeepObject: - encodedObj = helpers.ConstructParamMapFromDeepObjectEncoding(jk, sch) - case helpers.PipeDelimited: - encodedObj = helpers.ConstructParamMapFromPipeEncoding(jk) - case helpers.SpaceDelimited: - encodedObj = helpers.ConstructParamMapFromSpaceEncoding(jk) - default: - // form encoding is default. 
- if contentWrapped { - switch contentType { - case helpers.JSONContentType: - // we need to unmarshal the JSON into a map[string]interface{} - encodedParams := make(map[string]interface{}) - encodedObj = make(map[string]interface{}) - if err := json.Unmarshal([]byte(ef), &encodedParams); err != nil { - validationErrors = append(validationErrors, - errors.IncorrectParamEncodingJSON(params[p], ef, sch, pathValue, operation, renderedSchema)) - break skipValues - } - encodedObj[params[p].Name] = encodedParams - } - } else { - encodedObj = helpers.ConstructParamMapFromFormEncodingArray(jk) - } - } - - numErrors := len(validationErrors) - validationErrors = append(validationErrors, - ValidateParameterSchema(sch, encodedObj[params[p].Name].(map[string]interface{}), - ef, - "Query parameter", - "The query parameter", - params[p].Name, - helpers.ParameterValidation, - helpers.ParameterValidationQuery, v.options)...) - if len(validationErrors) > numErrors { - // we've already added an error for this, so we can skip the rest of the values - break skipValues - } - - case helpers.Array: - // well we're already in an array, so we need to check the items schema - // to ensure this array items matches the type - // only check if items is a schema, not a boolean - if sch.Items != nil && sch.Items.IsA() { - validationErrors = append(validationErrors, - ValidateQueryArray(sch, params[p], ef, contentWrapped, v.options, pathValue, operation, renderedSchema)...) - } - } - } - } - } - } else { - // if the param is not in the requests, so let's check if this param is an - // object, and if we should use default encoding and explode values. - if params[p].Schema != nil { - sch := params[p].Schema.Schema() - - if len(sch.Type) > 0 && sch.Type[0] == helpers.Object && params[p].IsDefaultFormEncoding() { - // if the param is an object, and we're using default encoding, then we need to - // validate the schema. 
- decoded := helpers.ConstructParamMapFromQueryParamInput(queryParams) - validationErrors = append(validationErrors, - ValidateParameterSchema(sch, - decoded, - "", - "Query array parameter", - "The query parameter (which is an array)", - params[p].Name, - helpers.ParameterValidation, - helpers.ParameterValidationQuery, v.options)...) - break doneLooking - } - } - // if there is no match, check if the param is required or not. - if params[p].Required != nil && *params[p].Required { - // Render schema for missing parameter - var sch *base.Schema - if params[p].Schema != nil { - sch = params[p].Schema.Schema() - } else { - for pair := orderedmap.First(params[p].Content); pair != nil; pair = pair.Next() { - sch = pair.Value().Schema.Schema() - break - } - } - var renderedSchema string - if sch != nil { - rendered, _ := sch.RenderInline() - schemaBytes, _ := json.Marshal(rendered) - renderedSchema = string(schemaBytes) - } - validationErrors = append(validationErrors, errors.QueryParameterMissing(params[p], pathValue, operation, renderedSchema)) - } - } - } - } - - errors.PopulateValidationErrors(validationErrors, request, pathValue) - - if len(validationErrors) > 0 { - return false, validationErrors - } - - // strict mode: check for undeclared query parameters - if v.options.StrictMode { - undeclaredParams := strict.ValidateQueryParams(request, params, v.options) - for _, undeclared := range undeclaredParams { - validationErrors = append(validationErrors, - errors.UndeclaredQueryParamError( - undeclared.Path, - undeclared.Name, - undeclared.Value, - undeclared.DeclaredProperties, - request.URL.Path, - request.Method, - )) - } - } - - if len(validationErrors) > 0 { - return false, validationErrors - } - return true, nil -} - -func (v *paramValidator) validateSimpleParam(sch *base.Schema, rawParam string, parsedParam any, parameter *v3.Parameter, pathTemplate string, operation string, renderedSchema string) (validationErrors []*errors.ValidationError) { - // check if 
the param is within an enum - if sch.Enum != nil { - matchFound := false - for _, enumVal := range sch.Enum { - if strings.TrimSpace(rawParam) == fmt.Sprint(enumVal.Value) { - matchFound = true - break - } - } - if !matchFound { - return []*errors.ValidationError{errors.IncorrectQueryParamEnum(parameter, rawParam, sch, pathTemplate, operation, renderedSchema)} - } - } - - return ValidateSingleParameterSchema( - sch, - parsedParam, - "Query parameter", - "The query parameter", - parameter.Name, - helpers.ParameterValidation, - helpers.ParameterValidationQuery, - v.options, - pathTemplate, - operation, - ) -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/parameters/validate_parameter.go b/vendor/github.com/pb33f/libopenapi-validator/parameters/validate_parameter.go deleted file mode 100644 index 562a93c2ebd..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/parameters/validate_parameter.go +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package parameters - -import ( - "encoding/json" - "fmt" - "net/url" - "reflect" - "strings" - - "github.com/pb33f/libopenapi/datamodel/high/base" - "github.com/pb33f/libopenapi/utils" - "github.com/santhosh-tekuri/jsonschema/v6" - "golang.org/x/text/language" - "golang.org/x/text/message" - - stdError "errors" - - "github.com/pb33f/libopenapi-validator/config" - "github.com/pb33f/libopenapi-validator/errors" - "github.com/pb33f/libopenapi-validator/helpers" -) - -func ValidateSingleParameterSchema( - schema *base.Schema, - rawObject any, - entity string, - reasonEntity string, - name string, - validationType string, - subValType string, - o *config.ValidationOptions, - pathTemplate string, - operation string, -) (validationErrors []*errors.ValidationError) { - // Get the JSON Schema for the parameter definition. 
- jsonSchema, err := buildJsonRender(schema) - if err != nil { - return validationErrors - } - - // Attempt to compile the JSON Schema - jsch, err := helpers.NewCompiledSchema(name, jsonSchema, o) - if err != nil { - return validationErrors - } - - // Validate the object and report any errors. - scErrs := jsch.Validate(rawObject) - var werras *jsonschema.ValidationError - if stdError.As(scErrs, &werras) { - validationErrors = formatJsonSchemaValidationError(schema, werras, entity, reasonEntity, name, validationType, subValType, pathTemplate, operation) - } - return validationErrors -} - -// buildJsonRender build a JSON render of the schema. -func buildJsonRender(schema *base.Schema) ([]byte, error) { - if schema == nil { - // Sanity Check - return nil, stdError.New("buildJSONRender nil pointer") - } - - renderedSchema, err := schema.Render() - if err != nil { - return nil, err - } - - return utils.ConvertYAMLtoJSON(renderedSchema) -} - -// ValidateParameterSchema will validate a parameter against a raw object, or a blob of json/yaml. -// It will return a list of validation errors, if any. -// -// schema: the schema to validate against -// rawObject: the object to validate (leave empty if using a blob) -// rawBlob: the blob to validate (leave empty if using an object) -// entity: the entity being validated -// reasonEntity: the entity that caused the validation to be called -// name: the name of the parameter -// validationType: the type of validation being performed -// subValType: the type of sub-validation being performed -func ValidateParameterSchema( - schema *base.Schema, - rawObject any, - rawBlob, - entity, - reasonEntity, - name, - validationType, - subValType string, - validationOptions *config.ValidationOptions, -) []*errors.ValidationError { - var validationErrors []*errors.ValidationError - - // 1. build a JSON render of the schema. 
- renderCtx := base.NewInlineRenderContext() - renderedSchema, _ := schema.RenderInlineWithContext(renderCtx) - jsonSchema, _ := utils.ConvertYAMLtoJSON(renderedSchema) - - // 2. decode the object into a json blob. - var decodedObj interface{} - rawIsMap := false - validEncoding := false - if rawObject != nil { - // check what type of object it is - ot := reflect.TypeOf(rawObject) - var ok bool - switch ot.Kind() { - case reflect.Map: - if decodedObj, ok = rawObject.(map[string]interface{}); ok { - rawIsMap = true - validEncoding = true - } else { - rawIsMap = true - } - } - } else { - decodedString, _ := url.QueryUnescape(rawBlob) - err := json.Unmarshal([]byte(decodedString), &decodedObj) - if err != nil { - decodedObj = rawBlob - } - validEncoding = true - } - // 3. create a new json schema compiler and add the schema to it - jsch, err := helpers.NewCompiledSchema(name, jsonSchema, validationOptions) - if err != nil { - // schema compilation failed, return validation error instead of panicking - validationErrors = append(validationErrors, &errors.ValidationError{ - ValidationType: validationType, - ValidationSubType: subValType, - Message: fmt.Sprintf("%s '%s' failed schema compilation", entity, name), - Reason: fmt.Sprintf("%s '%s' schema compilation failed: %s", - reasonEntity, name, err.Error()), - SpecLine: 1, - SpecCol: 0, - ParameterName: name, - HowToFix: "check the parameter schema for invalid JSON Schema syntax, complex regex patterns, or unsupported schema constructs", - Context: string(jsonSchema), - }) - return validationErrors - } - - // 4. 
validate the object against the schema - var scErrs error - if validEncoding { - p := decodedObj - if rawIsMap { - if g, ko := rawObject.(map[string]interface{}); ko { - if len(g) == 0 || (g[""] != nil && g[""] == "") { - p = nil - } - } - } - if p != nil { - - // check if any of the items have an empty key - skip := false - if rawIsMap { - for k := range p.(map[string]interface{}) { - if k == "" { - validationErrors = append(validationErrors, &errors.ValidationError{ - ValidationType: validationType, - ValidationSubType: subValType, - Message: fmt.Sprintf("%s '%s' failed to validate", entity, name), - Reason: fmt.Sprintf("%s '%s' is defined as an object, "+ - "however it failed to pass a schema validation", reasonEntity, name), - SpecLine: schema.GoLow().Type.KeyNode.Line, - SpecCol: schema.GoLow().Type.KeyNode.Column, - SchemaValidationErrors: nil, - HowToFix: errors.HowToFixInvalidSchema, - }) - skip = true - break - } - } - } - if !skip { - scErrs = jsch.Validate(p) - } - } - } - var werras *jsonschema.ValidationError - if stdError.As(scErrs, &werras) { - validationErrors = formatJsonSchemaValidationError(schema, werras, entity, reasonEntity, name, validationType, subValType, "", "") - } - - // if there are no validationErrors, check that the supplied value is even JSON - if len(validationErrors) == 0 { - if rawIsMap { - if !validEncoding { - // add the error to the list - validationErrors = append(validationErrors, &errors.ValidationError{ - ValidationType: validationType, - ValidationSubType: subValType, - Message: fmt.Sprintf("%s '%s' cannot be decoded", entity, name), - Reason: fmt.Sprintf("%s '%s' is defined as an object, "+ - "however it failed to be decoded as an object", reasonEntity, name), - SpecLine: schema.GoLow().RootNode.Line, - SpecCol: schema.GoLow().RootNode.Column, - HowToFix: errors.HowToFixDecodingError, - }) - } - } - } - return validationErrors -} - -func formatJsonSchemaValidationError(schema *base.Schema, scErrs 
*jsonschema.ValidationError, entity string, reasonEntity string, name string, validationType string, subValType string, pathTemplate string, operation string) (validationErrors []*errors.ValidationError) { - // flatten the validationErrors - schFlatErrs := scErrs.BasicOutput().Errors - var schemaValidationErrors []*errors.SchemaValidationFailure - for q := range schFlatErrs { - er := schFlatErrs[q] - - errMsg := er.Error.Kind.LocalizedString(message.NewPrinter(language.Tag{})) - if er.KeywordLocation == "" || helpers.IgnoreRegex.MatchString(errMsg) { - continue // ignore this error, it's not useful - } - - // Construct full OpenAPI path for KeywordLocation if pathTemplate and operation are provided - keywordLocation := er.KeywordLocation - if pathTemplate != "" && operation != "" && validationType == helpers.ParameterValidation { - // er.KeywordLocation is relative to the schema (e.g., "/minLength" or "/enum") - keyword := strings.TrimPrefix(er.KeywordLocation, "/") - keywordLocation = helpers.ConstructParameterJSONPointer(pathTemplate, operation, name, keyword) - } - - fail := &errors.SchemaValidationFailure{ - Reason: errMsg, - FieldName: helpers.ExtractFieldNameFromStringLocation(er.InstanceLocation), - FieldPath: helpers.ExtractJSONPathFromStringLocation(er.InstanceLocation), - InstancePath: helpers.ConvertStringLocationToPathSegments(er.InstanceLocation), - KeywordLocation: keywordLocation, - OriginalJsonSchemaError: scErrs, - } - if schema != nil { - renderCtx := base.NewInlineRenderContext() - rendered, err := schema.RenderInlineWithContext(renderCtx) - if err == nil && rendered != nil { - renderedBytes, _ := json.Marshal(rendered) - fail.ReferenceSchema = string(renderedBytes) - } - } - schemaValidationErrors = append(schemaValidationErrors, fail) - } - schemaType := "undefined" - line := 0 - col := 0 - if len(schema.Type) > 0 { - schemaType = schema.Type[0] - line = schema.GoLow().Type.KeyNode.Line - col = schema.GoLow().Type.KeyNode.Column - } else { - 
var sTypes []string - seen := make(map[string]struct{}) - extractTypes := func(s *base.SchemaProxy) { - pSch := s.Schema() - if pSch != nil { - for _, typ := range pSch.Type { - if _, ok := seen[typ]; !ok { - sTypes = append(sTypes, typ) - seen[typ] = struct{}{} - } - } - } - } - processPoly := func(schemas []*base.SchemaProxy) { - for _, s := range schemas { - extractTypes(s) - } - } - - // check if there is polymorphism going on here. - if len(schema.AnyOf) > 0 || len(schema.AllOf) > 0 || len(schema.OneOf) > 0 { - processPoly(schema.AnyOf) - processPoly(schema.AllOf) - processPoly(schema.OneOf) - - sep := "or" - if len(schema.AllOf) > 0 { - sep = "and a" - } - schemaType = strings.Join(sTypes, fmt.Sprintf(" %s ", sep)) - } - - line = schema.GoLow().RootNode.Line - col = schema.GoLow().RootNode.Column - } - - validationErrors = append(validationErrors, &errors.ValidationError{ - ValidationType: validationType, - ValidationSubType: subValType, - Message: fmt.Sprintf("%s '%s' failed to validate", entity, name), - Reason: fmt.Sprintf("%s '%s' is defined as an %s, "+ - "however it failed to pass a schema validation", reasonEntity, name, schemaType), - SpecLine: line, - SpecCol: col, - ParameterName: name, - SchemaValidationErrors: schemaValidationErrors, - HowToFix: errors.HowToFixInvalidSchema, - }) - return validationErrors -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/parameters/validate_security.go b/vendor/github.com/pb33f/libopenapi-validator/parameters/validate_security.go deleted file mode 100644 index 15763aa4ef5..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/parameters/validate_security.go +++ /dev/null @@ -1,235 +0,0 @@ -// Copyright 2023-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// SPDX-License-Identifier: MIT - -package parameters - -import ( - "fmt" - "net/http" - "strings" - - "github.com/pb33f/libopenapi/datamodel/high/base" - v3 "github.com/pb33f/libopenapi/datamodel/high/v3" - 
"github.com/pb33f/libopenapi/orderedmap" - - "github.com/pb33f/libopenapi-validator/errors" - "github.com/pb33f/libopenapi-validator/helpers" - "github.com/pb33f/libopenapi-validator/paths" -) - -func (v *paramValidator) ValidateSecurity(request *http.Request) (bool, []*errors.ValidationError) { - pathItem, errs, foundPath := paths.FindPath(request, v.document, v.options) - if len(errs) > 0 { - return false, errs - } - return v.ValidateSecurityWithPathItem(request, pathItem, foundPath) -} - -func (v *paramValidator) ValidateSecurityWithPathItem(request *http.Request, pathItem *v3.PathItem, pathValue string) (bool, []*errors.ValidationError) { - if pathItem == nil { - return false, []*errors.ValidationError{{ - ValidationType: helpers.PathValidation, - ValidationSubType: helpers.ValidationMissing, - Message: fmt.Sprintf("%s Path '%s' not found", request.Method, request.URL.Path), - Reason: fmt.Sprintf("The %s request contains a path of '%s' "+ - "however that path, or the %s method for that path does not exist in the specification", - request.Method, request.URL.Path, request.Method), - SpecLine: -1, - SpecCol: -1, - HowToFix: errors.HowToFixPath, - }} - } - if !v.options.SecurityValidation { - return true, nil - } - // extract security for the operation - security := helpers.ExtractSecurityForOperation(request, pathItem) - - if security == nil { - return true, nil - } - - var allErrors []*errors.ValidationError - - // each security requirement in the array is OR'd - any one passing is sufficient - for _, sec := range security { - if sec.ContainsEmptyRequirement { - return true, nil - } - - // within a requirement, all schemes are AND'd - all must pass - requirementSatisfied := true - var requirementErrors []*errors.ValidationError - - for pair := orderedmap.First(sec.Requirements); pair != nil; pair = pair.Next() { - secName := pair.Key() - - // look up security from components - if v.document.Components == nil || 
v.document.Components.SecuritySchemes.GetOrZero(secName) == nil { - validationErrors := []*errors.ValidationError{ - { - Message: fmt.Sprintf("Security scheme '%s' is missing", secName), - Reason: fmt.Sprintf("The security scheme '%s' is defined as being required, "+ - "however it's missing from the components", secName), - ValidationType: helpers.SecurityValidation, - SpecLine: sec.GoLow().Requirements.ValueNode.Line, - SpecCol: sec.GoLow().Requirements.ValueNode.Column, - HowToFix: "Add the missing security scheme to the components", - }, - } - errors.PopulateValidationErrors(validationErrors, request, pathValue) - requirementSatisfied = false - requirementErrors = append(requirementErrors, validationErrors...) - continue - } - - secScheme := v.document.Components.SecuritySchemes.GetOrZero(secName) - schemeValid, schemeErrors := v.validateSecurityScheme(secScheme, sec, request, pathValue) - if !schemeValid { - requirementSatisfied = false - requirementErrors = append(requirementErrors, schemeErrors...) - } - } - - // if all schemes in this requirement passed (AND), the overall security passes (OR) - if requirementSatisfied { - return true, nil - } - allErrors = append(allErrors, requirementErrors...) - } - - return false, allErrors -} - -// validateSecurityScheme checks if a single security scheme is satisfied by the request. 
-func (v *paramValidator) validateSecurityScheme( - secScheme *v3.SecurityScheme, - sec *base.SecurityRequirement, - request *http.Request, - pathValue string, -) (bool, []*errors.ValidationError) { - switch strings.ToLower(secScheme.Type) { - case "http": - return v.validateHTTPSecurityScheme(secScheme, sec, request, pathValue) - case "apikey": - return v.validateAPIKeySecurityScheme(secScheme, sec, request, pathValue) - } - // unknown scheme type - consider it valid to avoid false negatives - return true, nil -} - -func (v *paramValidator) validateHTTPSecurityScheme( - secScheme *v3.SecurityScheme, - sec *base.SecurityRequirement, - request *http.Request, - pathValue string, -) (bool, []*errors.ValidationError) { - authorizationHeader := request.Header.Get("Authorization") - if authorizationHeader == "" { - validationErrors := []*errors.ValidationError{ - { - Message: fmt.Sprintf("Authorization header for '%s' scheme", secScheme.Scheme), - Reason: "Authorization header was not found", - ValidationType: helpers.SecurityValidation, - ValidationSubType: secScheme.Scheme, - SpecLine: sec.GoLow().Requirements.ValueNode.Line, - SpecCol: sec.GoLow().Requirements.ValueNode.Column, - HowToFix: "Add an 'Authorization' header to this request", - }, - } - errors.PopulateValidationErrors(validationErrors, request, pathValue) - return false, validationErrors - } - if len(authorizationHeader) < len(secScheme.Scheme) || !strings.EqualFold(authorizationHeader[:len(secScheme.Scheme)], secScheme.Scheme) { - validationErrors := []*errors.ValidationError{ - { - Message: fmt.Sprintf("Authorization header scheme '%s' mismatch", secScheme.Scheme), - Reason: "Authorization header had incorrect scheme", - ValidationType: helpers.SecurityValidation, - ValidationSubType: secScheme.Scheme, - SpecLine: sec.GoLow().Requirements.ValueNode.Line, - SpecCol: sec.GoLow().Requirements.ValueNode.Column, - HowToFix: fmt.Sprintf("Use the scheme '%s' in the Authorization header "+ - "for this request", 
secScheme.Scheme), - }, - } - errors.PopulateValidationErrors(validationErrors, request, pathValue) - return false, validationErrors - } - return true, nil -} - -func (v *paramValidator) validateAPIKeySecurityScheme( - secScheme *v3.SecurityScheme, - sec *base.SecurityRequirement, - request *http.Request, - pathValue string, -) (bool, []*errors.ValidationError) { - switch secScheme.In { - case "header": - if request.Header.Get(secScheme.Name) == "" { - validationErrors := []*errors.ValidationError{ - { - Message: fmt.Sprintf("API Key %s not found in header", secScheme.Name), - Reason: "API Key not found in http header for security scheme 'apiKey' with type 'header'", - ValidationType: helpers.SecurityValidation, - ValidationSubType: "apiKey", - SpecLine: sec.GoLow().Requirements.ValueNode.Line, - SpecCol: sec.GoLow().Requirements.ValueNode.Column, - HowToFix: fmt.Sprintf("Add the API Key via '%s' as a header of the request", secScheme.Name), - }, - } - errors.PopulateValidationErrors(validationErrors, request, pathValue) - return false, validationErrors - } - return true, nil - - case "query": - if request.URL.Query().Get(secScheme.Name) == "" { - copyUrl := *request.URL - fixed := ©Url - q := fixed.Query() - q.Add(secScheme.Name, "your-api-key") - fixed.RawQuery = q.Encode() - - validationErrors := []*errors.ValidationError{ - { - Message: fmt.Sprintf("API Key %s not found in query", secScheme.Name), - Reason: "API Key not found in URL query for security scheme 'apiKey' with type 'query'", - ValidationType: helpers.SecurityValidation, - ValidationSubType: "apiKey", - SpecLine: sec.GoLow().Requirements.ValueNode.Line, - SpecCol: sec.GoLow().Requirements.ValueNode.Column, - HowToFix: fmt.Sprintf("Add an API Key via '%s' to the query string "+ - "of the URL, for example '%s'", secScheme.Name, fixed.String()), - }, - } - errors.PopulateValidationErrors(validationErrors, request, pathValue) - return false, validationErrors - } - return true, nil - - case "cookie": - 
cookies := request.Cookies() - for _, cookie := range cookies { - if cookie.Name == secScheme.Name { - return true, nil - } - } - validationErrors := []*errors.ValidationError{ - { - Message: fmt.Sprintf("API Key %s not found in cookies", secScheme.Name), - Reason: "API Key not found in http request cookies for security scheme 'apiKey' with type 'cookie'", - ValidationType: helpers.SecurityValidation, - ValidationSubType: "apiKey", - SpecLine: sec.GoLow().Requirements.ValueNode.Line, - SpecCol: sec.GoLow().Requirements.ValueNode.Column, - HowToFix: fmt.Sprintf("Submit an API Key '%s' as a cookie with the request", secScheme.Name), - }, - } - errors.PopulateValidationErrors(validationErrors, request, pathValue) - return false, validationErrors - } - - return true, nil -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/parameters/validation_functions.go b/vendor/github.com/pb33f/libopenapi-validator/parameters/validation_functions.go deleted file mode 100644 index f931588fa2d..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/parameters/validation_functions.go +++ /dev/null @@ -1,310 +0,0 @@ -// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package parameters - -import ( - "encoding/json" - "fmt" - "slices" - "strconv" - "strings" - - "github.com/pb33f/libopenapi/datamodel/high/base" - v3 "github.com/pb33f/libopenapi/datamodel/high/v3" - - "github.com/pb33f/libopenapi-validator/config" - "github.com/pb33f/libopenapi-validator/errors" - "github.com/pb33f/libopenapi-validator/helpers" -) - -// ValidateCookieArray will validate a cookie parameter that is an array -func ValidateCookieArray( - sch *base.Schema, param *v3.Parameter, value string, pathTemplate string, operation string, renderedSchema string, -) []*errors.ValidationError { - var validationErrors []*errors.ValidationError - itemsSchema := sch.Items.A.Schema() - - var renderedItemsSchema string - if itemsSchema != nil { - rendered, _ := 
itemsSchema.RenderInline() - schemaBytes, _ := json.Marshal(rendered) - renderedItemsSchema = string(schemaBytes) - } - - // header arrays can only be encoded as CSV - items := helpers.ExplodeQueryValue(value, helpers.DefaultDelimited) - - // now check each item in the array - for _, item := range items { - // for each type defined in the item's schema, check the item - for _, itemType := range itemsSchema.Type { - switch itemType { - case helpers.Integer, helpers.Number: - if _, err := strconv.ParseFloat(item, 64); err != nil { - validationErrors = append(validationErrors, - errors.IncorrectCookieParamArrayNumber(param, item, sch, itemsSchema, pathTemplate, operation, renderedItemsSchema)) - } - case helpers.Boolean: - if _, err := strconv.ParseBool(item); err != nil { - validationErrors = append(validationErrors, - errors.IncorrectCookieParamArrayBoolean(param, item, sch, itemsSchema, pathTemplate, operation, renderedItemsSchema)) - break - } - // check for edge-cases "0" and "1" which can also be parsed into valid booleans - if item == "0" || item == "1" { - validationErrors = append(validationErrors, - errors.IncorrectCookieParamArrayBoolean(param, item, sch, itemsSchema, pathTemplate, operation, renderedItemsSchema)) - } - case helpers.String: - // do nothing for now. 
- continue - } - } - } - return validationErrors -} - -// ValidateHeaderArray will validate a header parameter that is an array -func ValidateHeaderArray( - sch *base.Schema, param *v3.Parameter, value string, pathTemplate string, operation string, renderedSchema string, -) []*errors.ValidationError { - var validationErrors []*errors.ValidationError - itemsSchema := sch.Items.A.Schema() - - var renderedItemsSchema string - if itemsSchema != nil { - rendered, _ := itemsSchema.RenderInline() - schemaBytes, _ := json.Marshal(rendered) - renderedItemsSchema = string(schemaBytes) - } - - // header arrays can only be encoded as CSV - items := helpers.ExplodeQueryValue(value, helpers.DefaultDelimited) - - // now check each item in the array - for _, item := range items { - // for each type defined in the item's schema, check the item - for _, itemType := range itemsSchema.Type { - switch itemType { - case helpers.Integer, helpers.Number: - if _, err := strconv.ParseFloat(item, 64); err != nil { - validationErrors = append(validationErrors, - errors.IncorrectHeaderParamArrayNumber(param, item, sch, itemsSchema, pathTemplate, operation, renderedItemsSchema)) - } - case helpers.Boolean: - if _, err := strconv.ParseBool(item); err != nil { - validationErrors = append(validationErrors, - errors.IncorrectHeaderParamArrayBoolean(param, item, sch, itemsSchema, pathTemplate, operation, renderedItemsSchema)) - break - } - // check for edge-cases "0" and "1" which can also be parsed into valid booleans - if item == "0" || item == "1" { - validationErrors = append(validationErrors, - errors.IncorrectHeaderParamArrayBoolean(param, item, sch, itemsSchema, pathTemplate, operation, renderedItemsSchema)) - } - case helpers.String: - // do nothing for now. 
- continue - } - } - } - return validationErrors -} - -// ValidateQueryArray will validate a query parameter that is an array -func ValidateQueryArray( - sch *base.Schema, param *v3.Parameter, ef string, contentWrapped bool, validationOptions *config.ValidationOptions, pathTemplate string, operation string, renderedSchema string, -) []*errors.ValidationError { - var validationErrors []*errors.ValidationError - itemsSchema := sch.Items.A.Schema() - - var renderedItemsSchema string - if itemsSchema != nil { - rendered, _ := itemsSchema.RenderInline() - schemaBytes, _ := json.Marshal(rendered) - renderedItemsSchema = string(schemaBytes) - } - - // check for an exploded bit on the schema. - // if it's exploded, then we need to check each item in the array - // if it's not exploded, then we need to check the whole array as a string - var items []string - if param.IsExploded() { - items = helpers.ExplodeQueryValue(ef, param.Style) - } else { - // check for a style of form (or no style) and if so, explode the value - if param.Style == "" || param.Style == helpers.Form { - if !contentWrapped { - items = helpers.ExplodeQueryValue(ef, param.Style) - } else { - items = []string{ef} - } - } else { - switch param.Style { - case helpers.PipeDelimited, helpers.SpaceDelimited: - items = helpers.ExplodeQueryValue(ef, param.Style) - } - } - } - - // check if the param is within an enum - checkEnum := func(item string) { - // check if the array param is within an enum - if sch.Items.IsA() { - itemsSch := sch.Items.A.Schema() - if itemsSch.Enum != nil { - matchFound := false - for _, enumVal := range itemsSch.Enum { - if strings.TrimSpace(item) == fmt.Sprint(enumVal.Value) { - matchFound = true - break - } - } - if !matchFound { - validationErrors = append(validationErrors, - errors.IncorrectQueryParamEnumArray(param, item, sch, pathTemplate, operation, renderedItemsSchema)) - } - } - } - } - - // now check each item in the array - seen := make(map[string]struct{}) - uniqueItems := 
true - var duplicates []string - for _, item := range items { - - if _, exists := seen[item]; exists { - uniqueItems = false - duplicates = append(duplicates, item) - } - seen[item] = struct{}{} - - // for each type defined in the item's schema, check the item - for _, itemType := range itemsSchema.Type { - switch itemType { - case helpers.Integer: - if _, err := strconv.ParseInt(item, 10, 64); err != nil { - validationErrors = append(validationErrors, - errors.IncorrectQueryParamArrayInteger(param, item, sch, itemsSchema, pathTemplate, operation, renderedItemsSchema)) - break - } - // will it blend? - checkEnum(item) - case helpers.Number: - if _, err := strconv.ParseFloat(item, 64); err != nil { - validationErrors = append(validationErrors, - errors.IncorrectQueryParamArrayNumber(param, item, sch, itemsSchema, pathTemplate, operation, renderedItemsSchema)) - break - } - // will it blend? - checkEnum(item) - - case helpers.Boolean: - if _, err := strconv.ParseBool(item); err != nil { - validationErrors = append(validationErrors, - errors.IncorrectQueryParamArrayBoolean(param, item, sch, itemsSchema, pathTemplate, operation, renderedItemsSchema)) - } - case helpers.Object: - validationErrors = append(validationErrors, - ValidateParameterSchema(itemsSchema, - nil, - item, - "Query array parameter", - "The query parameter (which is an array)", - param.Name, - helpers.ParameterValidation, - helpers.ParameterValidationQuery, validationOptions)...) - - case helpers.String: - - // will it float? 
- checkEnum(item) - } - } - } - - // check for min and max items - if sch.MaxItems != nil { - if len(items) > int(*sch.MaxItems) { - validationErrors = append(validationErrors, - errors.IncorrectParamArrayMaxNumItems(param, sch, *sch.MaxItems, int64(len(items)), pathTemplate, operation, renderedSchema)) - } - } - - if sch.MinItems != nil { - if len(items) < int(*sch.MinItems) { - validationErrors = append(validationErrors, - errors.IncorrectParamArrayMinNumItems(param, sch, *sch.MinItems, int64(len(items)), pathTemplate, operation, renderedSchema)) - } - } - - // check for unique items - if sch.UniqueItems != nil { - if *sch.UniqueItems && !uniqueItems { - validationErrors = append(validationErrors, - errors.IncorrectParamArrayUniqueItems(param, sch, strings.Join(duplicates, ", "), pathTemplate, operation, renderedSchema)) - } - } - return validationErrors -} - -// ValidateQueryParamStyle will validate a query parameter by style -func ValidateQueryParamStyle(param *v3.Parameter, as []*helpers.QueryParam) []*errors.ValidationError { - var validationErrors []*errors.ValidationError -stopValidation: - for _, qp := range as { - for i := range qp.Values { - switch param.Style { - case helpers.DeepObject: - // check if the object has additional properties defined that treat this as an array - if param.Schema != nil { - pSchema := param.Schema.Schema() - if slices.Contains(pSchema.Type, helpers.Array) { - continue - } - if pSchema.AdditionalProperties != nil && pSchema.AdditionalProperties.IsA() { - addPropSchema := pSchema.AdditionalProperties.A.Schema() - if len(addPropSchema.Type) > 0 { - if slices.Contains(addPropSchema.Type, helpers.Array) { - // an array can have more than one value. 
- continue - } - } - } - } - if len(qp.Values) > 1 { - validationErrors = append(validationErrors, errors.InvalidDeepObject(param, qp)) - break stopValidation - } - - case helpers.PipeDelimited: - // check if explode is false, but we have used an array style - if !param.IsExploded() { - if len(qp.Values) > 1 { - validationErrors = append(validationErrors, errors.IncorrectPipeDelimiting(param, qp)) - break stopValidation - } - } - case helpers.SpaceDelimited: - // check if explode is false, but we have used an array style - if !param.IsExploded() { - if len(qp.Values) > 1 { - validationErrors = append(validationErrors, errors.IncorrectSpaceDelimiting(param, qp)) - break stopValidation - } - } - default: - // check for a delimited list. - if helpers.DoesFormParamContainDelimiter(qp.Values[i], param.Style) { - if param.Explode != nil && *param.Explode { - validationErrors = append(validationErrors, errors.IncorrectFormEncoding(param, qp, i)) - break stopValidation - } - } - } - } - } - return validationErrors // defaults to true if no style is set. -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/paths/package.go b/vendor/github.com/pb33f/libopenapi-validator/paths/package.go deleted file mode 100644 index c651ce5ca58..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/paths/package.go +++ /dev/null @@ -1,5 +0,0 @@ -// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -// Package paths contains all the logic, models and interfaces for validating OpenAPI 3+ Paths. 
-package paths diff --git a/vendor/github.com/pb33f/libopenapi-validator/paths/paths.go b/vendor/github.com/pb33f/libopenapi-validator/paths/paths.go deleted file mode 100644 index 635cfdb9275..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/paths/paths.go +++ /dev/null @@ -1,249 +0,0 @@ -// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package paths - -import ( - "fmt" - "net/http" - "net/url" - "path/filepath" - "regexp" - "strings" - - "github.com/pb33f/libopenapi/orderedmap" - - v3 "github.com/pb33f/libopenapi/datamodel/high/v3" - - "github.com/pb33f/libopenapi-validator/config" - "github.com/pb33f/libopenapi-validator/errors" - "github.com/pb33f/libopenapi-validator/helpers" -) - -// FindPath will find the path in the document that matches the request path. If a successful match was found, then -// the first return value will be a pointer to the PathItem. The second return value will contain any validation errors -// that were picked up when locating the path. -// The third return value will be the path that was found in the document, as it pertains to the contract, so all path -// parameters will not have been replaced with their values from the request - allowing model lookups. -// -// This function first tries a fast O(k) radix tree lookup (where k is path depth). If the radix tree -// doesn't find a match, it falls back to regex-based matching which handles complex path patterns -// like matrix-style ({;param}), label-style ({.param}), and OData-style (entities('{Entity}')). -// -// Path matching follows the OpenAPI specification: literal (concrete) paths take precedence over -// parameterized paths, regardless of definition order in the specification. 
-func FindPath(request *http.Request, document *v3.Document, options *config.ValidationOptions) (*v3.PathItem, []*errors.ValidationError, string) { - stripped := StripRequestPath(request, document) - - // Fast path: try radix tree first (O(k) where k = path depth) - // If no path lookup is provided, we will fall back to regex-based matching. - if options != nil && options.PathTree != nil { - if pathItem, matchedPath, found := options.PathTree.Lookup(stripped); found { - if pathHasMethod(pathItem, request.Method) { - return pathItem, nil, matchedPath - } - return pathItem, missingOperationError(request, matchedPath), matchedPath - } - } - - // Slow path: fall back to regex matching for complex paths (matrix, label, OData, etc.) - basePaths := getBasePaths(document) - - reqPathSegments := strings.Split(stripped, "/") - if reqPathSegments[0] == "" { - reqPathSegments = reqPathSegments[1:] - } - - var regexCache config.RegexCache - if options != nil { - regexCache = options.RegexCache - } - - candidates := make([]pathCandidate, 0, document.Paths.PathItems.Len()) - - for pair := orderedmap.First(document.Paths.PathItems); pair != nil; pair = pair.Next() { - path := pair.Key() - pathItem := pair.Value() - - pathForMatching := normalizePathForMatching(path, stripped) - - segs := strings.Split(pathForMatching, "/") - if segs[0] == "" { - segs = segs[1:] - } - - ok := comparePaths(segs, reqPathSegments, basePaths, regexCache) - if !ok { - continue - } - - // Compute specificity score and check if method exists - score := computeSpecificityScore(path) - hasMethod := pathHasMethod(pathItem, request.Method) - - candidates = append(candidates, pathCandidate{ - pathItem: pathItem, - path: path, - score: score, - hasMethod: hasMethod, - }) - } - - if len(candidates) == 0 { - validationErrors := []*errors.ValidationError{ - { - ValidationType: helpers.PathValidation, - ValidationSubType: helpers.ValidationMissing, - Message: fmt.Sprintf("%s Path '%s' not found", request.Method, 
request.URL.Path), - Reason: fmt.Sprintf("The %s request contains a path of '%s' "+ - "however that path, or the %s method for that path does not exist in the specification", - request.Method, request.URL.Path, request.Method), - SpecLine: -1, - SpecCol: -1, - HowToFix: errors.HowToFixPath, - }, - } - errors.PopulateValidationErrors(validationErrors, request, "") - return nil, validationErrors, "" - } - - bestWithMethod, bestOverall := selectMatches(candidates) - - if bestWithMethod != nil { - return bestWithMethod.pathItem, nil, bestWithMethod.path - } - - // path matches exist but none have the required method - return bestOverall.pathItem, missingOperationError(request, bestOverall.path), bestOverall.path -} - -// normalizePathForMatching removes the fragment from a path template unless -// the request path itself contains a fragment. -func normalizePathForMatching(path, requestPath string) string { - if strings.Contains(requestPath, "#") { - return path - } - if idx := strings.IndexByte(path, '#'); idx >= 0 { - return path[:idx] - } - return path -} - -func getBasePaths(document *v3.Document) []string { - // extract base path from document to check against paths. 
- var basePaths []string - for _, s := range document.Servers { - u, err := url.Parse(s.URL) - // if the host contains special characters, we should attempt to split and parse only the relative path - if err != nil { - // split at first occurrence - _, serverPath, _ := strings.Cut(strings.Replace(s.URL, "//", "", 1), "/") - - if !strings.HasPrefix(serverPath, "/") { - serverPath = "/" + serverPath - } - - u, _ = url.Parse(serverPath) - } - - if u != nil && u.Path != "" { - basePaths = append(basePaths, u.Path) - } - } - - return basePaths -} - -// StripRequestPath strips the base path from the request path, based on the server paths provided in the specification -func StripRequestPath(request *http.Request, document *v3.Document) string { - basePaths := getBasePaths(document) - - // strip any base path - stripped := stripBaseFromPath(request.URL.EscapedPath(), basePaths) - if request.URL.Fragment != "" { - stripped = fmt.Sprintf("%s#%s", stripped, request.URL.Fragment) - } - if len(stripped) > 0 && !strings.HasPrefix(stripped, "/") { - stripped = "/" + stripped - } - return stripped -} - -func checkPathAgainstBase(docPath, urlPath string, basePaths []string) bool { - if docPath == urlPath { - return true - } - for _, basePath := range basePaths { - if basePath[len(basePath)-1] == '/' { - basePath = basePath[:len(basePath)-1] - } - merged := fmt.Sprintf("%s%s", basePath, urlPath) - if docPath == merged { - return true - } - } - return false -} - -func stripBaseFromPath(path string, basePaths []string) string { - for i := range basePaths { - if strings.HasPrefix(path, basePaths[i]) { - return path[len(basePaths[i]):] - } - } - return path -} - -func comparePaths(mapped, requested, basePaths []string, regexCache config.RegexCache) bool { - if len(mapped) != len(requested) { - return false // short circuit out - } - var imploded []string - for i, seg := range mapped { - s := seg - var rgx *regexp.Regexp - - if regexCache != nil { - if cachedRegex, found := 
regexCache.Load(s); found { - rgx = cachedRegex.(*regexp.Regexp) - } - } - - if rgx == nil { - r, err := helpers.GetRegexForPath(seg) - if err != nil { - return false - } - - rgx = r - - if regexCache != nil { - regexCache.Store(seg, r) - } - } - - if rgx.MatchString(requested[i]) { - s = requested[i] - } - imploded = append(imploded, s) - } - l := filepath.Join(imploded...) - r := filepath.Join(requested...) - return checkPathAgainstBase(l, r, basePaths) -} - -// missingOperationError returns a validation error for when a path was found but the HTTP method doesn't exist. -func missingOperationError(request *http.Request, matchedPath string) []*errors.ValidationError { - validationErrors := []*errors.ValidationError{{ - ValidationType: helpers.PathValidation, - ValidationSubType: helpers.ValidationMissingOperation, - Message: fmt.Sprintf("%s Path '%s' not found", request.Method, request.URL.Path), - Reason: fmt.Sprintf("The %s method for that path does not exist in the specification", - request.Method), - SpecLine: -1, - SpecCol: -1, - HowToFix: errors.HowToFixPath, - }} - errors.PopulateValidationErrors(validationErrors, request, matchedPath) - return validationErrors -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/paths/specificity.go b/vendor/github.com/pb33f/libopenapi-validator/paths/specificity.go deleted file mode 100644 index ef84b796a32..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/paths/specificity.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2023-2025 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package paths - -import ( - "net/http" - "strings" - - v3 "github.com/pb33f/libopenapi/datamodel/high/v3" -) - -// pathCandidate represents a potential path match with metadata for selection. -type pathCandidate struct { - pathItem *v3.PathItem - path string - score int - hasMethod bool -} - -// computeSpecificityScore calculates how specific a path template is. 
-// literal segments score higher than parameterized segments, ensuring -// "/pets/mine" is preferred over "/pets/{id}" per OpenAPI spec. -// -// scoring: -// - literal segment: 1000 points -// - parameter segment: 1 point -// -// this weighting ensures any path with more literal segments always wins, -// regardless of parameter positions. -func computeSpecificityScore(pathTemplate string) int { - segments := strings.Split(pathTemplate, "/") - score := 0 - - for _, seg := range segments { - if seg == "" { - continue - } - if isParameterSegment(seg) { - score += 1 - } else { - score += 1000 - } - } - return score -} - -// isParameterSegment returns true if the segment contains a path parameter. -// handles standard {param}, label {.param}, and exploded {param*} formats. -func isParameterSegment(seg string) bool { - return strings.Contains(seg, "{") && strings.Contains(seg, "}") -} - -// pathHasMethod checks if the PathItem has an operation for the given HTTP method. -func pathHasMethod(pathItem *v3.PathItem, method string) bool { - switch method { - case http.MethodGet: - return pathItem.Get != nil - case http.MethodPost: - return pathItem.Post != nil - case http.MethodPut: - return pathItem.Put != nil - case http.MethodDelete: - return pathItem.Delete != nil - case http.MethodOptions: - return pathItem.Options != nil - case http.MethodHead: - // Treat HEAD as present when either - // a Head operation exists or, if Head is absent, when a Get exists - // per HTTP semantics (HEAD can be handled by GET if no explicit - // HEAD operation is defined). - return pathItem.Head != nil || pathItem.Get != nil - case http.MethodPatch: - return pathItem.Patch != nil - case http.MethodTrace: - return pathItem.Trace != nil - } - return false -} - -// selectMatches finds the best matching candidates in a single pass. -// returns the highest-scoring candidate with the method (or nil), and -// the highest-scoring candidate overall (for error reporting). 
-func selectMatches(candidates []pathCandidate) (withMethod, highest *pathCandidate) { - for i := range candidates { - c := &candidates[i] - - if c.hasMethod && (withMethod == nil || c.score > withMethod.score) { - withMethod = c - } - - if highest == nil || c.score > highest.score { - highest = c - } - } - return withMethod, highest -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/radix/path_tree.go b/vendor/github.com/pb33f/libopenapi-validator/radix/path_tree.go deleted file mode 100644 index ae7c634bb58..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/radix/path_tree.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2026 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package radix - -import ( - v3 "github.com/pb33f/libopenapi/datamodel/high/v3" -) - -// PathLookup defines the interface for radix tree path matching implementations. -// The default implementation provides O(k) lookup where k is the path segment count. -// -// Note: This interface handles URL path matching only. HTTP method validation -// is performed separately after the PathItem is retrieved, since a single path -// (e.g., "/users/{id}") can support multiple HTTP methods (GET, POST, PUT, DELETE). -type PathLookup interface { - // Lookup finds the PathItem for a given URL path. - // Returns the matched PathItem, the path template (e.g., "/users/{id}"), and whether found. - Lookup(urlPath string) (pathItem *v3.PathItem, matchedPath string, found bool) -} - -// PathTree is a radix tree optimized for OpenAPI path matching. -// It provides O(k) lookup where k is the number of path segments (typically 3-5), -// with minimal allocations during lookup. -// -// This is a thin wrapper around the generic Tree, specialized for -// OpenAPI PathItem values. It implements the PathLookup interface. -type PathTree struct { - tree *Tree[*v3.PathItem] -} - -// Ensure PathTree implements PathLookup at compile time. 
-var _ PathLookup = (*PathTree)(nil) - -// NewPathTree creates a new empty radix tree for path matching. -func NewPathTree() *PathTree { - return &PathTree{ - tree: New[*v3.PathItem](), - } -} - -// Insert adds a path and its PathItem to the tree. -// Path should be in OpenAPI format, e.g., "/users/{id}/posts" -func (t *PathTree) Insert(path string, pathItem *v3.PathItem) { - t.tree.Insert(path, pathItem) -} - -// Lookup finds the PathItem for a given request path. -// Returns the PathItem, the matched path template, and whether a match was found. -func (t *PathTree) Lookup(urlPath string) (*v3.PathItem, string, bool) { - return t.tree.Lookup(urlPath) -} - -// Size returns the number of paths stored in the tree. -func (t *PathTree) Size() int { - return t.tree.Size() -} - -// Walk calls the given function for each path in the tree. -func (t *PathTree) Walk(fn func(path string, pathItem *v3.PathItem) bool) { - t.tree.Walk(fn) -} - -// BuildPathTree creates a PathTree from an OpenAPI document. -// This should be called once during validator initialization. -func BuildPathTree(doc *v3.Document) *PathTree { - tree := NewPathTree() - - if doc == nil || doc.Paths == nil || doc.Paths.PathItems == nil { - return tree - } - - for pair := doc.Paths.PathItems.First(); pair != nil; pair = pair.Next() { - path := pair.Key() - pathItem := pair.Value() - tree.Insert(path, pathItem) - } - - return tree -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/radix/tree.go b/vendor/github.com/pb33f/libopenapi-validator/radix/tree.go deleted file mode 100644 index 8383ad4ef20..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/radix/tree.go +++ /dev/null @@ -1,234 +0,0 @@ -// Copyright 2026 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -// Package radix provides a radix tree (prefix tree) implementation optimized for -// URL path matching with support for parameterized segments. 
-// -// The tree provides O(k) lookup complexity where k is the number of path segments -// (typically 3-5 for REST APIs), making it ideal for routing and path matching. -// -// Example usage: -// -// tree := radix.New[*MyHandler]() -// tree.Insert("/users/{id}", handler1) -// tree.Insert("/users/{id}/posts", handler2) -// -// handler, path, found := tree.Lookup("/users/123/posts") -// // handler = handler2, path = "/users/{id}/posts", found = true -package radix - -import "strings" - -// Tree is a radix tree optimized for URL path matching. -// It supports both literal path segments and parameterized segments like {id}. -// T is the type of value stored at leaf nodes. -type Tree[T any] struct { - root *node[T] - size int -} - -// node represents a node in the radix tree. -type node[T any] struct { - // children maps literal path segments to child nodes - children map[string]*node[T] - - // paramChild handles parameterized segments like {id} - // Only one param child is allowed per node - paramChild *node[T] - - // paramName stores the parameter name without braces (e.g., "id" from "{id}") - paramName string - - // leaf contains the stored value and path template for endpoints - leaf *leafData[T] -} - -// leafData stores the value and original path template for a leaf node. -type leafData[T any] struct { - value T - path string -} - -// New creates a new empty radix tree. -func New[T any]() *Tree[T] { - return &Tree[T]{ - root: &node[T]{ - children: make(map[string]*node[T]), - }, - } -} - -// Insert adds a path and its associated value to the tree. -// The path should use {param} syntax for parameterized segments. -// Examples: "/users", "/users/{id}", "/users/{userId}/posts/{postId}" -// -// Returns true if a new path was inserted, false if an existing path was updated. 
-func (t *Tree[T]) Insert(path string, value T) bool { - if t.root == nil { - t.root = &node[T]{children: make(map[string]*node[T])} - } - - segments := splitPath(path) - n := t.root - isNew := true - - for _, seg := range segments { - if isParam(seg) { - // Parameter segment - if n.paramChild == nil { - n.paramChild = &node[T]{ - children: make(map[string]*node[T]), - paramName: extractParamName(seg), - } - } - n = n.paramChild - } else { - // Literal segment - child, exists := n.children[seg] - if !exists { - child = &node[T]{children: make(map[string]*node[T])} - n.children[seg] = child - } - n = child - } - } - - // Check if this is a new path or an update - if n.leaf != nil { - isNew = false - } else { - t.size++ - } - - // Set the leaf data - n.leaf = &leafData[T]{ - value: value, - path: path, - } - - return isNew -} - -// Lookup finds the value for a given URL path. -// Returns the value, the matched path template, and whether a match was found. -// -// Literal matches take precedence over parameter matches per OpenAPI specification. -// For example, "/users/admin" will match "/users/admin" before "/users/{id}". -func (t *Tree[T]) Lookup(urlPath string) (value T, matchedPath string, found bool) { - var zero T - if t.root == nil { - return zero, "", false - } - - segments := splitPath(urlPath) - leaf := t.lookupRecursive(t.root, segments, 0) - - if leaf != nil { - return leaf.value, leaf.path, true - } - return zero, "", false -} - -// lookupRecursive performs the tree traversal. -// It prioritizes literal matches over parameter matches. 
-func (t *Tree[T]) lookupRecursive(n *node[T], segments []string, depth int) *leafData[T] { - // Base case: consumed all segments - if depth == len(segments) { - return n.leaf - } - - seg := segments[depth] - - // Try literal match first (higher specificity) - if child, exists := n.children[seg]; exists { - if result := t.lookupRecursive(child, segments, depth+1); result != nil { - return result - } - } - - // Fall back to parameter match - if n.paramChild != nil { - if result := t.lookupRecursive(n.paramChild, segments, depth+1); result != nil { - return result - } - } - - return nil -} - -// Size returns the number of paths stored in the tree. -func (t *Tree[T]) Size() int { - return t.size -} - -// Clear removes all entries from the tree. -func (t *Tree[T]) Clear() { - t.root = &node[T]{children: make(map[string]*node[T])} - t.size = 0 -} - -// Walk calls the given function for each path in the tree. -// The function receives the path template and its associated value. -// If the function returns false, iteration stops. -func (t *Tree[T]) Walk(fn func(path string, value T) bool) { - if t.root == nil { - return - } - t.walkRecursive(t.root, fn) -} - -func (t *Tree[T]) walkRecursive(n *node[T], fn func(path string, value T) bool) bool { - if n.leaf != nil { - if !fn(n.leaf.path, n.leaf.value) { - return false - } - } - - for _, child := range n.children { - if !t.walkRecursive(child, fn) { - return false - } - } - - if n.paramChild != nil { - if !t.walkRecursive(n.paramChild, fn) { - return false - } - } - - return true -} - -// splitPath splits a path into segments, removing empty segments. -// "/users/{id}/posts" -> ["users", "{id}", "posts"] -func splitPath(path string) []string { - path = strings.Trim(path, "/") - if path == "" { - return nil - } - - parts := strings.Split(path, "/") - - // Filter out empty segments (from double slashes, etc.) 
- result := make([]string, 0, len(parts)) - for _, p := range parts { - if p != "" { - result = append(result, p) - } - } - return result -} - -// isParam checks if a segment is a parameter (e.g., "{id}") -func isParam(seg string) bool { - return len(seg) > 2 && seg[0] == '{' && seg[len(seg)-1] == '}' -} - -// extractParamName extracts the parameter name from a segment. -// "{id}" -> "id", "{userId}" -> "userId" -func extractParamName(seg string) string { - if len(seg) > 2 && seg[0] == '{' && seg[len(seg)-1] == '}' { - return seg[1 : len(seg)-1] - } - return seg -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/requests/package.go b/vendor/github.com/pb33f/libopenapi-validator/requests/package.go deleted file mode 100644 index a45bf31a50b..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/requests/package.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -// Package requests contains all the logic, models and interfaces for validating OpenAPI 3+ Requests. -// The package depends on *http.Request -package requests diff --git a/vendor/github.com/pb33f/libopenapi-validator/requests/request_body.go b/vendor/github.com/pb33f/libopenapi-validator/requests/request_body.go deleted file mode 100644 index 16468908790..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/requests/request_body.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package requests - -import ( - "net/http" - - v3 "github.com/pb33f/libopenapi/datamodel/high/v3" - - "github.com/pb33f/libopenapi-validator/config" - "github.com/pb33f/libopenapi-validator/errors" -) - -// RequestBodyValidator is an interface that defines the methods for validating request bodies for Operations. 
-// -// ValidateRequestBodyWithPathItem method accepts an *http.Request and returns true if validation passed, -// false if validation failed and a slice of ValidationError pointers. -type RequestBodyValidator interface { - // ValidateRequestBody will validate the request body for an operation. The first return value will be true if the - // request body is valid, false if it is not. The second return value will be a slice of ValidationError pointers if - // the body is not valid. - ValidateRequestBody(request *http.Request) (bool, []*errors.ValidationError) - - // ValidateRequestBodyWithPathItem will validate the request body for an operation. The first return value will be true if the - // request body is valid, false if it is not. The second return value will be a slice of ValidationError pointers if - // the body is not valid. - ValidateRequestBodyWithPathItem(request *http.Request, pathItem *v3.PathItem, pathValue string) (bool, []*errors.ValidationError) -} - -// NewRequestBodyValidator will create a new RequestBodyValidator from an OpenAPI 3+ document -func NewRequestBodyValidator(document *v3.Document, opts ...config.Option) RequestBodyValidator { - options := config.NewValidationOptions(opts...) 
- - return &requestBodyValidator{options: options, document: document} -} - -type requestBodyValidator struct { - options *config.ValidationOptions - document *v3.Document -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/requests/validate_body.go b/vendor/github.com/pb33f/libopenapi-validator/requests/validate_body.go deleted file mode 100644 index 8b69ccfae8b..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/requests/validate_body.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package requests - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "strings" - - v3 "github.com/pb33f/libopenapi/datamodel/high/v3" - - "github.com/pb33f/libopenapi-validator/config" - "github.com/pb33f/libopenapi-validator/errors" - "github.com/pb33f/libopenapi-validator/helpers" - "github.com/pb33f/libopenapi-validator/paths" - "github.com/pb33f/libopenapi-validator/schema_validation" -) - -func (v *requestBodyValidator) ValidateRequestBody(request *http.Request) (bool, []*errors.ValidationError) { - pathItem, errs, foundPath := paths.FindPath(request, v.document, v.options) - if len(errs) > 0 { - return false, errs - } - return v.ValidateRequestBodyWithPathItem(request, pathItem, foundPath) -} - -func (v *requestBodyValidator) ValidateRequestBodyWithPathItem(request *http.Request, pathItem *v3.PathItem, pathValue string) (bool, []*errors.ValidationError) { - if pathItem == nil { - return false, []*errors.ValidationError{{ - ValidationType: helpers.PathValidation, - ValidationSubType: helpers.ValidationMissing, - Message: fmt.Sprintf("%s Path '%s' not found", request.Method, request.URL.Path), - Reason: fmt.Sprintf("The %s request contains a path of '%s' "+ - "however that path, or the %s method for that path does not exist in the specification", - request.Method, request.URL.Path, request.Method), - SpecLine: -1, - SpecCol: -1, - HowToFix: 
errors.HowToFixPath, - }} - } - operation := helpers.ExtractOperation(request, pathItem) - if operation == nil { - return false, []*errors.ValidationError{errors.OperationNotFound(pathItem, request, request.Method, pathValue)} - } - if operation.RequestBody == nil { - return true, nil - } - - // extract the content type from the request - contentType := request.Header.Get(helpers.ContentTypeHeader) - required := false - if operation.RequestBody.Required != nil { - required = *operation.RequestBody.Required - } - if contentType == "" { - if !required { - // request body is not required, the validation stop there. - return true, nil - } - return false, []*errors.ValidationError{errors.RequestContentTypeNotFound(operation, request, pathValue)} - } - - // extract the media type from the content type header. - mediaType, ok := v.extractContentType(contentType, operation) - if !ok { - return false, []*errors.ValidationError{errors.RequestContentTypeNotFound(operation, request, pathValue)} - } - - // Nothing to validate - if mediaType.Schema == nil { - return true, nil - } - - // extract schema from media type - schema := mediaType.Schema.Schema() - - isJson := strings.Contains(strings.ToLower(contentType), helpers.JSONType) - - // we currently only support JSON, XML and URLEncoded validation for request bodies - if !isJson { - isXml := schema_validation.IsXMLContentType(contentType) - isUrlEncoded := schema_validation.IsURLEncodedContentType(contentType) - - xmlValid := isXml && v.options.AllowXMLBodyValidation - urlEncodedValid := isUrlEncoded && v.options.AllowURLEncodedBodyValidation - - if !xmlValid && !urlEncodedValid { - return true, nil - } - - if request != nil && request.Body != nil { - requestBody, _ := io.ReadAll(request.Body) - _ = request.Body.Close() - - stringedBody := string(requestBody) - var jsonBody any - var prevalidationErrors []*errors.ValidationError - - switch { - case xmlValid: - jsonBody, prevalidationErrors = 
schema_validation.TransformXMLToSchemaJSON(stringedBody, schema) - case urlEncodedValid: - jsonBody, prevalidationErrors = schema_validation.TransformURLEncodedToSchemaJSON(stringedBody, schema, mediaType.Encoding) - } - - if len(prevalidationErrors) > 0 { - return false, prevalidationErrors - } - - transformedBytes, err := json.Marshal(jsonBody) - if err != nil { - switch { - case isXml: - return false, []*errors.ValidationError{errors.InvalidXMLParsing(err.Error(), stringedBody)} - case isUrlEncoded: - return false, []*errors.ValidationError{errors.InvalidURLEncodedParsing(err.Error(), stringedBody)} - } - } - - request.Body = io.NopCloser(bytes.NewBuffer(transformedBytes)) - } - } - - validationSucceeded, validationErrors := ValidateRequestSchema(&ValidateRequestSchemaInput{ - Request: request, - Schema: schema, - Version: helpers.VersionToFloat(v.document.Version), - Options: []config.Option{config.WithExistingOpts(v.options)}, - }) - - errors.PopulateValidationErrors(validationErrors, request, pathValue) - - return validationSucceeded, validationErrors -} - -func (v *requestBodyValidator) extractContentType(contentType string, operation *v3.Operation) (*v3.MediaType, bool) { - ct, _, _ := helpers.ExtractContentType(contentType) - mediaType, ok := operation.RequestBody.Content.Get(ct) - if ok { - return mediaType, true - } - ctMediaRange := strings.SplitN(ct, "/", 2) - for contentPair := operation.RequestBody.Content.First(); contentPair != nil; contentPair = contentPair.Next() { - s := contentPair.Key() - mediaTypeValue := contentPair.Value() - opMediaRange := strings.SplitN(s, "/", 2) - if (opMediaRange[0] == "*" || opMediaRange[0] == ctMediaRange[0]) && - (opMediaRange[1] == "*" || opMediaRange[1] == ctMediaRange[1]) { - return mediaTypeValue, true - } - } - return nil, false -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/requests/validate_request.go b/vendor/github.com/pb33f/libopenapi-validator/requests/validate_request.go deleted file mode 
100644 index 275b556bac5..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/requests/validate_request.go +++ /dev/null @@ -1,341 +0,0 @@ -// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package requests - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "reflect" - "regexp" - "strconv" - - "github.com/pb33f/libopenapi/datamodel/high/base" - "github.com/pb33f/libopenapi/utils" - "github.com/santhosh-tekuri/jsonschema/v6" - "go.yaml.in/yaml/v4" - "golang.org/x/text/language" - "golang.org/x/text/message" - - "github.com/pb33f/libopenapi-validator/cache" - "github.com/pb33f/libopenapi-validator/config" - "github.com/pb33f/libopenapi-validator/errors" - "github.com/pb33f/libopenapi-validator/helpers" - "github.com/pb33f/libopenapi-validator/schema_validation" - "github.com/pb33f/libopenapi-validator/strict" -) - -var instanceLocationRegex = regexp.MustCompile(`^/(\d+)`) - -// ValidateRequestSchemaInput contains parameters for request schema validation. -type ValidateRequestSchemaInput struct { - Request *http.Request // Required: The HTTP request to validate - Schema *base.Schema // Required: The OpenAPI schema to validate against - Version float32 // Required: OpenAPI version (3.0 or 3.1) - Options []config.Option // Optional: Functional options (defaults applied if empty/nil) -} - -// ValidateRequestSchema will validate a http.Request pointer against a schema. -// If validation fails, it will return a list of validation errors as the second return value. -// The schema will be stored and reused from cache if available, otherwise it will be compiled on each call. -func ValidateRequestSchema(input *ValidateRequestSchemaInput) (bool, []*errors.ValidationError) { - validationOptions := config.NewValidationOptions(input.Options...) 
- var validationErrors []*errors.ValidationError - var renderedSchema, jsonSchema []byte - var referenceSchema string - var compiledSchema *jsonschema.Schema - var cachedNode *yaml.Node - - if input.Schema == nil { - return false, []*errors.ValidationError{{ - ValidationType: helpers.RequestBodyValidation, - ValidationSubType: helpers.Schema, - Message: "schema is nil", - Reason: "The schema to validate against is nil", - }} - } else if input.Schema.GoLow() == nil { - return false, []*errors.ValidationError{{ - ValidationType: helpers.RequestBodyValidation, - ValidationSubType: helpers.Schema, - Message: "schema cannot be rendered", - Reason: "The schema does not have low-level information and cannot be rendered. Please ensure the schema is loaded from a document.", - }} - } - - if validationOptions.SchemaCache != nil { - hash := input.Schema.GoLow().Hash() - if cached, ok := validationOptions.SchemaCache.Load(hash); ok && cached != nil && cached.CompiledSchema != nil { - renderedSchema = cached.RenderedInline - referenceSchema = cached.ReferenceSchema - jsonSchema = cached.RenderedJSON - compiledSchema = cached.CompiledSchema - cachedNode = cached.RenderedNode - } - } - - // Cache miss or no cache - render and compile - if compiledSchema == nil { - renderCtx := base.NewInlineRenderContext() - var renderErr error - renderedSchema, renderErr = input.Schema.RenderInlineWithContext(renderCtx) - referenceSchema = string(renderedSchema) - - // If rendering failed (e.g., circular reference), return the render error - if renderErr != nil { - violation := &errors.SchemaValidationFailure{ - Reason: renderErr.Error(), - ReferenceSchema: referenceSchema, - } - validationErrors = append(validationErrors, &errors.ValidationError{ - ValidationType: helpers.RequestBodyValidation, - ValidationSubType: helpers.Schema, - Message: fmt.Sprintf("%s request body for '%s' failed schema rendering", - input.Request.Method, input.Request.URL.Path), - Reason: fmt.Sprintf("The request schema 
failed to render: %s", - renderErr.Error()), - SpecLine: 1, - SpecCol: 0, - SchemaValidationErrors: []*errors.SchemaValidationFailure{violation}, - HowToFix: errors.HowToFixInvalidRenderedSchema, - Context: referenceSchema, - }) - return false, validationErrors - } - - jsonSchema, _ = utils.ConvertYAMLtoJSON(renderedSchema) - - var err error - schemaName := fmt.Sprintf("%x", input.Schema.GoLow().Hash()) - compiledSchema, err = helpers.NewCompiledSchemaWithVersion( - schemaName, - jsonSchema, - validationOptions, - input.Version, - ) - if err != nil { - validationErrors = append(validationErrors, &errors.ValidationError{ - ValidationType: helpers.RequestBodyValidation, - ValidationSubType: helpers.Schema, - Message: fmt.Sprintf("%s request body for '%s' failed schema compilation", - input.Request.Method, input.Request.URL.Path), - Reason: fmt.Sprintf("The request schema failed to compile: %s", err.Error()), - SpecLine: 1, - SpecCol: 0, - HowToFix: "check the request schema for invalid JSON Schema syntax, complex regex patterns, or unsupported schema constructs", - Context: input.Schema, - }) - return false, validationErrors - } - - if validationOptions.SchemaCache != nil { - hash := input.Schema.GoLow().Hash() - validationOptions.SchemaCache.Store(hash, &cache.SchemaCacheEntry{ - Schema: input.Schema, - RenderedInline: renderedSchema, - ReferenceSchema: referenceSchema, - RenderedJSON: jsonSchema, - CompiledSchema: compiledSchema, - }) - } - } - - request := input.Request - schema := input.Schema - - var requestBody []byte - if request != nil && request.Body != nil { - requestBody, _ = io.ReadAll(request.Body) - - // close the request body, so it can be re-read later by another player in the chain - _ = request.Body.Close() - request.Body = io.NopCloser(bytes.NewBuffer(requestBody)) - - } - - var decodedObj interface{} - - if len(requestBody) > 0 { - err := json.Unmarshal(requestBody, &decodedObj) - if err != nil { - // cannot decode the request body, so it's not 
valid - validationErrors = append(validationErrors, &errors.ValidationError{ - ValidationType: helpers.RequestBodyValidation, - ValidationSubType: helpers.Schema, - Message: fmt.Sprintf("%s request body for '%s' failed to validate schema", - request.Method, request.URL.Path), - Reason: fmt.Sprintf("The request body cannot be decoded: %s", err.Error()), - SpecLine: 1, - SpecCol: 0, - HowToFix: errors.HowToFixInvalidSchema, - Context: schema, - }) - return false, validationErrors - } - } - - // no request body? but we do have a schema? - if len(requestBody) == 0 && len(jsonSchema) > 0 { - - line := schema.ParentProxy.GetSchemaKeyNode().Line - col := schema.ParentProxy.GetSchemaKeyNode().Line - if schema.Type != nil { - line = schema.GoLow().Type.KeyNode.Line - col = schema.GoLow().Type.KeyNode.Column - } - - // cannot decode the request body, so it's not valid - validationErrors = append(validationErrors, &errors.ValidationError{ - ValidationType: helpers.RequestBodyValidation, - ValidationSubType: helpers.Schema, - Message: fmt.Sprintf("%s request body is empty for '%s'", - request.Method, request.URL.Path), - Reason: "The request body is empty but there is a schema defined", - SpecLine: line, - SpecCol: col, - HowToFix: errors.HowToFixInvalidSchema, - Context: schema, - }) - return false, validationErrors - } - - // validate the object against the schema - scErrs := compiledSchema.Validate(decodedObj) - if scErrs != nil { - - jk := scErrs.(*jsonschema.ValidationError) - - // flatten the validationErrors - schFlatErrs := jk.BasicOutput().Errors - var schemaValidationErrors []*errors.SchemaValidationFailure - - // Use cached node if available, otherwise parse - renderedNode := cachedNode - if renderedNode == nil { - renderedNode = new(yaml.Node) - _ = yaml.Unmarshal(renderedSchema, renderedNode) - } - for q := range schFlatErrs { - er := schFlatErrs[q] - - errMsg := er.Error.Kind.LocalizedString(message.NewPrinter(language.Tag{})) - - if er.KeywordLocation == "" || 
helpers.IgnoreRegex.MatchString(errMsg) { - continue // ignore this error, it's useless tbh, utter noise. - } - if er.Error != nil { - - // locate the violated property in the schema - located := schema_validation.LocateSchemaPropertyNodeByJSONPath(renderedNode.Content[0], er.KeywordLocation) - - // extract the element specified by the instance - val := instanceLocationRegex.FindStringSubmatch(er.InstanceLocation) - var referenceObject string - - if len(val) > 0 { - referenceIndex, _ := strconv.Atoi(val[1]) - if reflect.ValueOf(decodedObj).Type().Kind() == reflect.Slice { - found := decodedObj.([]any)[referenceIndex] - recoded, _ := json.MarshalIndent(found, "", " ") - referenceObject = string(recoded) - } - } - if referenceObject == "" { - referenceObject = string(requestBody) - } - - errMsg := er.Error.Kind.LocalizedString(message.NewPrinter(language.Tag{})) - - violation := &errors.SchemaValidationFailure{ - Reason: errMsg, - FieldName: helpers.ExtractFieldNameFromStringLocation(er.InstanceLocation), - FieldPath: helpers.ExtractJSONPathFromStringLocation(er.InstanceLocation), - InstancePath: helpers.ConvertStringLocationToPathSegments(er.InstanceLocation), - KeywordLocation: er.KeywordLocation, - ReferenceSchema: referenceSchema, - ReferenceObject: referenceObject, - OriginalJsonSchemaError: jk, - } - // if we have a location within the schema, add it to the error - if located != nil { - - line := located.Line - // if the located node is a map or an array, then the actual human interpretable - // line on which the violation occurred is the line of the key, not the value. - if located.Kind == yaml.MappingNode || located.Kind == yaml.SequenceNode { - if line > 0 { - line-- - } - } - - // location of the violation within the rendered schema. 
- violation.Line = line - violation.Column = located.Column - } - schemaValidationErrors = append(schemaValidationErrors, violation) - } - } - - line := 1 - col := 0 - if schema.GoLow().Type.KeyNode != nil { - line = schema.GoLow().Type.KeyNode.Line - col = schema.GoLow().Type.KeyNode.Column - } - - // add the error to the list - validationErrors = append(validationErrors, &errors.ValidationError{ - ValidationType: helpers.RequestBodyValidation, - ValidationSubType: helpers.Schema, - Message: fmt.Sprintf("%s request body for '%s' failed to validate schema", - request.Method, request.URL.Path), - Reason: "The request body is defined as an object. " + - "However, it does not meet the schema requirements of the specification", - SpecLine: line, - SpecCol: col, - SchemaValidationErrors: schemaValidationErrors, - HowToFix: errors.HowToFixInvalidSchema, - Context: schema, - }) - } - if len(validationErrors) > 0 { - return false, validationErrors - } - - // strict mode: check for undeclared properties in request body - if validationOptions.StrictMode && decodedObj != nil { - strictValidator := strict.NewValidator(validationOptions, input.Version) - strictResult := strictValidator.Validate(strict.Input{ - Schema: schema, - Data: decodedObj, - Direction: strict.DirectionRequest, - Options: validationOptions, - BasePath: "$.body", - Version: input.Version, - }) - - if !strictResult.Valid { - for _, undeclared := range strictResult.UndeclaredValues { - validationErrors = append(validationErrors, - errors.UndeclaredPropertyError( - undeclared.Path, - undeclared.Name, - undeclared.Value, - undeclared.DeclaredProperties, - undeclared.Direction.String(), - request.URL.Path, - request.Method, - undeclared.SpecLine, - undeclared.SpecCol, - )) - } - } - } - - if len(validationErrors) > 0 { - return false, validationErrors - } - return true, nil -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/responses/package.go 
b/vendor/github.com/pb33f/libopenapi-validator/responses/package.go deleted file mode 100644 index 2d6c9a5939d..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/responses/package.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -// Package responses contains all the logic, models and interfaces for validating OpenAPI 3+ Responses -// The package depends on *http.Response -package responses diff --git a/vendor/github.com/pb33f/libopenapi-validator/responses/response_body.go b/vendor/github.com/pb33f/libopenapi-validator/responses/response_body.go deleted file mode 100644 index 62bac3f6452..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/responses/response_body.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package responses - -import ( - "net/http" - - v3 "github.com/pb33f/libopenapi/datamodel/high/v3" - - "github.com/pb33f/libopenapi-validator/config" - "github.com/pb33f/libopenapi-validator/errors" -) - -// ResponseBodyValidator is an interface that defines the methods for validating response bodies for Operations. -// -// ValidateResponseBody method accepts an *http.Request and returns true if validation passed, -// false if validation failed and a slice of ValidationError pointers. -type ResponseBodyValidator interface { - // ValidateResponseBody will validate the response body for a http.Response pointer. The request is used to - // locate the operation in the specification, the response is used to ensure the response code, media type and the - // schema of the response body are valid. - ValidateResponseBody(request *http.Request, response *http.Response) (bool, []*errors.ValidationError) - - // ValidateResponseBodyWithPathItem will validate the response body for a http.Response pointer. 
The request is used to - // locate the operation in the specification, the response is used to ensure the response code, media type and the - // schema of the response body are valid. - ValidateResponseBodyWithPathItem(request *http.Request, response *http.Response, pathItem *v3.PathItem, pathFound string) (bool, []*errors.ValidationError) -} - -// NewResponseBodyValidator will create a new ResponseBodyValidator from an OpenAPI 3+ document -func NewResponseBodyValidator(document *v3.Document, opts ...config.Option) ResponseBodyValidator { - options := config.NewValidationOptions(opts...) - - return &responseBodyValidator{options: options, document: document} -} - -type responseBodyValidator struct { - options *config.ValidationOptions - document *v3.Document -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/responses/validate_body.go b/vendor/github.com/pb33f/libopenapi-validator/responses/validate_body.go deleted file mode 100644 index 3fc9891e3b5..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/responses/validate_body.go +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package responses - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "strconv" - "strings" - - "github.com/pb33f/libopenapi/orderedmap" - - v3 "github.com/pb33f/libopenapi/datamodel/high/v3" - - "github.com/pb33f/libopenapi-validator/config" - "github.com/pb33f/libopenapi-validator/errors" - "github.com/pb33f/libopenapi-validator/helpers" - "github.com/pb33f/libopenapi-validator/paths" - "github.com/pb33f/libopenapi-validator/schema_validation" -) - -func (v *responseBodyValidator) ValidateResponseBody( - request *http.Request, - response *http.Response, -) (bool, []*errors.ValidationError) { - pathItem, errs, foundPath := paths.FindPath(request, v.document, v.options) - if len(errs) > 0 { - return false, errs - } - return v.ValidateResponseBodyWithPathItem(request, 
response, pathItem, foundPath) -} - -func (v *responseBodyValidator) ValidateResponseBodyWithPathItem(request *http.Request, response *http.Response, pathItem *v3.PathItem, pathFound string) (bool, []*errors.ValidationError) { - if pathItem == nil { - return false, []*errors.ValidationError{{ - ValidationType: helpers.PathValidation, - ValidationSubType: helpers.ValidationMissing, - Message: fmt.Sprintf("%s Path '%s' not found", request.Method, request.URL.Path), - Reason: fmt.Sprintf("The %s request contains a path of '%s' "+ - "however that path, or the %s method for that path does not exist in the specification", - request.Method, request.URL.Path, request.Method), - SpecLine: -1, - SpecCol: -1, - HowToFix: errors.HowToFixPath, - }} - } - var validationErrors []*errors.ValidationError - operation := helpers.ExtractOperation(request, pathItem) - if operation == nil { - return false, []*errors.ValidationError{errors.OperationNotFound(pathItem, request, request.Method, pathFound)} - } - // extract the response code from the response - httpCode := response.StatusCode - contentType := response.Header.Get(helpers.ContentTypeHeader) - codeStr := strconv.Itoa(httpCode) - - // extract the media type from the content type header. - mediaTypeSting, _, _ := helpers.ExtractContentType(contentType) - - // check if the response code is in the contract - foundResponse := operation.Responses.Codes.GetOrZero(codeStr) - if foundResponse == nil { - // check range definition for response codes - foundResponse = operation.Responses.Codes.GetOrZero(fmt.Sprintf("%dXX", httpCode/100)) - if foundResponse != nil { - codeStr = fmt.Sprintf("%dXX", httpCode/100) - } - } - - if foundResponse != nil { - if foundResponse.Content != nil { // only validate if we have content types. 
- // check content type has been defined in the contract - if mediaType, ok := foundResponse.Content.Get(mediaTypeSting); ok { - validationErrors = append(validationErrors, - v.checkResponseSchema(request, response, mediaTypeSting, mediaType)...) - } else { - // check that the operation *actually* returns a body. (i.e. a 204 response) - if foundResponse.Content != nil && orderedmap.Len(foundResponse.Content) > 0 { - // content type not found in the contract - validationErrors = append(validationErrors, - errors.ResponseContentTypeNotFound(operation, request, response, codeStr, false)) - } - } - } - } else { - // no code match, check for default response - if operation.Responses.Default != nil && operation.Responses.Default.Content != nil { - // check content type has been defined in the contract - if mediaType, ok := operation.Responses.Default.Content.Get(mediaTypeSting); ok { - foundResponse = operation.Responses.Default - validationErrors = append(validationErrors, - v.checkResponseSchema(request, response, contentType, mediaType)...) - } else { - // check that the operation *actually* returns a body. (i.e. a 204 response) - if operation.Responses.Default.Content != nil && orderedmap.Len(operation.Responses.Default.Content) > 0 { - // content type not found in the contract - validationErrors = append(validationErrors, - errors.ResponseContentTypeNotFound(operation, request, response, codeStr, true)) - } - } - } else { - // TODO: add support for '2XX' and '3XX' responses in the contract - // no default, no code match, nothing! - validationErrors = append(validationErrors, - errors.ResponseCodeNotFound(operation, request, httpCode)) - } - } - - if foundResponse != nil { - // check for headers in the response - if foundResponse.Headers != nil { - if ok, hErrs := ValidateResponseHeaders(request, response, foundResponse.Headers, pathFound, codeStr, config.WithExistingOpts(v.options)); !ok { - validationErrors = append(validationErrors, hErrs...) 
- } - } - } - - errors.PopulateValidationErrors(validationErrors, request, pathFound) - - if len(validationErrors) > 0 { - return false, validationErrors - } - return true, nil -} - -func (v *responseBodyValidator) checkResponseSchema( - request *http.Request, - response *http.Response, - contentType string, - mediaType *v3.MediaType, -) []*errors.ValidationError { - var validationErrors []*errors.ValidationError - - if mediaType.Schema == nil { - return validationErrors - } - - // currently, we can only validate JSON, XML and URL Encoded based responses, so check for the presence - // of 'json' (what ever it may be) and for XML/URLEncoded content type so we can perform a schema check on it. - // anything other than JSON XML, or URL Encoded will be ignored. - - isXml := schema_validation.IsXMLContentType(contentType) - isUrlEncoded := schema_validation.IsURLEncodedContentType(contentType) - isJson := strings.Contains(strings.ToLower(contentType), helpers.JSONType) - - xmlValid := isXml && v.options.AllowXMLBodyValidation - urlEncodedValid := isUrlEncoded && v.options.AllowURLEncodedBodyValidation - - if !isJson && !xmlValid && !urlEncodedValid { - return validationErrors - } - - schema := mediaType.Schema.Schema() - - if !isJson { - if response != nil && response.Body != http.NoBody { - responseBody, _ := io.ReadAll(response.Body) - _ = response.Body.Close() - - stringedBody := string(responseBody) - var jsonBody any - var prevalidationErrors []*errors.ValidationError - - switch { - case xmlValid: - jsonBody, prevalidationErrors = schema_validation.TransformXMLToSchemaJSON(stringedBody, schema) - case urlEncodedValid: - jsonBody, prevalidationErrors = schema_validation.TransformURLEncodedToSchemaJSON(stringedBody, schema, mediaType.Encoding) - } - - if len(prevalidationErrors) > 0 { - return prevalidationErrors - } - - transformedBytes, err := json.Marshal(jsonBody) - if err != nil { - switch { - case isXml: - return 
[]*errors.ValidationError{errors.InvalidXMLParsing(err.Error(), stringedBody)} - case isUrlEncoded: - return []*errors.ValidationError{errors.InvalidURLEncodedParsing(err.Error(), stringedBody)} - } - } - - response.Body = io.NopCloser(bytes.NewBuffer(transformedBytes)) - } - } - - // Validate response schema - valid, vErrs := ValidateResponseSchema(&ValidateResponseSchemaInput{ - Request: request, - Response: response, - Schema: schema, - Version: helpers.VersionToFloat(v.document.Version), - Options: []config.Option{config.WithExistingOpts(v.options)}, - }) - - if !valid { - validationErrors = append(validationErrors, vErrs...) - } - - return validationErrors -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/responses/validate_headers.go b/vendor/github.com/pb33f/libopenapi-validator/responses/validate_headers.go deleted file mode 100644 index 0cdd85bb07d..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/responses/validate_headers.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2023-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// https://pb33f.io - -package responses - -import ( - "fmt" - "net/http" - "strings" - - "github.com/pb33f/libopenapi/orderedmap" - - v3 "github.com/pb33f/libopenapi/datamodel/high/v3" - lowv3 "github.com/pb33f/libopenapi/datamodel/low/v3" - - "github.com/pb33f/libopenapi-validator/config" - "github.com/pb33f/libopenapi-validator/errors" - "github.com/pb33f/libopenapi-validator/helpers" - "github.com/pb33f/libopenapi-validator/parameters" - "github.com/pb33f/libopenapi-validator/strict" -) - -// ValidateResponseHeaders validates the response headers against the OpenAPI spec. -func ValidateResponseHeaders( - request *http.Request, - response *http.Response, - headers *orderedmap.Map[string, *v3.Header], - pathTemplate string, - statusCode string, - opts ...config.Option, -) (bool, []*errors.ValidationError) { - options := config.NewValidationOptions(opts...) 
- - // locate headers - type headerPair struct { - name string - value []string - model *v3.Header - } - locatedHeaders := make(map[string]headerPair) - var validationErrors []*errors.ValidationError - // iterate through the response headers - for name, v := range response.Header { - // check if the model is in the spec - for pair := headers.First(); pair != nil; pair = pair.Next() { - k := pair.Key() - header := pair.Value() - if strings.EqualFold(k, name) { - locatedHeaders[strings.ToLower(name)] = headerPair{ - name: k, - value: v, - model: header, - } - } - } - } - - // determine if any required headers are missing from the response - for pair := headers.First(); pair != nil; pair = pair.Next() { - name := pair.Key() - header := pair.Value() - if header.Required { - if _, ok := locatedHeaders[strings.ToLower(name)]; !ok { - keywordLocation := helpers.ConstructResponseHeaderJSONPointer(pathTemplate, request.Method, statusCode, name, "required") - - validationErrors = append(validationErrors, &errors.ValidationError{ - ValidationType: helpers.ResponseBodyValidation, - ValidationSubType: helpers.ParameterValidationHeader, - Message: "Missing required header", - Reason: fmt.Sprintf("Required header '%s' was not found in response", name), - SpecLine: header.GoLow().KeyNode.Line, - SpecCol: header.GoLow().KeyNode.Column, - HowToFix: errors.HowToFixMissingHeader, - RequestPath: request.URL.Path, - RequestMethod: request.Method, - SchemaValidationErrors: []*errors.SchemaValidationFailure{{ - Reason: fmt.Sprintf("Required header '%s' is missing", name), - FieldName: name, - InstancePath: []string{name}, - KeywordLocation: keywordLocation, - }}, - }) - } - } - } - - // validate the model schemas if they are set. 
- for h, header := range locatedHeaders { - if header.model.Schema != nil { - schema := header.model.Schema.Schema() - if schema != nil && header.model.Required { - for _, headerValue := range header.value { - validationErrors = append(validationErrors, - parameters.ValidateParameterSchema(schema, nil, headerValue, "header", - "response header", h, helpers.ResponseBodyValidation, lowv3.HeadersLabel, options)...) - } - } - } - } - - if len(validationErrors) > 0 { - return false, validationErrors - } - - // strict mode: check for undeclared response headers - if options.StrictMode { - // convert orderedmap to regular map for strict validation - declaredMap := make(map[string]*v3.Header) - for name, header := range headers.FromOldest() { - declaredMap[name] = header - } - - undeclaredHeaders := strict.ValidateResponseHeaders(response.Header, &declaredMap, options) - for _, undeclared := range undeclaredHeaders { - validationErrors = append(validationErrors, - errors.UndeclaredHeaderError( - undeclared.Name, - undeclared.Value.(string), - undeclared.DeclaredProperties, - undeclared.Direction.String(), - request.URL.Path, - request.Method, - )) - } - } - - if len(validationErrors) > 0 { - return false, validationErrors - } - return true, nil -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/responses/validate_response.go b/vendor/github.com/pb33f/libopenapi-validator/responses/validate_response.go deleted file mode 100644 index 4a8c120af4b..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/responses/validate_response.go +++ /dev/null @@ -1,374 +0,0 @@ -// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package responses - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "reflect" - "regexp" - "strconv" - - "github.com/pb33f/libopenapi/datamodel/high/base" - "github.com/pb33f/libopenapi/utils" - "github.com/santhosh-tekuri/jsonschema/v6" - "go.yaml.in/yaml/v4" - 
"golang.org/x/text/language" - "golang.org/x/text/message" - - "github.com/pb33f/libopenapi-validator/cache" - "github.com/pb33f/libopenapi-validator/config" - "github.com/pb33f/libopenapi-validator/errors" - "github.com/pb33f/libopenapi-validator/helpers" - "github.com/pb33f/libopenapi-validator/schema_validation" - "github.com/pb33f/libopenapi-validator/strict" -) - -var instanceLocationRegex = regexp.MustCompile(`^/(\d+)`) - -// ValidateResponseSchemaInput contains parameters for response schema validation. -type ValidateResponseSchemaInput struct { - Request *http.Request // Required: The HTTP request (for context) - Response *http.Response // Required: The HTTP response to validate - Schema *base.Schema // Required: The OpenAPI schema to validate against - Version float32 // Required: OpenAPI version (3.0 or 3.1) - Options []config.Option // Optional: Functional options (defaults applied if empty/nil) -} - -// ValidateResponseSchema will validate the response body for a http.Response pointer. The request is used to -// locate the operation in the specification, the response is used to ensure the response code, media type and the -// schema of the response body are valid. -// -// This function is used by the ValidateResponseBody function, but can be used independently. -// The schema will be compiled from cache if available, otherwise it will be compiled and cached. -func ValidateResponseSchema(input *ValidateResponseSchemaInput) (bool, []*errors.ValidationError) { - validationOptions := config.NewValidationOptions(input.Options...) 
- var validationErrors []*errors.ValidationError - var renderedSchema, jsonSchema []byte - var referenceSchema string - var compiledSchema *jsonschema.Schema - var cachedNode *yaml.Node - - if input.Schema == nil { - return false, []*errors.ValidationError{{ - ValidationType: helpers.ResponseBodyValidation, - ValidationSubType: helpers.Schema, - Message: "schema is nil", - Reason: "The schema to validate against is nil", - }} - } else if input.Schema.GoLow() == nil { - return false, []*errors.ValidationError{{ - ValidationType: helpers.ResponseBodyValidation, - ValidationSubType: helpers.Schema, - Message: "schema cannot be rendered", - Reason: "The schema does not have low-level information and cannot be rendered. Please ensure the schema is loaded from a document.", - }} - } - - if validationOptions.SchemaCache != nil { - hash := input.Schema.GoLow().Hash() - if cached, ok := validationOptions.SchemaCache.Load(hash); ok && cached != nil && cached.CompiledSchema != nil { - renderedSchema = cached.RenderedInline - referenceSchema = cached.ReferenceSchema - compiledSchema = cached.CompiledSchema - cachedNode = cached.RenderedNode - } - } - - // Cache miss or no cache - render and compile - if compiledSchema == nil { - renderCtx := base.NewInlineRenderContext() - var renderErr error - renderedSchema, renderErr = input.Schema.RenderInlineWithContext(renderCtx) - referenceSchema = string(renderedSchema) - - // If rendering failed (e.g., circular reference), return the render error - if renderErr != nil { - violation := &errors.SchemaValidationFailure{ - Reason: renderErr.Error(), - ReferenceSchema: referenceSchema, - } - validationErrors = append(validationErrors, &errors.ValidationError{ - ValidationType: helpers.ResponseBodyValidation, - ValidationSubType: helpers.Schema, - Message: fmt.Sprintf("%d response body for '%s' failed schema rendering", - input.Response.StatusCode, input.Request.URL.Path), - Reason: fmt.Sprintf("The response schema for status code '%d' 
failed to render: %s", - input.Response.StatusCode, renderErr.Error()), - SpecLine: 1, - SpecCol: 0, - SchemaValidationErrors: []*errors.SchemaValidationFailure{violation}, - HowToFix: "check the response schema for circular references or invalid structures", - Context: referenceSchema, - }) - return false, validationErrors - } - - jsonSchema, _ = utils.ConvertYAMLtoJSON(renderedSchema) - - var err error - schemaName := fmt.Sprintf("%x", input.Schema.GoLow().Hash()) - compiledSchema, err = helpers.NewCompiledSchemaWithVersion( - schemaName, - jsonSchema, - validationOptions, - input.Version, - ) - if err != nil { - validationErrors = append(validationErrors, &errors.ValidationError{ - ValidationType: helpers.ResponseBodyValidation, - ValidationSubType: helpers.Schema, - Message: fmt.Sprintf("%d response body for '%s' failed schema compilation", - input.Response.StatusCode, input.Request.URL.Path), - Reason: fmt.Sprintf("The response schema for status code '%d' failed to compile: %s", - input.Response.StatusCode, err.Error()), - SpecLine: 1, - SpecCol: 0, - HowToFix: "check the response schema for invalid JSON Schema syntax, complex regex patterns, or unsupported schema constructs", - Context: input.Schema, - }) - return false, validationErrors - } - - if validationOptions.SchemaCache != nil { - hash := input.Schema.GoLow().Hash() - validationOptions.SchemaCache.Store(hash, &cache.SchemaCacheEntry{ - Schema: input.Schema, - RenderedInline: renderedSchema, - ReferenceSchema: referenceSchema, - RenderedJSON: jsonSchema, - CompiledSchema: compiledSchema, - }) - } - } - - request := input.Request - response := input.Response - schema := input.Schema - - if response == nil || response.Body == http.NoBody { - - // skip response body validation for head request after processing schema - if response != nil && request != nil && request.Method == http.MethodHead { - return len(validationErrors) == 0, validationErrors - } - // cannot decode the response body, so it's not valid 
- validationErrors = append(validationErrors, &errors.ValidationError{ - ValidationType: "response", - ValidationSubType: "object", - Message: fmt.Sprintf("%s response object is missing for '%s'", - request.Method, request.URL.Path), - Reason: "The response object is completely missing", - SpecLine: 1, - SpecCol: 0, - HowToFix: "ensure response object has been set", - Context: schema, - }) - return false, validationErrors - } - - responseBody, ioErr := io.ReadAll(response.Body) - if ioErr != nil { - // cannot decode the response body, so it's not valid - validationErrors = append(validationErrors, &errors.ValidationError{ - ValidationType: helpers.ResponseBodyValidation, - ValidationSubType: helpers.Schema, - Message: fmt.Sprintf("%s response body for '%s' cannot be read, it's empty or malformed", - request.Method, request.URL.Path), - Reason: fmt.Sprintf("The response body cannot be decoded: %s", ioErr.Error()), - SpecLine: 1, - SpecCol: 0, - HowToFix: "ensure body is not empty", - Context: schema, - }) - return false, validationErrors - } - - // close the request body, so it can be re-read later by another player in the chain - _ = response.Body.Close() - response.Body = io.NopCloser(bytes.NewBuffer(responseBody)) - - var decodedObj interface{} - - if len(responseBody) > 0 { - // Per RFC7231, a response to a HEAD request MUST NOT include a message body. 
- if request != nil && request.Method == http.MethodHead { - violation := &errors.SchemaValidationFailure{ - Reason: "HEAD responses must not include a message body", - ReferenceObject: string(responseBody), - ReferenceSchema: referenceSchema, - } - validationErrors = append(validationErrors, &errors.ValidationError{ - ValidationType: helpers.ResponseBodyValidation, - ValidationSubType: helpers.Schema, - Message: fmt.Sprintf("%s response for '%s' must not include a body", - request.Method, request.URL.Path), - Reason: "The response to a HEAD request must not contain a body", - SpecLine: 1, - SpecCol: 0, - SchemaValidationErrors: []*errors.SchemaValidationFailure{violation}, - HowToFix: "ensure no response body is present for HEAD requests", - Context: referenceSchema, - }) - return false, validationErrors - } - err := json.Unmarshal(responseBody, &decodedObj) - if err != nil { - // cannot decode the response body, so it's not valid - validationErrors = append(validationErrors, &errors.ValidationError{ - ValidationType: helpers.ResponseBodyValidation, - ValidationSubType: helpers.Schema, - Message: fmt.Sprintf("%s response body for '%s' failed to validate schema", - request.Method, request.URL.Path), - Reason: fmt.Sprintf("The response body cannot be decoded: %s", err.Error()), - SpecLine: 1, - SpecCol: 0, - HowToFix: errors.HowToFixInvalidSchema, - Context: schema, - }) - return false, validationErrors - } - } - - // no response body? failed to decode anything? nothing to do here. 
- if responseBody == nil || decodedObj == nil { - return true, nil - } - - // validate the object against the schema - scErrs := compiledSchema.Validate(decodedObj) - if scErrs != nil { - jk := scErrs.(*jsonschema.ValidationError) - - // flatten the validationErrors - schFlatErrs := jk.BasicOutput().Errors - var schemaValidationErrors []*errors.SchemaValidationFailure - - renderedNode := cachedNode - if renderedNode == nil { - renderedNode = new(yaml.Node) - _ = yaml.Unmarshal(renderedSchema, renderedNode) - } - - for q := range schFlatErrs { - er := schFlatErrs[q] - - errMsg := er.Error.Kind.LocalizedString(message.NewPrinter(language.Tag{})) - if er.KeywordLocation == "" || helpers.IgnoreRegex.MatchString(errMsg) { - continue // ignore this error, it's useless tbh, utter noise. - } - if er.Error != nil { - // locate the violated property in the schema - located := schema_validation.LocateSchemaPropertyNodeByJSONPath(renderedNode.Content[0], er.KeywordLocation) - - // extract the element specified by the instance - val := instanceLocationRegex.FindStringSubmatch(er.InstanceLocation) - var referenceObject string - - if len(val) > 0 { - referenceIndex, _ := strconv.Atoi(val[1]) - if reflect.ValueOf(decodedObj).Type().Kind() == reflect.Slice { - found := decodedObj.([]any)[referenceIndex] - recoded, _ := json.MarshalIndent(found, "", " ") - referenceObject = string(recoded) - } - } - if referenceObject == "" { - referenceObject = string(responseBody) - } - - violation := &errors.SchemaValidationFailure{ - Reason: errMsg, - FieldName: helpers.ExtractFieldNameFromStringLocation(er.InstanceLocation), - FieldPath: helpers.ExtractJSONPathFromStringLocation(er.InstanceLocation), - InstancePath: helpers.ConvertStringLocationToPathSegments(er.InstanceLocation), - KeywordLocation: er.KeywordLocation, - ReferenceSchema: referenceSchema, - ReferenceObject: referenceObject, - OriginalJsonSchemaError: jk, - } - // if we have a location within the schema, add it to the error - if 
located != nil { - - line := located.Line - // if the located node is a map or an array, then the actual human interpretable - // line on which the violation occurred is the line of the key, not the value. - if located.Kind == yaml.MappingNode || located.Kind == yaml.SequenceNode { - if line > 0 { - line-- - } - } - - // location of the violation within the rendered schema. - violation.Line = line - violation.Column = located.Column - } - schemaValidationErrors = append(schemaValidationErrors, violation) - } - } - - line := 1 - col := 0 - if schema.GoLow().Type.KeyNode != nil { - line = schema.GoLow().Type.KeyNode.Line - col = schema.GoLow().Type.KeyNode.Column - } - - // add the error to the list - validationErrors = append(validationErrors, &errors.ValidationError{ - ValidationType: helpers.ResponseBodyValidation, - ValidationSubType: helpers.Schema, - Message: fmt.Sprintf("%d response body for '%s' failed to validate schema", - response.StatusCode, request.URL.Path), - Reason: fmt.Sprintf("The response body for status code '%d' is defined as an object. 
"+ - "However, it does not meet the schema requirements of the specification", response.StatusCode), - SpecLine: line, - SpecCol: col, - SchemaValidationErrors: schemaValidationErrors, - HowToFix: errors.HowToFixInvalidSchema, - Context: schema, - }) - } - if len(validationErrors) > 0 { - return false, validationErrors - } - - // strict mode: check for undeclared properties in response body - if validationOptions.StrictMode && decodedObj != nil { - strictValidator := strict.NewValidator(validationOptions, input.Version) - strictResult := strictValidator.Validate(strict.Input{ - Schema: schema, - Data: decodedObj, - Direction: strict.DirectionResponse, - Options: validationOptions, - BasePath: "$.body", - Version: input.Version, - }) - - if !strictResult.Valid { - for _, undeclared := range strictResult.UndeclaredValues { - validationErrors = append(validationErrors, - errors.UndeclaredPropertyError( - undeclared.Path, - undeclared.Name, - undeclared.Value, - undeclared.DeclaredProperties, - undeclared.Direction.String(), - request.URL.Path, - request.Method, - undeclared.SpecLine, - undeclared.SpecCol, - )) - } - } - } - - if len(validationErrors) > 0 { - return false, validationErrors - } - return true, nil -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/schema_validation/locate_schema_property.go b/vendor/github.com/pb33f/libopenapi-validator/schema_validation/locate_schema_property.go deleted file mode 100644 index a84ce37768c..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/schema_validation/locate_schema_property.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package schema_validation - -import ( - "github.com/pb33f/jsonpath/pkg/jsonpath" - "github.com/pb33f/jsonpath/pkg/jsonpath/config" - "github.com/pb33f/libopenapi/utils" - "go.yaml.in/yaml/v4" -) - -// LocateSchemaPropertyNodeByJSONPath will locate a schema property node by a JSONPath. 
It converts something like -// #/components/schemas/MySchema/properties/MyProperty to something like $.components.schemas.MySchema.properties.MyProperty -func LocateSchemaPropertyNodeByJSONPath(doc *yaml.Node, JSONPath string) *yaml.Node { - var locatedNode *yaml.Node - doneChan := make(chan bool) - locatedNodeChan := make(chan *yaml.Node) - go func() { - defer func() { - if err := recover(); err != nil { - // can't search path, too crazy. - doneChan <- true - } - }() - _, path := utils.ConvertComponentIdIntoFriendlyPathSearch(JSONPath) - if path == "" { - doneChan <- true - } - jsonPath, _ := jsonpath.NewPath(path, config.WithLazyContextTracking()) - locatedNodes := jsonPath.Query(doc) - if len(locatedNodes) > 0 { - locatedNode = locatedNodes[0] - } - locatedNodeChan <- locatedNode - }() - select { - case locatedNode = <-locatedNodeChan: - return locatedNode - case <-doneChan: - return nil - } -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/schema_validation/package.go b/vendor/github.com/pb33f/libopenapi-validator/schema_validation/package.go deleted file mode 100644 index ee951b1f0c7..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/schema_validation/package.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -// Package schema_validation contains all the logic, models and interfaces for validating OpenAPI 3+ Schemas. 
-// Functionality for validating individual *base.Schema instances, but as well as validating a complete OpenAPI 3+ document -package schema_validation diff --git a/vendor/github.com/pb33f/libopenapi-validator/schema_validation/property_locator.go b/vendor/github.com/pb33f/libopenapi-validator/schema_validation/property_locator.go deleted file mode 100644 index dce4377d51b..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/schema_validation/property_locator.go +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright 2025 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package schema_validation - -import ( - "regexp" - "strings" - - "github.com/santhosh-tekuri/jsonschema/v6" - "go.yaml.in/yaml/v4" - - liberrors "github.com/pb33f/libopenapi-validator/errors" - "github.com/pb33f/libopenapi-validator/helpers" -) - -// PropertyNameInfo contains extracted information about a property name validation error -type PropertyNameInfo struct { - PropertyName string // The property name that violated validation (e.g., "$defs-atmVolatility_type") - ParentLocation []string // The path to the parent containing the property (e.g., ["components", "schemas"]) - EnhancedReason string // A more detailed error message with context - Pattern string // The pattern that was violated, if applicable -} - -var ( - // invalidPropertyNameRegex matches errors like: "invalid propertyName 'X'" - invalidPropertyNameRegex = regexp.MustCompile(`invalid propertyName '([^']+)'`) - - // patternMismatchRegex matches errors like: "'X' does not match pattern 'Y'" - patternMismatchRegex = regexp.MustCompile(`'([^']+)' does not match pattern '([^']+)'`) -) - -// extractPropertyNameFromError extracts property name information from a jsonschema.ValidationError -// when BasicOutput doesn't provide useful InstanceLocation. -// This handles Priority 1 (invalid propertyName) and Priority 2 (pattern mismatch) cases. 
-// -// Returns PropertyNameInfo with extracted details, or nil if no relevant information found. -// Note: ValidationError.Error() includes all cause information in the formatted string, -// so we only need to check the root error message. -func extractPropertyNameFromError(ve *jsonschema.ValidationError) *PropertyNameInfo { - if ve == nil { - return nil - } - - // Check error message for patterns (Error() includes all cause information) - return checkErrorForPropertyInfo(ve) -} - -// checkErrorForPropertyInfo examines a single ValidationError for property name patterns. -// This is extracted as a separate function to avoid duplication and improve testability. -func checkErrorForPropertyInfo(ve *jsonschema.ValidationError) *PropertyNameInfo { - errMsg := ve.Error() - return checkErrorMessageForPropertyInfo(errMsg, ve.InstanceLocation, ve) -} - -// checkErrorMessageForPropertyInfo extracts property name info from an error message string. -// This is separated to improve testability while keeping validation error traversal logic intact. 
-func checkErrorMessageForPropertyInfo(errMsg string, instanceLocation []string, ve *jsonschema.ValidationError) *PropertyNameInfo { - // Check for "invalid propertyName 'X'" first (most specific error message) - if matches := invalidPropertyNameRegex.FindStringSubmatch(errMsg); len(matches) > 1 { - propertyName := matches[1] - info := &PropertyNameInfo{ - PropertyName: propertyName, - ParentLocation: instanceLocation, - } - - // try to extract pattern information from deeper causes if available - var pattern string - if ve != nil { - pattern = extractPatternFromCauses(ve) - } - - if pattern != "" { - info.Pattern = pattern - info.EnhancedReason = buildEnhancedReason(propertyName, pattern) - } else { - info.EnhancedReason = "invalid propertyName '" + propertyName + "'" - } - - return info - } - - // Check for "'X' does not match pattern 'Y'" as fallback (pattern violation) - if matches := patternMismatchRegex.FindStringSubmatch(errMsg); len(matches) > 2 { - return &PropertyNameInfo{ - PropertyName: matches[1], - ParentLocation: instanceLocation, - Pattern: matches[2], - EnhancedReason: buildEnhancedReason(matches[1], matches[2]), - } - } - - return nil -} - -// extractPatternFromCauses looks through error causes to find pattern violation details. -// Since ValidationError.Error() includes all cause information, we check the formatted error string. 
-func extractPatternFromCauses(ve *jsonschema.ValidationError) string { - if ve == nil { - return "" - } - - // Check the error message which includes all cause information - errMsg := ve.Error() - if matches := patternMismatchRegex.FindStringSubmatch(errMsg); len(matches) > 2 { - return matches[2] - } - - return "" -} - -// buildEnhancedReason constructs a detailed error message with property name and pattern -func buildEnhancedReason(propertyName, pattern string) string { - var buf strings.Builder - buf.Grow(len(propertyName) + len(pattern) + 50) // pre-allocate to avoid reallocation - buf.WriteString("invalid propertyName '") - buf.WriteString(propertyName) - buf.WriteString("': does not match pattern '") - buf.WriteString(pattern) - buf.WriteString("'") - return buf.String() -} - -// findPropertyKeyNodeInYAML searches the YAML tree for a property key node at a specific location. -// It first navigates to the parent location, then searches for the property name as a map key. -// -// Parameters: -// - rootNode: The root YAML node to search from -// - propertyName: The property key to find (e.g., "$defs-atmVolatility_type") -// - parentPath: Path segments to the parent (e.g., ["components", "schemas"]) -// -// Returns the YAML node for the property key, or nil if not found. -func findPropertyKeyNodeInYAML(rootNode *yaml.Node, propertyName string, parentPath []string) *yaml.Node { - if rootNode == nil || propertyName == "" { - return nil - } - - // Navigate to parent location first - currentNode := rootNode - for _, segment := range parentPath { - currentNode = navigateToYAMLChild(currentNode, segment) - if currentNode == nil { - return nil - } - } - - // Search for the property name as a map key - return findMapKeyNode(currentNode, propertyName) -} - -// navigateToYAMLChild navigates from a parent node to a child by name. -// Handles both document root navigation and map content navigation. 
-func navigateToYAMLChild(parent *yaml.Node, childName string) *yaml.Node { - if parent == nil { - return nil - } - - // If parent is a document node, navigate to its content - if parent.Kind == yaml.DocumentNode && len(parent.Content) > 0 { - parent = parent.Content[0] - } - - // Navigate through mapping node - if parent.Kind == yaml.MappingNode { - return findMapKeyValue(parent, childName) - } - - return nil -} - -// findMapKeyValue searches a mapping node for a key and returns its value node -func findMapKeyValue(mappingNode *yaml.Node, keyName string) *yaml.Node { - if mappingNode.Kind != yaml.MappingNode { - return nil - } - - // mapping nodes have key-value pairs: [key1, value1, key2, value2, ...] - for i := 0; i < len(mappingNode.Content); i += 2 { - keyNode := mappingNode.Content[i] - if keyNode.Value == keyName { - // return the value node (i+1) - if i+1 < len(mappingNode.Content) { - return mappingNode.Content[i+1] - } - } - } - - return nil -} - -// findMapKeyNode searches a mapping node for a key and returns the key node itself (not the value) -func findMapKeyNode(mappingNode *yaml.Node, keyName string) *yaml.Node { - if mappingNode == nil { - return nil - } - - // if it's a document node, unwrap to content - if mappingNode.Kind == yaml.DocumentNode && len(mappingNode.Content) > 0 { - mappingNode = mappingNode.Content[0] - } - - if mappingNode.Kind != yaml.MappingNode { - return nil - } - - // mapping nodes have key-value pairs: [key1, value1, key2, value2, ...] - for i := 0; i < len(mappingNode.Content); i += 2 { - keyNode := mappingNode.Content[i] - if keyNode.Value == keyName { - return keyNode // contains line/column metadata for error reporting - } - } - - return nil -} - -// applyPropertyNameFallback attempts to enrich a violation with property name information -// when the primary location method fails. Returns true if enrichment was applied. 
-func applyPropertyNameFallback( - propertyInfo *PropertyNameInfo, - rootNode *yaml.Node, - violation *liberrors.SchemaValidationFailure, -) bool { - if propertyInfo == nil { - return false - } - - return enrichSchemaValidationFailure( - propertyInfo, - rootNode, - &violation.Line, - &violation.Column, - &violation.Reason, - &violation.FieldName, - &violation.FieldPath, - &violation.InstancePath, - ) -} - -// enrichSchemaValidationFailure attempts to enhance a SchemaValidationFailure with better -// location information by searching the YAML tree when the standard location is empty. -// -// This function: -// 1. searches YAML tree for the property key in various locations -// 2. updates Line, Column, Reason, and other fields if found -// -// Returns true if enrichment was performed, false otherwise. -func enrichSchemaValidationFailure( - failure *PropertyNameInfo, - rootNode *yaml.Node, - line *int, - column *int, - reason *string, - fieldName *string, - fieldPath *string, - instancePath *[]string, -) bool { - if failure == nil { - return false - } - - // search for the property key in the YAML tree with multiple fallback locations - // since InstanceLocation may be empty for property name errors - var foundNode *yaml.Node - - // try with the provided parent location first - if len(failure.ParentLocation) > 0 { - foundNode = findPropertyKeyNodeInYAML(rootNode, failure.PropertyName, failure.ParentLocation) - } - - // common fallback locations for OpenAPI property name errors - if foundNode == nil { - foundNode = findPropertyKeyNodeInYAML(rootNode, failure.PropertyName, []string{"components", "schemas"}) - } - if foundNode == nil { - foundNode = findPropertyKeyNodeInYAML(rootNode, failure.PropertyName, []string{"components"}) - } - if foundNode == nil { - foundNode = findPropertyKeyNodeInYAML(rootNode, failure.PropertyName, []string{}) - } - - if foundNode == nil { - return false - } - - // populate location metadata from YAML node - *line = foundNode.Line - *column 
= foundNode.Column - - if failure.EnhancedReason != "" { - *reason = failure.EnhancedReason - } - - *fieldName = failure.PropertyName - - // construct JSONPath from parent location segments - if len(failure.ParentLocation) > 0 { - *fieldPath = helpers.ExtractJSONPathFromStringLocation("/" + strings.Join(failure.ParentLocation, "/") + "/" + failure.PropertyName) - *instancePath = failure.ParentLocation - } else { - *fieldPath = helpers.ExtractJSONPathFromStringLocation("/" + failure.PropertyName) - *instancePath = []string{} - } - - return true -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/schema_validation/urlencoded_validator.go b/vendor/github.com/pb33f/libopenapi-validator/schema_validation/urlencoded_validator.go deleted file mode 100644 index ebfbd7c74d8..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/schema_validation/urlencoded_validator.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2026 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package schema_validation - -import ( - "log/slog" - "os" - - "github.com/pb33f/libopenapi/datamodel/high/base" - v3 "github.com/pb33f/libopenapi/datamodel/high/v3" - "github.com/pb33f/libopenapi/orderedmap" - - "github.com/pb33f/libopenapi-validator/config" - liberrors "github.com/pb33f/libopenapi-validator/errors" -) - -// URLEncodedValidator is an interface that defines methods for validating URL encoded strings against OpenAPI schemas. -// There are 2 methods for validating URL encoded: -// -// ValidateURLEncodedString validates an URL encoded string against a schema, applying OpenAPI object transformations. -// ValidateURLEncodedStringWithVersion - version-aware URL encoded validation that allows OpenAPI 3.0 keywords when version is specified. -type URLEncodedValidator interface { - // ValidateURLEncodedString validates an URL encoded string against a schema, applying OpenAPI object transformations. 
- // Uses OpenAPI 3.1+ validation by default (strict JSON Schema compliance). - ValidateURLEncodedString(schema *base.Schema, encoding *orderedmap.Map[string, *v3.Encoding], urlEncodedString string) (bool, []*liberrors.ValidationError) - - // ValidateURLEncodedStringWithVersion validates an URL encoded string with version-specific rules. - // When version is 3.0, OpenAPI 3.0-specific keywords like 'nullable' are allowed and processed. - // When version is 3.1+, OpenAPI 3.0-specific keywords like 'nullable' will cause validation to fail. - ValidateURLEncodedStringWithVersion(schema *base.Schema, encoding *orderedmap.Map[string, *v3.Encoding], urlEncodedString string, version float32) (bool, []*liberrors.ValidationError) -} - -type urlEncodedValidator struct { - schemaValidator *schemaValidator - logger *slog.Logger -} - -// NewURLEncodedValidatorWithLogger creates a new URLEncodedValidator instance with a custom logger. -func NewURLEncodedValidatorWithLogger(logger *slog.Logger, opts ...config.Option) URLEncodedValidator { - options := config.NewValidationOptions(opts...) - // Create an internal schema validator for JSON validation after URLEncoded transformation - sv := &schemaValidator{options: options, logger: logger} - return &urlEncodedValidator{schemaValidator: sv, logger: logger} -} - -// NewURLEncodedValidator creates a new URLEncodedValidator instance with default logging configuration. -func NewURLEncodedValidator(opts ...config.Option) URLEncodedValidator { - logger := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{ - Level: slog.LevelError, - })) - return NewURLEncodedValidatorWithLogger(logger, opts...) 
-} - -func (x *urlEncodedValidator) ValidateURLEncodedString(schema *base.Schema, encoding *orderedmap.Map[string, *v3.Encoding], urlEncodedString string) (bool, []*liberrors.ValidationError) { - return x.validateURLEncodedWithVersion(schema, encoding, urlEncodedString, x.logger, 3.1) -} - -func (x *urlEncodedValidator) ValidateURLEncodedStringWithVersion(schema *base.Schema, encoding *orderedmap.Map[string, *v3.Encoding], urlEncodedString string, version float32) (bool, []*liberrors.ValidationError) { - return x.validateURLEncodedWithVersion(schema, encoding, urlEncodedString, x.logger, version) -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/schema_validation/validate_document.go b/vendor/github.com/pb33f/libopenapi-validator/schema_validation/validate_document.go deleted file mode 100644 index 635f733530b..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/schema_validation/validate_document.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2023-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// SPDX-License-Identifier: MIT - -package schema_validation - -import ( - "encoding/json" - "errors" - "fmt" - - "github.com/pb33f/libopenapi" - "github.com/santhosh-tekuri/jsonschema/v6" - "go.yaml.in/yaml/v4" - "golang.org/x/text/language" - "golang.org/x/text/message" - - "github.com/pb33f/libopenapi-validator/config" - liberrors "github.com/pb33f/libopenapi-validator/errors" - "github.com/pb33f/libopenapi-validator/helpers" -) - -func normalizeJSON(data any) any { - d, _ := json.Marshal(data) - var normalized any - _ = json.Unmarshal(d, &normalized) - return normalized -} - -// ValidateOpenAPIDocument will validate an OpenAPI document against the OpenAPI 2, 3.0 and 3.1 schemas (depending on version) -// It will return true if the document is valid, false if it is not and a slice of ValidationError pointers. 
-func ValidateOpenAPIDocument(doc libopenapi.Document, opts ...config.Option) (bool, []*liberrors.ValidationError) { - options := config.NewValidationOptions(opts...) - - info := doc.GetSpecInfo() - loadedSchema := info.APISchema - var validationErrors []*liberrors.ValidationError - - // Check if SpecJSON is nil before dereferencing - if info.SpecJSON == nil { - validationErrors = append(validationErrors, &liberrors.ValidationError{ - ValidationType: helpers.Schema, - ValidationSubType: "document", - Message: "OpenAPI document validation failed", - Reason: "The document's SpecJSON is nil, indicating the document was not properly parsed or is empty", - SpecLine: 1, - SpecCol: 0, - HowToFix: "ensure the OpenAPI document is valid YAML/JSON and can be properly parsed by libopenapi", - Context: "document root", - }) - return false, validationErrors - } - - decodedDocument := *info.SpecJSON - - // Compile the JSON Schema - jsch, err := helpers.NewCompiledSchema("schema", []byte(loadedSchema), options) - if err != nil { - // schema compilation failed, return validation error instead of panicking - validationErrors = append(validationErrors, &liberrors.ValidationError{ - ValidationType: helpers.Schema, - ValidationSubType: "compilation", - Message: "OpenAPI document schema compilation failed", - Reason: fmt.Sprintf("The OpenAPI schema failed to compile: %s", err.Error()), - SpecLine: 1, - SpecCol: 0, - HowToFix: "check the OpenAPI schema for invalid JSON Schema syntax, complex regex patterns, or unsupported schema constructs", - Context: loadedSchema, - }) - return false, validationErrors - } - - // Validate the document - scErrs := jsch.Validate(normalizeJSON(decodedDocument)) - - var schemaValidationErrors []*liberrors.SchemaValidationFailure - - if scErrs != nil { - - var jk *jsonschema.ValidationError - if errors.As(scErrs, &jk) { - - // flatten the validationErrors - schFlatErrs := jk.BasicOutput().Errors - - // Extract property name info once before processing errors 
(performance optimization) - propertyInfo := extractPropertyNameFromError(jk) - - for q := range schFlatErrs { - er := schFlatErrs[q] - - errMsg := er.Error.Kind.LocalizedString(message.NewPrinter(language.Tag{})) - if er.KeywordLocation == "" || helpers.IgnorePolyRegex.MatchString(errMsg) { - continue // ignore this error, it's useless tbh, utter noise. - } - if errMsg != "" { - - // locate the violated property in the schema - located := LocateSchemaPropertyNodeByJSONPath(info.RootNode.Content[0], er.InstanceLocation) - violation := &liberrors.SchemaValidationFailure{ - Reason: errMsg, - FieldName: helpers.ExtractFieldNameFromStringLocation(er.InstanceLocation), - FieldPath: helpers.ExtractJSONPathFromStringLocation(er.InstanceLocation), - InstancePath: helpers.ConvertStringLocationToPathSegments(er.InstanceLocation), - KeywordLocation: er.KeywordLocation, - OriginalJsonSchemaError: jk, - } - - // if we have a location within the schema, add it to the error - if located != nil { - line := located.Line - // if the located node is a map or an array, then the actual human interpretable - // line on which the violation occurred is the line of the key, not the value. - if located.Kind == yaml.MappingNode || located.Kind == yaml.SequenceNode { - if line > 0 { - line-- - } - } - - // location of the violation within the rendered schema. 
- violation.Line = line - violation.Column = located.Column - } else { - // handles property name validation errors that don't provide useful InstanceLocation - applyPropertyNameFallback(propertyInfo, info.RootNode.Content[0], violation) - } - schemaValidationErrors = append(schemaValidationErrors, violation) - } - } - } - - // add the error to the list - validationErrors = append(validationErrors, &liberrors.ValidationError{ - ValidationType: helpers.Schema, - Message: "Document does not pass validation", - Reason: fmt.Sprintf("OpenAPI document is not valid according "+ - "to the %s specification", info.Version), - SchemaValidationErrors: schemaValidationErrors, - HowToFix: liberrors.HowToFixInvalidSchema, - }) - } - if len(validationErrors) > 0 { - return false, validationErrors - } - return true, nil -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/schema_validation/validate_schema.go b/vendor/github.com/pb33f/libopenapi-validator/schema_validation/validate_schema.go deleted file mode 100644 index 7f1f055386a..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/schema_validation/validate_schema.go +++ /dev/null @@ -1,366 +0,0 @@ -// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT -package schema_validation - -import ( - "encoding/json" - "errors" - "fmt" - "log/slog" - "math" - "os" - "reflect" - "regexp" - "strconv" - "sync" - - "github.com/pb33f/libopenapi-validator/cache" - "github.com/pb33f/libopenapi/datamodel/high/base" - "github.com/santhosh-tekuri/jsonschema/v6" - "go.yaml.in/yaml/v4" - "golang.org/x/text/language" - "golang.org/x/text/message" - - _ "embed" - - "github.com/pb33f/libopenapi-validator/config" - liberrors "github.com/pb33f/libopenapi-validator/errors" - "github.com/pb33f/libopenapi-validator/helpers" -) - -// SchemaValidator is an interface that defines the methods for validating a *base.Schema (V3+ Only) object. 
-// There are 6 methods for validating a schema: -// -// ValidateSchemaString accepts a schema object to validate against, and a JSON/YAML blob that is defined as a string. -// ValidateSchemaObject accepts a schema object to validate against, and an object, created from unmarshalled JSON/YAML. -// ValidateSchemaBytes accepts a schema object to validate against, and a JSON/YAML blob that is defined as a byte array. -// ValidateSchemaStringWithVersion - version-aware validation that allows OpenAPI 3.0 keywords when version is specified. -// ValidateSchemaObjectWithVersion - version-aware validation that allows OpenAPI 3.0 keywords when version is specified. -// ValidateSchemaBytesWithVersion - version-aware validation that allows OpenAPI 3.0 keywords when version is specified. -type SchemaValidator interface { - // ValidateSchemaString accepts a schema object to validate against, and a JSON/YAML blob that is defined as a string. - // Uses OpenAPI 3.1+ validation by default (strict JSON Schema compliance). - ValidateSchemaString(schema *base.Schema, payload string) (bool, []*liberrors.ValidationError) - - // ValidateSchemaObject accepts a schema object to validate against, and an object, created from unmarshalled JSON/YAML. - // This is a pre-decoded object that will skip the need to unmarshal a string of JSON/YAML. - // Uses OpenAPI 3.1+ validation by default (strict JSON Schema compliance). - ValidateSchemaObject(schema *base.Schema, payload interface{}) (bool, []*liberrors.ValidationError) - - // ValidateSchemaBytes accepts a schema object to validate against, and a byte slice containing a schema to - // validate against. Uses OpenAPI 3.1+ validation by default (strict JSON Schema compliance). - ValidateSchemaBytes(schema *base.Schema, payload []byte) (bool, []*liberrors.ValidationError) - - // ValidateSchemaStringWithVersion accepts a schema object to validate against, a JSON/YAML blob, and an OpenAPI version. 
- // When version is 3.0, OpenAPI 3.0-specific keywords like 'nullable' are allowed and processed. - // When version is 3.1+, OpenAPI 3.0-specific keywords like 'nullable' will cause validation to fail. - ValidateSchemaStringWithVersion(schema *base.Schema, payload string, version float32) (bool, []*liberrors.ValidationError) - - // ValidateSchemaObjectWithVersion accepts a schema object to validate against, an object, and an OpenAPI version. - // When version is 3.0, OpenAPI 3.0-specific keywords like 'nullable' are allowed and processed. - // When version is 3.1+, OpenAPI 3.0-specific keywords like 'nullable' will cause validation to fail. - ValidateSchemaObjectWithVersion(schema *base.Schema, payload interface{}, version float32) (bool, []*liberrors.ValidationError) - - // ValidateSchemaBytesWithVersion accepts a schema object to validate against, a byte slice, and an OpenAPI version. - // When version is 3.0, OpenAPI 3.0-specific keywords like 'nullable' are allowed and processed. - // When version is 3.1+, OpenAPI 3.0-specific keywords like 'nullable' will cause validation to fail. - ValidateSchemaBytesWithVersion(schema *base.Schema, payload []byte, version float32) (bool, []*liberrors.ValidationError) -} - -var instanceLocationRegex = regexp.MustCompile(`^/(\d+)`) - -type schemaValidator struct { - options *config.ValidationOptions - logger *slog.Logger - lock sync.Mutex -} - -// NewSchemaValidatorWithLogger will create a new SchemaValidator instance, ready to accept schemas and payloads to validate. -func NewSchemaValidatorWithLogger(logger *slog.Logger, opts ...config.Option) SchemaValidator { - options := config.NewValidationOptions(opts...) - - return &schemaValidator{options: options, logger: logger, lock: sync.Mutex{}} -} - -// NewSchemaValidator will create a new SchemaValidator instance, ready to accept schemas and payloads to validate. 
-func NewSchemaValidator(opts ...config.Option) SchemaValidator { - logger := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{ - Level: slog.LevelError, - })) - return NewSchemaValidatorWithLogger(logger, opts...) -} - -func (s *schemaValidator) ValidateSchemaString(schema *base.Schema, payload string) (bool, []*liberrors.ValidationError) { - return s.validateSchemaWithVersion(schema, []byte(payload), nil, s.logger, 3.1) -} - -func (s *schemaValidator) ValidateSchemaObject(schema *base.Schema, payload interface{}) (bool, []*liberrors.ValidationError) { - return s.validateSchemaWithVersion(schema, nil, payload, s.logger, 3.1) -} - -func (s *schemaValidator) ValidateSchemaBytes(schema *base.Schema, payload []byte) (bool, []*liberrors.ValidationError) { - return s.validateSchemaWithVersion(schema, payload, nil, s.logger, 3.1) -} - -func (s *schemaValidator) ValidateSchemaStringWithVersion(schema *base.Schema, payload string, version float32) (bool, []*liberrors.ValidationError) { - return s.validateSchemaWithVersion(schema, []byte(payload), nil, s.logger, version) -} - -func (s *schemaValidator) ValidateSchemaObjectWithVersion(schema *base.Schema, payload interface{}, version float32) (bool, []*liberrors.ValidationError) { - return s.validateSchemaWithVersion(schema, nil, payload, s.logger, version) -} - -func (s *schemaValidator) ValidateSchemaBytesWithVersion(schema *base.Schema, payload []byte, version float32) (bool, []*liberrors.ValidationError) { - return s.validateSchemaWithVersion(schema, payload, nil, s.logger, version) -} - -func (s *schemaValidator) validateSchemaWithVersion(schema *base.Schema, payload []byte, decodedObject interface{}, log *slog.Logger, version float32) (bool, []*liberrors.ValidationError) { - var validationErrors []*liberrors.ValidationError - - if schema == nil { - log.Info("schema is empty and cannot be validated. 
This generally means the schema is missing from the spec, or could not be read.") - return false, validationErrors - } - - var renderedSchema []byte - var renderedNode *yaml.Node - var compiledSchema *jsonschema.Schema - - // Check cache first — reuses existing SchemaCache (populated by NewValidationOptions). - var cacheKey uint64 - canCache := s.options.SchemaCache != nil && schema.GoLow() != nil - if canCache { - // Include version in key so 3.0 (nullable) and 3.1 compile differently. - cacheKey = schema.GoLow().Hash() ^ uint64(math.Float32bits(version)) - if cached, ok := s.options.SchemaCache.Load(cacheKey); ok && - cached != nil && cached.CompiledSchema != nil { - renderedSchema = cached.RenderedInline - renderedNode = cached.RenderedNode - compiledSchema = cached.CompiledSchema - } - } - - // Cache miss — render, convert to JSON, and compile. - if compiledSchema == nil { - renderCtx := base.NewInlineRenderContextForValidation() - s.lock.Lock() - nodeIface, renderErr := schema.MarshalYAMLInlineWithContext(renderCtx) - s.lock.Unlock() - - if renderErr != nil { - validationErrors = append(validationErrors, &liberrors.ValidationError{ - ValidationType: helpers.RequestBodyValidation, - ValidationSubType: helpers.Schema, - Message: "schema does not pass validation", - Reason: fmt.Sprintf("The schema cannot be decoded: %s", renderErr.Error()), - SpecLine: schema.GoLow().GetRootNode().Line, - SpecCol: schema.GoLow().GetRootNode().Column, - HowToFix: liberrors.HowToFixInvalidSchema, - Context: string(renderedSchema), - }) - return false, validationErrors - } - - // MarshalYAMLInlineWithContext returns *yaml.Node (from NodeBuilder.Render) - renderedNode, _ = nodeIface.(*yaml.Node) - - // yaml.Node → map → JSON bytes (skips yaml.Marshal + yaml.Unmarshal round-trip) - var jsonMap map[string]interface{} - if renderedNode != nil { - _ = renderedNode.Decode(&jsonMap) - } - jsonSchema, _ := json.Marshal(jsonMap) - - // YAML bytes generated once for error messages / context 
strings - renderedSchema, _ = yaml.Marshal(renderedNode) - - path := "" - if schema.GoLow().GetIndex() != nil { - path = schema.GoLow().GetIndex().GetSpecAbsolutePath() - } - - var compileErr error - compiledSchema, compileErr = helpers.NewCompiledSchemaWithVersion(path, jsonSchema, s.options, version) - if compileErr != nil { - line := 1 - col := 0 - if schema.GoLow().Type.KeyNode != nil { - line = schema.GoLow().Type.KeyNode.Line - col = schema.GoLow().Type.KeyNode.Column - } - validationErrors = append(validationErrors, &liberrors.ValidationError{ - ValidationType: helpers.Schema, - ValidationSubType: helpers.Schema, - Message: "schema compilation failed", - Reason: fmt.Sprintf("Schema compilation failed: %s", compileErr.Error()), - SpecLine: line, - SpecCol: col, - HowToFix: liberrors.HowToFixInvalidSchema, - Context: string(renderedSchema), - }) - return false, validationErrors - } - - // Store in cache for subsequent validations of the same schema. - if canCache && compiledSchema != nil { - s.options.SchemaCache.Store(cacheKey, &cache.SchemaCacheEntry{ - Schema: schema, - RenderedInline: renderedSchema, - ReferenceSchema: string(renderedSchema), - RenderedJSON: jsonSchema, - CompiledSchema: compiledSchema, - RenderedNode: renderedNode, - }) - } - } - - if decodedObject == nil && len(payload) > 0 { - err := json.Unmarshal(payload, &decodedObject) - if err != nil { - // cannot decode the request body, so it's not valid - line := 1 - col := 0 - if schema.GoLow().Type.KeyNode != nil { - line = schema.GoLow().Type.KeyNode.Line - col = schema.GoLow().Type.KeyNode.Column - } - validationErrors = append(validationErrors, &liberrors.ValidationError{ - ValidationType: helpers.RequestBodyValidation, - ValidationSubType: helpers.Schema, - Message: "schema does not pass validation", - Reason: fmt.Sprintf("The schema cannot be decoded: %s", err.Error()), - SpecLine: line, - SpecCol: col, - HowToFix: liberrors.HowToFixInvalidSchema, - Context: string(renderedSchema), - }) - 
return false, validationErrors - } - } - - var schemaValidationErrors []*liberrors.SchemaValidationFailure - - if compiledSchema != nil && decodedObject != nil { - scErrs := compiledSchema.Validate(decodedObject) - if scErrs != nil { - - var jk *jsonschema.ValidationError - if errors.As(scErrs, &jk) { - - // flatten the validationErrors - schFlatErr := jk.BasicOutput().Errors - schemaValidationErrors = extractBasicErrors(schFlatErr, renderedSchema, - renderedNode, decodedObject, payload, jk, schemaValidationErrors) - } - line := 1 - col := 0 - if schema.GoLow().Type.KeyNode != nil { - line = schema.GoLow().Type.KeyNode.Line - col = schema.GoLow().Type.KeyNode.Column - } - - validationErrors = append(validationErrors, &liberrors.ValidationError{ - ValidationType: helpers.Schema, - Message: "schema does not pass validation", - Reason: "Schema failed to validate against the contract requirements", - SpecLine: line, - SpecCol: col, - SchemaValidationErrors: schemaValidationErrors, - HowToFix: liberrors.HowToFixInvalidSchema, - Context: string(renderedSchema), - }) - } - } - if len(validationErrors) > 0 { - return false, validationErrors - } - return true, nil -} - -func extractBasicErrors(schFlatErrs []jsonschema.OutputUnit, - renderedSchema []byte, renderedNode *yaml.Node, - decodedObject interface{}, - payload []byte, jk *jsonschema.ValidationError, - schemaValidationErrors []*liberrors.SchemaValidationFailure, -) []*liberrors.SchemaValidationFailure { - // Extract property name info once before processing errors (performance optimization) - propertyInfo := extractPropertyNameFromError(jk) - - // Determine root content node ONCE (not per-error). - // NodeBuilder.Render() returns MappingNode directly, no DocumentNode unwrapping needed. 
- var rootNode *yaml.Node - if renderedNode != nil { - rootNode = renderedNode - } else if len(renderedSchema) > 0 { - // Fallback: parse bytes ONCE - var docNode yaml.Node - _ = yaml.Unmarshal(renderedSchema, &docNode) - if len(docNode.Content) > 0 { - rootNode = docNode.Content[0] - } - } - - for q := range schFlatErrs { - er := schFlatErrs[q] - - errMsg := er.Error.Kind.LocalizedString(message.NewPrinter(language.Tag{})) - if helpers.IgnoreRegex.MatchString(errMsg) { - continue // ignore this error, it's useless tbh, utter noise. - } - if er.Error != nil { - - // locate the violated property in the schema - var located *yaml.Node - if rootNode != nil { - located = LocateSchemaPropertyNodeByJSONPath(rootNode, er.KeywordLocation) - } - - // extract the element specified by the instance - val := instanceLocationRegex.FindStringSubmatch(er.InstanceLocation) - var referenceObject string - - if len(val) > 0 { - referenceIndex, _ := strconv.Atoi(val[1]) - if reflect.ValueOf(decodedObject).Type().Kind() == reflect.Slice { - found := decodedObject.([]any)[referenceIndex] - recoded, _ := json.MarshalIndent(found, "", " ") - referenceObject = string(recoded) - } - } - if referenceObject == "" { - referenceObject = string(payload) - } - - violation := &liberrors.SchemaValidationFailure{ - Reason: errMsg, - FieldName: helpers.ExtractFieldNameFromStringLocation(er.InstanceLocation), - FieldPath: helpers.ExtractJSONPathFromStringLocation(er.InstanceLocation), - InstancePath: helpers.ConvertStringLocationToPathSegments(er.InstanceLocation), - KeywordLocation: er.KeywordLocation, - ReferenceSchema: string(renderedSchema), - ReferenceObject: referenceObject, - OriginalJsonSchemaError: jk, - } - // if we have a location within the schema, add it to the error - if located != nil { - line := located.Line - // if the located node is a map or an array, then the actual human interpretable - // line on which the violation occurred is the line of the key, not the value. 
- if located.Kind == yaml.MappingNode || located.Kind == yaml.SequenceNode { - if line > 0 { - line-- - } - } - - // location of the violation within the rendered schema. - violation.Line = line - violation.Column = located.Column - } else if rootNode != nil { - // handles property name validation errors that don't provide useful InstanceLocation - applyPropertyNameFallback(propertyInfo, rootNode, violation) - } - schemaValidationErrors = append(schemaValidationErrors, violation) - } - } - return schemaValidationErrors -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/schema_validation/validate_urlencoded.go b/vendor/github.com/pb33f/libopenapi-validator/schema_validation/validate_urlencoded.go deleted file mode 100644 index f908db3f429..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/schema_validation/validate_urlencoded.go +++ /dev/null @@ -1,392 +0,0 @@ -package schema_validation - -import ( - "encoding/json" - "fmt" - "log/slog" - "net/url" - "regexp" - "slices" - "sort" - "strconv" - "strings" - - "github.com/pb33f/libopenapi-validator/errors" - "github.com/pb33f/libopenapi-validator/helpers" - "github.com/pb33f/libopenapi/datamodel/high/base" - v3 "github.com/pb33f/libopenapi/datamodel/high/v3" - "github.com/pb33f/libopenapi/orderedmap" -) - -var rxReserved = regexp.MustCompile(`[:/?#\[\]@!$&'()*+,;=]`) - -func TransformURLEncodedToSchemaJSON(bodyString string, schema *base.Schema, encoding *orderedmap.Map[string, *v3.Encoding]) (map[string]any, []*errors.ValidationError) { - rawValues, err := url.ParseQuery(bodyString) - if err != nil { - return nil, []*errors.ValidationError{errors.InvalidURLEncodedParsing("empty form-urlencoded context", bodyString)} - } - - jsonMap := unflattenValues(rawValues) - - var validationErrors []*errors.ValidationError - - if schema != nil { - if schema.Properties != nil { - for pair := orderedmap.First(schema.Properties); pair != nil; pair = pair.Next() { - propName := pair.Key() - propSchema := 
pair.Value().Schema() - - var contentEncoding *v3.Encoding - var allowReserved bool - - if encoding != nil { - contentEncoding = encoding.GetOrZero(propName) - - if contentEncoding != nil { - allowReserved = contentEncoding.AllowReserved - } - } - - if val, exists := jsonMap[propName]; exists { - newVal, err := applyEncodingRules(val, contentEncoding, propSchema) - if err != nil { - contentType := "" - if contentEncoding != nil { - contentType = contentEncoding.ContentType - } - - validationErrors = append(validationErrors, errors.InvalidTypeEncoding(propSchema, propName, contentType)) - } else { - jsonMap[propName] = newVal - val = newVal - } - - validateEncodingRecursive(propName, val, allowReserved, &validationErrors, propSchema) - } - } - } - - coerced := coerceValue(jsonMap, schema) - if asMap, ok := coerced.(map[string]any); ok { - jsonMap = asMap - } - } - - return jsonMap, validationErrors -} - -func applyEncodingRules(data any, encoding *v3.Encoding, schema *base.Schema) (any, error) { - style := "form" - explode := true - contentType := "" - - if encoding != nil { - contentType = encoding.ContentType - - if encoding.Style != "" { - style = encoding.Style - contentType = "" - } - - if encoding.AllowReserved { - contentType = "" - } - - if encoding.Explode != nil { - explode = *encoding.Explode - contentType = "" - } else if style != "form" { - explode = false - } - } - - if contentType != "" && !IsURLEncodedContentType(contentType) && !strings.Contains(contentType, "text/plain") { - if strVal, ok := data.(string); ok { - if strings.Contains(contentType, helpers.JSONContentType) { - var parsed any - if err := json.Unmarshal([]byte(strVal), &parsed); err == nil { - return parsed, nil - } - return nil, fmt.Errorf("value matches content-type '%s' but could not be parsed", contentType) - } - } - } - - if isArraySchema(schema) { - if strVal, ok := data.(string); ok { - if !explode { - switch style { - case helpers.Form: - return strings.Split(strVal, ","), nil - 
case helpers.SpaceDelimited: - return strings.Split(strVal, " "), nil - case helpers.PipeDelimited: - return strings.Split(strVal, "|"), nil - } - } - } - } - - if style == helpers.DeepObject { - if _, ok := data.(map[string]any); !ok { - return data, nil - } - } - - return data, nil -} - -func unflattenValues(values url.Values) map[string]any { - result := make(map[string]any) - - for k, v := range values { - if strings.Contains(k, "[") { - buildDeepMap(result, k, v) - } else { - if len(v) == 1 { - result[k] = v[0] - } else { - result[k] = v - } - } - } - return result -} - -func buildDeepMap(root map[string]any, key string, value []string) { - parts := strings.FieldsFunc(key, func(r rune) bool { - return r == '[' || r == ']' - }) - - current := root - for i, part := range parts { - isLeaf := i == len(parts)-1 - - if isLeaf { - if len(value) == 1 { - current[part] = value[0] - } else { - current[part] = value - } - } else { - if _, ok := current[part]; !ok { - current[part] = make(map[string]any) - } - if nextMap, ok := current[part].(map[string]any); ok { - current = nextMap - } else { - return - } - } - } -} - -func validateEncodingRecursive(path string, val any, allowReserved bool, errs *[]*errors.ValidationError, schema *base.Schema) { - if allowReserved { - return - } - - switch v := val.(type) { - case string: - if rxReserved.MatchString(v) { - *errs = append(*errs, errors.ReservedURLEncodedValue(schema, path, v)) - } - case []any: - for i, item := range v { - validateEncodingRecursive(fmt.Sprintf("%s[%d]", path, i), item, allowReserved, errs, schema) - } - case map[string]any: - for k, item := range v { - validateEncodingRecursive(fmt.Sprintf("%s[%s]", path, k), item, allowReserved, errs, schema) - } - case []string: - for i, item := range v { - if rxReserved.MatchString(item) { - *errs = append(*errs, errors.ReservedURLEncodedValue(schema, fmt.Sprintf("%s[%d]", path, i), item)) - } - } - } -} - -func coerceValue(data any, schema *base.Schema) any { - if 
schema == nil { - return data - } - - targetTypes := []string{} - if len(schema.Type) > 0 { - targetTypes = append(targetTypes, schema.Type...) - } - - extractTypes := func(proxies []*base.SchemaProxy) { - for _, proxy := range proxies { - sch := proxy.Schema() - if len(sch.Type) > 0 { - targetTypes = append(targetTypes, sch.Type...) - } - } - } - extractTypes(schema.AllOf) - extractTypes(schema.OneOf) - extractTypes(schema.AnyOf) - - if len(targetTypes) == 0 { - return data - } - - for _, t := range targetTypes { - converted, ok := tryConvert(data, t, schema) - if ok { - return converted - } - } - return data -} - -func tryConvert(data any, targetType string, schema *base.Schema) (any, bool) { - var strVal string - var isString bool - - switch v := data.(type) { - case string: - strVal = v - isString = true - case []string: - if len(v) > 0 { - strVal = v[0] - isString = true - } - } - - switch targetType { - case helpers.Integer: - if !isString || strVal == "" { - return nil, false - } - i, err := strconv.ParseInt(strVal, 10, 64) - if err == nil { - return i, true - } - - case helpers.Number: - if !isString || strVal == "" { - return nil, false - } - f, err := strconv.ParseFloat(strVal, 64) - if err == nil { - return f, true - } - - case helpers.Boolean: - if !isString { - return nil, false - } - b, err := strconv.ParseBool(strVal) - if err == nil { - return b, true - } - - case helpers.String: - if isString { - return strVal, true - } - return fmt.Sprintf("%v", data), true - - case helpers.Array: - var arr []any - itemSchema := getSchemaItem(schema) - - if vSlice, ok := data.([]any); ok { - for _, s := range vSlice { - arr = append(arr, coerceValue(s, itemSchema)) - } - return arr, true - } - - if vStringSlice, ok := data.([]string); ok { - for _, s := range vStringSlice { - arr = append(arr, coerceValue(s, itemSchema)) - } - return arr, true - } - - if vMap, ok := data.(map[string]any); ok { - keys := make([]int, 0, len(vMap)) - mapIsArray := true - for k := 
range vMap { - idx, err := strconv.Atoi(k) - if err != nil { - mapIsArray = false - break - } - keys = append(keys, idx) - } - if mapIsArray { - sort.Ints(keys) - for _, k := range keys { - val := vMap[strconv.Itoa(k)] - arr = append(arr, coerceValue(val, itemSchema)) - } - return arr, true - } - } - - if isString { - arr = append(arr, coerceValue(strVal, itemSchema)) - return arr, true - } - - case helpers.Object: - if m, ok := data.(map[string]any); ok { - newMap := make(map[string]any) - for k, v := range m { - newMap[k] = v - } - if schema.Properties != nil { - for pair := orderedmap.First(schema.Properties); pair != nil; pair = pair.Next() { - propName := pair.Key() - if val, exists := newMap[propName]; exists { - newMap[propName] = coerceValue(val, pair.Value().Schema()) - } - } - } - return newMap, true - } - } - - return nil, false -} - -func isArraySchema(schema *base.Schema) bool { - if schema == nil { - return false - } - - return slices.Contains(schema.Type, helpers.Array) -} - -func getSchemaItem(schema *base.Schema) *base.Schema { - if schema.Items != nil && schema.Items.IsA() { - return schema.Items.A.Schema() - } - return nil -} - -func (v *urlEncodedValidator) validateURLEncodedWithVersion(schema *base.Schema, encoding *orderedmap.Map[string, *v3.Encoding], bodyString string, log *slog.Logger, version float32) (bool, []*errors.ValidationError) { - if schema == nil { - log.Info("schema is empty and cannot be validated") - return false, nil - } - - transformedJSON, prevalidationErrors := TransformURLEncodedToSchemaJSON(bodyString, schema, encoding) - if len(prevalidationErrors) > 0 { - return false, prevalidationErrors - } - - return v.schemaValidator.validateSchemaWithVersion(schema, nil, transformedJSON, log, version) -} - -func IsURLEncodedContentType(mediaType string) bool { - mt := strings.ToLower(strings.TrimSpace(mediaType)) - return strings.HasPrefix(mt, helpers.URLEncodedContentType) -} diff --git 
a/vendor/github.com/pb33f/libopenapi-validator/schema_validation/validate_xml.go b/vendor/github.com/pb33f/libopenapi-validator/schema_validation/validate_xml.go deleted file mode 100644 index 19eaa7da88f..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/schema_validation/validate_xml.go +++ /dev/null @@ -1,365 +0,0 @@ -// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT -package schema_validation - -import ( - "encoding/json" - "fmt" - "log/slog" - "strconv" - "strings" - - "github.com/pb33f/libopenapi/datamodel/high/base" - - xj "github.com/basgys/goxml2json" - - liberrors "github.com/pb33f/libopenapi-validator/errors" - "github.com/pb33f/libopenapi-validator/helpers" -) - -func (x *xmlValidator) validateXMLWithVersion(schema *base.Schema, xmlString string, log *slog.Logger, version float32) (bool, []*liberrors.ValidationError) { - if schema == nil { - log.Info("schema is empty and cannot be validated") - return false, nil - } - - // parse xml and transform to json structure matching schema - transformedJSON, prevalidationErrors := TransformXMLToSchemaJSON(xmlString, schema) - if len(prevalidationErrors) > 0 { - return false, prevalidationErrors - } - - // validate transformed json against schema using existing validator - return x.schemaValidator.validateSchemaWithVersion(schema, nil, transformedJSON, log, version) -} - -// TransformXMLToSchemaJSON converts xml to json structure matching openapi schema. -// applies xml object transformations: name, attribute, wrapped. -func TransformXMLToSchemaJSON(xmlString string, schema *base.Schema) (any, []*liberrors.ValidationError) { - if xmlString == "" { - return nil, []*liberrors.ValidationError{liberrors.InvalidXMLParsing("empty xml content", xmlString)} - } - - // parse xml using goxml2json. 
we convert types manually - jsonBuf, err := xj.Convert(strings.NewReader(xmlString)) - if err != nil { - return nil, []*liberrors.ValidationError{liberrors.InvalidXMLParsing(fmt.Sprintf("malformed xml: %s", err.Error()), xmlString)} - } - - jsonBytes := jsonBuf.Bytes() - - // the smallest valid XML possible "" generates a 10 bytes buffer. - // any other invalid XML generates a smaller buffer - if len(jsonBytes) < 10 { - return nil, []*liberrors.ValidationError{liberrors.InvalidXMLParsing("malformed xml", xmlString)} - } - - var rawJSON any - if err := json.Unmarshal(jsonBytes, &rawJSON); err != nil { - return nil, []*liberrors.ValidationError{liberrors.InvalidXMLParsing(fmt.Sprintf("failed to decode converted xml to json: %s", err.Error()), xmlString)} - } - - xmlNsMap := make(map[string]string, 2) - - // apply openapi xml object transformations - return applyXMLTransformations(rawJSON, schema, &xmlNsMap) -} - -func validateXmlNs(dataMap *map[string]any, schema *base.Schema, propName string, xmlNsMap *map[string]string) []*liberrors.ValidationError { - var validationErrors []*liberrors.ValidationError - - if dataMap == nil || schema == nil || xmlNsMap == nil { - return validationErrors - } - - if propName != "" { - if val, exists := (*dataMap)[propName]; exists { - if converted, ok := val.(map[string]any); ok { - dataMap = &converted - } - } - } - - if schema.XML.Prefix != "" { - attrKey := "-" + schema.XML.Prefix - - val, exists := (*dataMap)[attrKey] - - if exists { - if ns, ok := val.(string); ok { - (*xmlNsMap)[schema.XML.Prefix] = ns - (*xmlNsMap)[ns] = schema.XML.Prefix - - if schema.XML.Namespace != "" && schema.XML.Namespace != ns { - validationErrors = append(validationErrors, - liberrors.InvalidNamespace(schema, ns, schema.XML.Namespace, schema.XML.Prefix)) - } - } - - delete((*dataMap), attrKey) - } else { - validationErrors = append(validationErrors, liberrors.MissingPrefix(schema, schema.XML.Prefix)) - } - } - - if schema.XML.Namespace != "" { - _, 
exists := (*xmlNsMap)[schema.XML.Namespace] - - if !exists { - validationErrors = append(validationErrors, liberrors.MissingNamespace(schema, schema.XML.Namespace)) - } - } - - return validationErrors -} - -func convertBasedOnSchema(propName, xmlName string, propValue any, schema *base.Schema, xmlNsMap *map[string]string) (any, []*liberrors.ValidationError) { - var xmlNsErrors []*liberrors.ValidationError - - types := schema.Type - - extractTypes := func(proxies []*base.SchemaProxy) { - for _, proxy := range proxies { - sch := proxy.Schema() - if len(sch.Type) > 0 { - types = append(types, sch.Type...) - } - } - } - - extractTypes(schema.AllOf) - extractTypes(schema.OneOf) - extractTypes(schema.AnyOf) - - convertedValue := propValue - -typesLoop: - for _, pType := range types { - // because in XML everything is a string, we try to convert the value to the - // actual expected type, so the normal schema validation should pass with correct types - switch pType { - case helpers.Integer: - textValue, isString := propValue.(string) - - if isString { - converted, err := strconv.ParseInt(textValue, 10, 64) - - if err == nil { - convertedValue = converted - break typesLoop - } - } - case helpers.Number: - textValue, isString := propValue.(string) - - if isString { - converted, err := strconv.ParseFloat(textValue, 64) - if err == nil { - convertedValue = converted - break typesLoop - } - } - - case helpers.Boolean: - textValue, isString := propValue.(string) - - if isString { - converted, err := strconv.ParseBool(textValue) - if err == nil { - convertedValue = converted - break typesLoop - } - } - - case helpers.Array: - convertedValue = propValue - - if schema.XML != nil && schema.XML.Wrapped { - convertedValue = unwrapArrayElement(propValue, propName, schema) - } - - if schema.Items != nil && schema.Items.A != nil { - itemSchema := schema.Items.A.Schema() - - arr, isArr := convertedValue.([]any) - - if !isArr { - arr = []any{ - convertedValue, - } - } - - for index, item 
:= range arr { - converted, errs := convertBasedOnSchema(propName, xmlName, item, itemSchema, xmlNsMap) - - if len(errs) > 0 { - xmlNsErrors = append(xmlNsErrors, errs...) - } - - arr[index] = converted - } - - convertedValue = arr - break typesLoop - } - case helpers.Object: - objectValue, isObject := propValue.(map[string]any) - - if isObject { - newValue, xmlErrors := applyXMLTransformations(objectValue, schema, xmlNsMap) - - if len(xmlErrors) > 0 { - xmlNsErrors = append(xmlNsErrors, xmlErrors...) - continue typesLoop - } - - convertedValue = newValue - break typesLoop - } - } - } - - return convertedValue, xmlNsErrors -} - -// applyXMLTransformations applies openapi xml object rules to match json schema. -// handles xml.name (root unwrapping), xml.attribute (dash prefix), xml.wrapped (array unwrapping), -// xml.prefix (check existance), xml.namespace (check if exists and match). -// we delete all attributes, prefixes, and namespaces found in the data interface; therefore, undeclared items -// are sent in the body for validation, so that 'additionalProperties: false' can detect it. 
-func applyXMLTransformations(data any, schema *base.Schema, xmlNsMap *map[string]string) (any, []*liberrors.ValidationError) { - if schema == nil || data == nil || xmlNsMap == nil { - return data, nil - } - - // unwrap root element if xml.name is set on schema - if schema.XML != nil && schema.XML.Name != "" { - if dataMap, ok := data.(map[string]any); ok { - if wrapped, exists := dataMap[schema.XML.Name]; exists { - data = wrapped - } - } - } - - var xmlNsErrors []*liberrors.ValidationError - - // transform properties based on their xml configurations - if dataMap, ok := data.(map[string]any); ok { - if schema.Properties == nil || schema.Properties.Len() == 0 { - if schema.XML != nil && (schema.XML.Prefix != "" || schema.XML.Namespace != "") { - namespaceErrors := validateXmlNs(&dataMap, schema, "", xmlNsMap) - - if len(namespaceErrors) > 0 { - xmlNsErrors = append(xmlNsErrors, namespaceErrors...) - } else { - if content, has := dataMap["#content"]; has { - if stringContent, ok := content.(string); ok { - data = stringContent - } - } - } - } - - return data, xmlNsErrors - } - - for pair := schema.Properties.First(); pair != nil; pair = pair.Next() { - propName := pair.Key() - propSchemaProxy := pair.Value() - propSchema := propSchemaProxy.Schema() - if propSchema == nil { - continue - } - - xmlName := propName - - if propSchema.XML != nil { - // determine xml element name (defaults to property name) - if propSchema.XML.Name != "" { - xmlName = propSchema.XML.Name - } - } - - if propSchema.XML != nil { - namespaceErrors := validateXmlNs(&dataMap, propSchema, xmlName, xmlNsMap) - - if len(namespaceErrors) > 0 { - xmlNsErrors = append(xmlNsErrors, namespaceErrors...) 
- } - - // handle xml.attribute: true - attributes are prefixed with dash - if propSchema.XML.Attribute { - attrKey := "-" + xmlName - if val, exists := dataMap[attrKey]; exists { - // If the value is an attribute, it cannot have a namespace - convertedValue, _ := convertBasedOnSchema(propName, xmlName, val, propSchema, xmlNsMap) - dataMap[propName] = convertedValue - delete(dataMap, attrKey) - continue - } - } - } - - // handle regular elements - if val, exists := dataMap[xmlName]; exists { - if mapObject, ok := val.(map[string]any); ok { - if content, has := mapObject["#content"]; has { - if stringContent, ok := content.(string); ok { - val = stringContent - } - } - } - - convertedValue, nsErrors := convertBasedOnSchema(propName, xmlName, val, propSchema, xmlNsMap) - - if len(nsErrors) > 0 { - xmlNsErrors = append(xmlNsErrors, nsErrors...) - } - - dataMap[propName] = convertedValue - - if propName != xmlName { - delete(dataMap, xmlName) - } - } - } - } - - return data, xmlNsErrors -} - -// unwrapArrayElement removes wrapping element from xml arrays when xml.wrapped is true. -// example: {"items": {"item": [...]}} becomes [...] -func unwrapArrayElement(val any, itemName string, propSchema *base.Schema) any { - wrapMap, ok := val.(map[string]any) - if !ok { - return val - } - - if propSchema.XML.Name != "" { - itemName = propSchema.XML.Name - } - - // determine item element name - if propSchema.Items != nil && propSchema.Items.A != nil { - itemSchema := propSchema.Items.A.Schema() - if itemSchema != nil && itemSchema.XML != nil && itemSchema.XML.Name != "" { - itemName = itemSchema.XML.Name - } - } - - // unwrap: look for item element inside wrapper - if unwrapped, exists := wrapMap[itemName]; exists { - return unwrapped - } - - return val -} - -// IsXMLContentType checks if a media type string represents xml content. 
-func IsXMLContentType(mediaType string) bool { - mt := strings.ToLower(strings.TrimSpace(mediaType)) - return strings.HasPrefix(mt, "application/xml") || - strings.HasPrefix(mt, "text/xml") || - strings.HasSuffix(mt, "+xml") -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/schema_validation/xml_validator.go b/vendor/github.com/pb33f/libopenapi-validator/schema_validation/xml_validator.go deleted file mode 100644 index f9b2ef6e44e..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/schema_validation/xml_validator.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package schema_validation - -import ( - "log/slog" - "os" - - "github.com/pb33f/libopenapi/datamodel/high/base" - - "github.com/pb33f/libopenapi-validator/config" - liberrors "github.com/pb33f/libopenapi-validator/errors" -) - -// XMLValidator is an interface that defines methods for validating XML against OpenAPI schemas. -// There are 2 methods for validating XML: -// -// ValidateXMLString validates an XML string against a schema, applying OpenAPI xml object transformations. -// ValidateXMLStringWithVersion - version-aware XML validation that allows OpenAPI 3.0 keywords when version is specified. -type XMLValidator interface { - // ValidateXMLString validates an XML string against an OpenAPI schema, applying xml object transformations. - // Uses OpenAPI 3.1+ validation by default (strict JSON Schema compliance). - ValidateXMLString(schema *base.Schema, xmlString string) (bool, []*liberrors.ValidationError) - - // ValidateXMLStringWithVersion validates an XML string with version-specific rules. - // When version is 3.0, OpenAPI 3.0-specific keywords like 'nullable' are allowed and processed. - // When version is 3.1+, OpenAPI 3.0-specific keywords like 'nullable' will cause validation to fail. 
- ValidateXMLStringWithVersion(schema *base.Schema, xmlString string, version float32) (bool, []*liberrors.ValidationError) -} - -type xmlValidator struct { - schemaValidator *schemaValidator - logger *slog.Logger -} - -// NewXMLValidatorWithLogger creates a new XMLValidator instance with a custom logger. -func NewXMLValidatorWithLogger(logger *slog.Logger, opts ...config.Option) XMLValidator { - options := config.NewValidationOptions(opts...) - // Create an internal schema validator for JSON validation after XML transformation - sv := &schemaValidator{options: options, logger: logger} - return &xmlValidator{schemaValidator: sv, logger: logger} -} - -// NewXMLValidator creates a new XMLValidator instance with default logging configuration. -func NewXMLValidator(opts ...config.Option) XMLValidator { - logger := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{ - Level: slog.LevelError, - })) - return NewXMLValidatorWithLogger(logger, opts...) -} - -func (x *xmlValidator) ValidateXMLString(schema *base.Schema, xmlString string) (bool, []*liberrors.ValidationError) { - return x.validateXMLWithVersion(schema, xmlString, x.logger, 3.1) -} - -func (x *xmlValidator) ValidateXMLStringWithVersion(schema *base.Schema, xmlString string, version float32) (bool, []*liberrors.ValidationError) { - return x.validateXMLWithVersion(schema, xmlString, x.logger, version) -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/strict/array_validator.go b/vendor/github.com/pb33f/libopenapi-validator/strict/array_validator.go deleted file mode 100644 index 06605307462..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/strict/array_validator.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2023-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// SPDX-License-Identifier: MIT - -package strict - -import ( - "strconv" - - "github.com/pb33f/libopenapi/datamodel/high/base" -) - -// validateArray checks an array value against a schema for undeclared 
properties -// within array items. It handles: -// - items (schema for all items or boolean) -// - prefixItems (tuple validation with positional schemas) -// - unevaluatedItems (items not covered by items/prefixItems) -func (v *Validator) validateArray(ctx *traversalContext, schema *base.Schema, data []any) []UndeclaredValue { - if len(data) == 0 { - return nil - } - - var undeclared []UndeclaredValue - - // Check for items: false - // When items: false, no items are allowed. If base validation passed, the - // array should be empty. But we explicitly check in case it wasn't caught. - if schema.Items != nil && schema.Items.IsB() && !schema.Items.B { - for i := range data { - itemPath := buildArrayPath(ctx.path, i) - undeclared = append(undeclared, - newUndeclaredItem(itemPath, strconv.Itoa(i), data[i], ctx.direction)) - } - return undeclared - } - - prefixLen := 0 - - // handle prefixItems first (tuple validation) - if len(schema.PrefixItems) > 0 { - for i, itemProxy := range schema.PrefixItems { - if i >= len(data) { - break - } - - itemPath := buildArrayPath(ctx.path, i) - itemCtx := ctx.withPath(itemPath) - - if itemCtx.shouldIgnore() { - prefixLen++ - continue - } - - itemSchema := itemProxy.Schema() - if itemSchema != nil { - undeclared = append(undeclared, v.validateValue(itemCtx, itemSchema, data[i])...) - } - prefixLen++ - } - } - - // handle items for remaining elements (after prefixItems) - if schema.Items != nil && schema.Items.A != nil { - itemProxy := schema.Items.A - itemSchema := itemProxy.Schema() - - if itemSchema != nil { - for i := prefixLen; i < len(data); i++ { - itemPath := buildArrayPath(ctx.path, i) - itemCtx := ctx.withPath(itemPath) - - if itemCtx.shouldIgnore() { - continue - } - - undeclared = append(undeclared, v.validateValue(itemCtx, itemSchema, data[i])...) - } - } - } - - // handle unevaluatedItems with schema. - // unevaluatedItems: false is handled by base validation. 
- // unevaluatedItems: {schema} means items matching the schema are valid. - // note: this doesn't account for items evaluated by `contains`. for strict - // validation this is acceptable as we check conservatively. - if schema.UnevaluatedItems != nil && schema.UnevaluatedItems.Schema() != nil { - // this applies to items not covered by items or prefixItems. - // if there's no items schema, unevaluatedItems applies to: - // - items after prefixItems (if prefixItems exists) - // - all items (if neither items nor prefixItems exists) - if schema.Items == nil { - unevalSchema := schema.UnevaluatedItems.Schema() - startIndex := len(schema.PrefixItems) // 0 if no prefixItems - for i := startIndex; i < len(data); i++ { - itemPath := buildArrayPath(ctx.path, i) - itemCtx := ctx.withPath(itemPath) - - if itemCtx.shouldIgnore() { - continue - } - - undeclared = append(undeclared, v.validateValue(itemCtx, unevalSchema, data[i])...) - } - } - } - - return undeclared -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/strict/headers.go b/vendor/github.com/pb33f/libopenapi-validator/strict/headers.go deleted file mode 100644 index 823848ca44b..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/strict/headers.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2023-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// SPDX-License-Identifier: MIT - -package strict - -import "strings" - -// isHeaderIgnored checks if a header name should be ignored in strict validation. -// Uses the effective ignored headers list from options (defaults, replaced, or merged). -// Set-Cookie is direction-aware: ignored in responses but reported in requests. 
-func (v *Validator) isHeaderIgnored(name string, direction Direction) bool { - lower := strings.ToLower(name) - - // Set-Cookie is expected in responses but unexpected in requests - if lower == "set-cookie" { - return direction == DirectionResponse - } - - // Check effective ignored list - for _, h := range v.getEffectiveIgnoredHeaders() { - if strings.ToLower(h) == lower { - return true - } - } - return false -} - -// getEffectiveIgnoredHeaders returns the list of headers to ignore based on -// configuration. Uses the ValidationOptions method for consistency. -func (v *Validator) getEffectiveIgnoredHeaders() []string { - if v.options == nil { - return nil - } - return v.options.GetEffectiveStrictIgnoredHeaders() -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/strict/matcher.go b/vendor/github.com/pb33f/libopenapi-validator/strict/matcher.go deleted file mode 100644 index 89bb2c3b3c7..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/strict/matcher.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2023-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// SPDX-License-Identifier: MIT - -package strict - -import ( - "errors" - "fmt" - - "github.com/pb33f/libopenapi/datamodel/high/base" - "github.com/pb33f/libopenapi/utils" - "github.com/santhosh-tekuri/jsonschema/v6" - - "github.com/pb33f/libopenapi-validator/helpers" -) - -// dataMatchesSchema checks if the given data matches the schema using -// JSON Schema validation. This is used for: -// - oneOf/anyOf variant selection (finding which variant the data matches) -// - if/then/else condition evaluation -// - additionalProperties schema matching -// -// The method uses version-aware schema compilation to handle OpenAPI 3.0 vs 3.1 -// differences (especially nullable handling). -// -// Returns (true, nil) if data matches the schema. -// Returns (false, nil) if data does not match the schema. -// Returns (false, error) if schema compilation failed. 
-func (v *Validator) dataMatchesSchema(schema *base.Schema, data any) (bool, error) { - if schema == nil { - return true, nil // No schema means anything matches - } - - compiled, err := v.getCompiledSchema(schema) - if err != nil { - return false, err - } - return compiled.Validate(data) == nil, nil -} - -// getCompiledSchema returns a compiled JSON Schema for the given high-level schema. -// It checks multiple cache levels: -// 1. Global SchemaCache (if configured in options) -// 2. Local instance cache (for reuse within this validation call) -// 3. Compiles on-the-fly if not cached -// -// Returns the compiled schema and nil error on success. -// Returns nil schema and nil error if the input schema is nil. -// Returns nil schema and error if compilation failed. -func (v *Validator) getCompiledSchema(schema *base.Schema) (*jsonschema.Schema, error) { - if schema == nil || schema.GoLow() == nil { - return nil, nil - } - - hash := schema.GoLow().Hash() - hashKey := fmt.Sprintf("%x", hash) - - // try global cache first (if available) - if v.options != nil && v.options.SchemaCache != nil { - if cached, ok := v.options.SchemaCache.Load(hash); ok && cached != nil && cached.CompiledSchema != nil { - return cached.CompiledSchema, nil - } - } - - // try local instance cache - if compiled, ok := v.localCache[hashKey]; ok { - return compiled, nil - } - - // cache miss - compile on-the-fly with context-aware rendering - compiled, err := v.compileSchema(schema) - if err != nil { - return nil, err - } - if compiled != nil { - v.localCache[hashKey] = compiled - } - - return compiled, nil -} - -// compileSchema renders and compiles a schema for validation. -// Uses RenderInlineWithContext for safe cycle handling. -// -// Returns the compiled schema and nil error on success. -// Returns nil schema and error if any step fails (render, conversion, compilation). 
-func (v *Validator) compileSchema(schema *base.Schema) (*jsonschema.Schema, error) { - if schema == nil { - return nil, nil - } - - schemaHash := fmt.Sprintf("%x", schema.GoLow().Hash()) - - // use RenderInlineWithContext for safe cycle handling - renderedSchema, err := schema.RenderInlineWithContext(v.renderCtx) - if err != nil { - return nil, fmt.Errorf("strict: schema render failed (hash=%s): %w", schemaHash, err) - } - - jsonSchema, convErr := utils.ConvertYAMLtoJSON(renderedSchema) - if convErr != nil { - return nil, fmt.Errorf("strict: YAML to JSON conversion failed: %w", convErr) - } - if len(jsonSchema) == 0 { - return nil, errors.New("strict: schema rendered to empty JSON") - } - - schemaName := fmt.Sprintf("strict-match-%s", schemaHash) - compiled, err := helpers.NewCompiledSchemaWithVersion( - schemaName, - jsonSchema, - v.options, - v.version, - ) - if err != nil { - return nil, fmt.Errorf("strict: schema compilation failed (name=%s): %w", schemaName, err) - } - - return compiled, nil -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/strict/polymorphic.go b/vendor/github.com/pb33f/libopenapi-validator/strict/polymorphic.go deleted file mode 100644 index d229f338321..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/strict/polymorphic.go +++ /dev/null @@ -1,474 +0,0 @@ -// Copyright 2023-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// SPDX-License-Identifier: MIT - -package strict - -import ( - "regexp" - "strings" - - "github.com/pb33f/libopenapi/datamodel/high/base" -) - -// validatePolymorphic handles allOf, oneOf, and anyOf schemas. -// For allOf: merge all schemas and validate against all. -// For oneOf/anyOf: find the matching variant and validate against it. 
-func (v *Validator) validatePolymorphic(ctx *traversalContext, schema *base.Schema, data map[string]any) []UndeclaredValue { - var undeclared []UndeclaredValue - - // Handle allOf first - data must match ALL schemas - if len(schema.AllOf) > 0 { - undeclared = append(undeclared, v.validateAllOf(ctx, schema, data)...) - } - - // Handle oneOf - data must match exactly ONE schema - if len(schema.OneOf) > 0 { - undeclared = append(undeclared, v.validateOneOf(ctx, schema, data)...) - } - - // Handle anyOf - data must match at least ONE schema - if len(schema.AnyOf) > 0 { - undeclared = append(undeclared, v.validateAnyOf(ctx, schema, data)...) - } - - // Also validate any direct properties on the parent schema - if schema.Properties != nil { - declared, patterns := v.collectDeclaredProperties(schema, data) - - // Check properties that aren't handled by allOf/oneOf/anyOf - for propName := range data { - // Skip if declared directly or via patterns - if isPropertyDeclared(propName, declared, patterns) { - continue - } - - // Check if it's declared in any of the allOf schemas - if v.isPropertyDeclaredInAllOf(schema.AllOf, propName) { - continue - } - - // For oneOf/anyOf, we've already validated against the matching variant - } - } - - return undeclared -} - -// validateAllOf validates data against all schemas in allOf. -// Collects properties from all schemas as declared. 
-func (v *Validator) validateAllOf(ctx *traversalContext, schema *base.Schema, data map[string]any) []UndeclaredValue { - var undeclared []UndeclaredValue - - // Collect declared properties from ALL schemas in allOf - allDeclared := make(map[string]*declaredProperty) - var allPatterns []*regexp.Regexp - - for _, schemaProxy := range schema.AllOf { - if schemaProxy == nil { - continue - } - - subSchema := schemaProxy.Schema() - if subSchema == nil { - continue - } - - declared, patterns := v.collectDeclaredProperties(subSchema, data) - for name, prop := range declared { - if _, exists := allDeclared[name]; !exists { - allDeclared[name] = prop - } - } - - allPatterns = append(allPatterns, patterns...) - } - - // collect from parent schema - declared, patterns := v.collectDeclaredProperties(schema, data) - for name, prop := range declared { - if _, exists := allDeclared[name]; !exists { - allDeclared[name] = prop - } - } - - allPatterns = append(allPatterns, patterns...) - - // check if strict mode should report for this combined schema - if !v.shouldReportUndeclaredForAllOf(schema) { - // Still recurse into declared properties - return v.recurseIntoAllOfDeclaredProperties(ctx, schema.AllOf, data, allDeclared) - } - - // Check each property in data - for propName, propValue := range data { - propPath := buildPath(ctx.path, propName) - propCtx := ctx.withPath(propPath) - - if propCtx.shouldIgnore() { - continue - } - - // Check if declared in merged schema - if isPropertyDeclared(propName, allDeclared, allPatterns) { - // Recurse into the property - propSchema := v.findPropertySchemaInAllOf(schema.AllOf, propName, allDeclared) - if propSchema != nil { - if v.shouldSkipProperty(propSchema, ctx.direction) { - continue - } - undeclared = append(undeclared, v.validateValue(propCtx, propSchema, propValue)...) 
- } - continue - } - - // Not declared - report as undeclared - undeclared = append(undeclared, - newUndeclaredProperty(propPath, propName, propValue, getDeclaredPropertyNames(allDeclared), ctx.direction, schema)) - } - - return undeclared -} - -// validateOneOf finds the matching oneOf variant and validates against it. -// Parent schema properties are merged with the variant's properties. -func (v *Validator) validateOneOf(ctx *traversalContext, schema *base.Schema, data map[string]any) []UndeclaredValue { - var matchingVariant *base.Schema - - // discriminator is present, use it to select the variant - if schema.Discriminator != nil { - matchingVariant = v.selectByDiscriminator(schema, schema.OneOf, data) - } - - // no discriminator or no match: find matching variant by validation - if matchingVariant == nil { - matchingVariant = v.findMatchingVariant(schema.OneOf, data) - } - - if matchingVariant == nil { - // No match found - base validation would report this error - return nil - } - - // Validate against variant, but filter out properties declared in parent - return v.validateVariantWithParent(ctx, schema, matchingVariant, data) -} - -// validateAnyOf finds matching anyOf variants and validates against them. -// Parent schema properties are merged with the variant's properties. 
-func (v *Validator) validateAnyOf(ctx *traversalContext, schema *base.Schema, data map[string]any) []UndeclaredValue { - var matchingVariant *base.Schema - - // If discriminator is present, use it to select the variant - if schema.Discriminator != nil { - matchingVariant = v.selectByDiscriminator(schema, schema.AnyOf, data) - } - - // No discriminator or no match: find matching variant by validation - if matchingVariant == nil { - matchingVariant = v.findMatchingVariant(schema.AnyOf, data) - } - - if matchingVariant == nil { - // No match found - base validation would report this error - return nil - } - - // Validate against variant, but filter out properties declared in parent - return v.validateVariantWithParent(ctx, schema, matchingVariant, data) -} - -// validateVariantWithParent validates data against a variant schema while also -// considering properties declared in the parent schema. This ensures parent -// properties are not reported as undeclared when using oneOf/anyOf. -func (v *Validator) validateVariantWithParent(ctx *traversalContext, parent *base.Schema, variant *base.Schema, data map[string]any) []UndeclaredValue { - var undeclared []UndeclaredValue - - // Collect declared properties from parent schema - parentDeclared, parentPatterns := v.collectDeclaredProperties(parent, data) - - // Collect declared properties from variant schema - variantDeclared, variantPatterns := v.collectDeclaredProperties(variant, data) - - // Merge: parent + variant - allDeclared := make(map[string]*declaredProperty) - for name, prop := range parentDeclared { - allDeclared[name] = prop - } - for name, prop := range variantDeclared { - allDeclared[name] = prop - } - allPatterns := append(parentPatterns, variantPatterns...) 
- - // Check if we should report undeclared (skip if additionalProperties: false) - if !v.shouldReportUndeclared(variant) && !v.shouldReportUndeclared(parent) { - // Still recurse into declared properties - return v.recurseIntoDeclaredPropertiesWithMerged(ctx, variant, parent, data, allDeclared) - } - - // Check each property in data - for propName, propValue := range data { - propPath := buildPath(ctx.path, propName) - propCtx := ctx.withPath(propPath) - - if propCtx.shouldIgnore() { - continue - } - - // Check if declared in merged schema (parent + variant) - if isPropertyDeclared(propName, allDeclared, allPatterns) { - // Find the property schema (prefer variant, fallback to parent) - propSchema := v.findPropertySchemaInMerged(variant, parent, propName, allDeclared) - if propSchema != nil { - if v.shouldSkipProperty(propSchema, ctx.direction) { - continue - } - undeclared = append(undeclared, v.validateValue(propCtx, propSchema, propValue)...) - } - continue - } - - // Not declared - report as undeclared - // Use variant schema location if available, otherwise fall back to parent - locationSchema := variant - if locationSchema == nil || locationSchema.GoLow() == nil { - locationSchema = parent - } - undeclared = append(undeclared, - newUndeclaredProperty(propPath, propName, propValue, getDeclaredPropertyNames(allDeclared), ctx.direction, locationSchema)) - } - - return undeclared -} - -// findPropertySchemaInMerged finds the schema for a property, preferring variant over parent. -// Checks explicit properties first, then patternProperties. 
-func (v *Validator) findPropertySchemaInMerged(variant, parent *base.Schema, propName string, declared map[string]*declaredProperty) *base.Schema { - // Check explicit declared first - if prop, ok := declared[propName]; ok && prop.proxy != nil { - return prop.proxy.Schema() - } - - // Check variant schema explicit properties - if variant != nil && variant.Properties != nil { - if propProxy, exists := variant.Properties.Get(propName); exists && propProxy != nil { - return propProxy.Schema() - } - } - - // Check parent schema explicit properties - if parent != nil && parent.Properties != nil { - if propProxy, exists := parent.Properties.Get(propName); exists && propProxy != nil { - return propProxy.Schema() - } - } - - // Check variant patternProperties - if variant != nil { - if propProxy := v.getPatternPropertySchema(variant, propName); propProxy != nil { - return propProxy.Schema() - } - } - - // Check parent patternProperties - if parent != nil { - if propProxy := v.getPatternPropertySchema(parent, propName); propProxy != nil { - return propProxy.Schema() - } - } - - return nil -} - -// recurseIntoDeclaredPropertiesWithMerged recurses into properties from merged parent+variant. -func (v *Validator) recurseIntoDeclaredPropertiesWithMerged(ctx *traversalContext, variant, parent *base.Schema, data map[string]any, declared map[string]*declaredProperty) []UndeclaredValue { - var undeclared []UndeclaredValue - - for propName, propValue := range data { - propPath := buildPath(ctx.path, propName) - propCtx := ctx.withPath(propPath) - - if propCtx.shouldIgnore() { - continue - } - - propSchema := v.findPropertySchemaInMerged(variant, parent, propName, declared) - if propSchema != nil { - if v.shouldSkipProperty(propSchema, ctx.direction) { - continue - } - undeclared = append(undeclared, v.validateValue(propCtx, propSchema, propValue)...) - } - } - - return undeclared -} - -// selectByDiscriminator uses the discriminator to select the appropriate variant. 
-func (v *Validator) selectByDiscriminator(schema *base.Schema, variants []*base.SchemaProxy, data map[string]any) *base.Schema { - if schema.Discriminator == nil { - return nil - } - - propName := schema.Discriminator.PropertyName - if propName == "" { - return nil - } - - discriminatorValue, ok := data[propName] - if !ok { - return nil - } - - valueStr, ok := discriminatorValue.(string) - if !ok { - return nil - } - - // check mapping first - if schema.Discriminator.Mapping != nil { - for pair := schema.Discriminator.Mapping.First(); pair != nil; pair = pair.Next() { - if pair.Key() == valueStr { - // The mapping value is a reference like "#/components/schemas/Dog" - mappedRef := pair.Value() - for _, variantProxy := range variants { - if variantProxy.IsReference() && variantProxy.GetReference() == mappedRef { - return variantProxy.Schema() - } - } - } - } - } - - // no mapping match, try to match by schema name in reference - for _, variantProxy := range variants { - if variantProxy.IsReference() { - ref := variantProxy.GetReference() - // Extract schema name from reference like "#/components/schemas/Dog" - parts := strings.Split(ref, "/") - if len(parts) > 0 && parts[len(parts)-1] == valueStr { - return variantProxy.Schema() - } - } - } - - return nil -} - -// findMatchingVariant finds the first variant that the data validates against. -func (v *Validator) findMatchingVariant(variants []*base.SchemaProxy, data map[string]any) *base.Schema { - for _, variantProxy := range variants { - if variantProxy == nil { - continue - } - - variantSchema := variantProxy.Schema() - if variantSchema == nil { - continue - } - - matches, _ := v.dataMatchesSchema(variantSchema, data) - if matches { - return variantSchema - } - } - return nil -} - -// isPropertyDeclaredInAllOf checks if a property is declared in any allOf schema. 
-func (v *Validator) isPropertyDeclaredInAllOf(allOf []*base.SchemaProxy, propName string) bool { - for _, schemaProxy := range allOf { - if schemaProxy == nil { - continue - } - - subSchema := schemaProxy.Schema() - if subSchema == nil { - continue - } - - if subSchema.Properties != nil { - if _, exists := subSchema.Properties.Get(propName); exists { - return true - } - } - } - return false -} - -// shouldReportUndeclaredForAllOf checks if any schema in allOf disables additional properties. -func (v *Validator) shouldReportUndeclaredForAllOf(schema *base.Schema) bool { - // Check parent schema - if schema.AdditionalProperties != nil && schema.AdditionalProperties.IsB() && !schema.AdditionalProperties.B { - return false - } - - // Check each allOf schema - for _, schemaProxy := range schema.AllOf { - if schemaProxy == nil { - continue - } - - subSchema := schemaProxy.Schema() - if subSchema == nil { - continue - } - - if subSchema.AdditionalProperties != nil && subSchema.AdditionalProperties.IsB() && !subSchema.AdditionalProperties.B { - return false - } - } - - return true -} - -// findPropertySchemaInAllOf finds the schema for a property in allOf schemas. -func (v *Validator) findPropertySchemaInAllOf(allOf []*base.SchemaProxy, propName string, declared map[string]*declaredProperty) *base.Schema { - // Check explicit declared first - if prop, ok := declared[propName]; ok && prop.proxy != nil { - return prop.proxy.Schema() - } - - // Search in allOf schemas - for _, schemaProxy := range allOf { - if schemaProxy == nil { - continue - } - - subSchema := schemaProxy.Schema() - if subSchema == nil { - continue - } - - if subSchema.Properties != nil { - if propProxy, exists := subSchema.Properties.Get(propName); exists && propProxy != nil { - return propProxy.Schema() - } - } - } - - return nil -} - -// recurseIntoAllOfDeclaredProperties recurses into properties without checking for undeclared. 
-func (v *Validator) recurseIntoAllOfDeclaredProperties(ctx *traversalContext, allOf []*base.SchemaProxy, data map[string]any, declared map[string]*declaredProperty) []UndeclaredValue { - var undeclared []UndeclaredValue - - for propName, propValue := range data { - propPath := buildPath(ctx.path, propName) - propCtx := ctx.withPath(propPath) - - if propCtx.shouldIgnore() { - continue - } - - propSchema := v.findPropertySchemaInAllOf(allOf, propName, declared) - if propSchema != nil { - if v.shouldSkipProperty(propSchema, ctx.direction) { - continue - } - undeclared = append(undeclared, v.validateValue(propCtx, propSchema, propValue)...) - } - } - - return undeclared -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/strict/property_collector.go b/vendor/github.com/pb33f/libopenapi-validator/strict/property_collector.go deleted file mode 100644 index 24317b83344..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/strict/property_collector.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2023-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// SPDX-License-Identifier: MIT - -package strict - -import ( - "regexp" - - "github.com/pb33f/libopenapi/datamodel/high/base" -) - -// declaredProperty holds information about a declared property in a schema. -type declaredProperty struct { - // proxy is the SchemaProxy for the property. - proxy *base.SchemaProxy -} - -// collectDeclaredProperties gathers all property names that are declared in a schema. -// This includes explicit properties, patternProperties matches, and properties from -// dependentSchemas and if/then/else based on the actual data. -// -// Returns a map from property name to its declaration info, plus a slice of -// pattern regexes for patternProperties matching. 
-func (v *Validator) collectDeclaredProperties( - schema *base.Schema, - data map[string]any, -) (declared map[string]*declaredProperty, patterns []*regexp.Regexp) { - declared = make(map[string]*declaredProperty) - - if schema == nil { - return declared, nil - } - - // explicit properties - if schema.Properties != nil { - for pair := schema.Properties.First(); pair != nil; pair = pair.Next() { - declared[pair.Key()] = &declaredProperty{ - proxy: pair.Value(), - } - } - } - - // pattern properties - use cached compiled patterns - if schema.PatternProperties != nil { - for pair := schema.PatternProperties.First(); pair != nil; pair = pair.Next() { - pattern := v.getCompiledPattern(pair.Key()) - if pattern == nil { - continue - } - patterns = append(patterns, pattern) - } - } - - // dependent schemas - if trigger property exists in data - if schema.DependentSchemas != nil { - for pair := schema.DependentSchemas.First(); pair != nil; pair = pair.Next() { - triggerProp := pair.Key() - if _, exists := data[triggerProp]; !exists { - continue - } - // trigger property exists, include dependent schema's properties - mergePropertiesIntoDeclared(declared, pair.Value().Schema()) - } - } - - // if/then/else - if schema.If != nil { - ifProxy := schema.If - ifSchema := ifProxy.Schema() - if ifSchema != nil { - matches, err := v.dataMatchesSchema(ifSchema, data) - if err != nil { - // schema compilation failed - log and use else branch - v.logger.Debug("strict: if schema compilation failed, using else branch", "error", err) - matches = false - } - if matches { - if schema.Then != nil { - mergePropertiesIntoDeclared(declared, schema.Then.Schema()) - } - } else { - if schema.Else != nil { - mergePropertiesIntoDeclared(declared, schema.Else.Schema()) - } - } - } - } - - return declared, patterns -} - -// mergePropertiesIntoDeclared merges properties from a schema's Properties map into -// the declared map. Only adds properties that are not already declared. 
-// This eliminates code duplication when collecting properties from multiple sources. -func mergePropertiesIntoDeclared(declared map[string]*declaredProperty, schema *base.Schema) { - if schema == nil || schema.Properties == nil { - return - } - for p := schema.Properties.First(); p != nil; p = p.Next() { - if _, alreadyDeclared := declared[p.Key()]; !alreadyDeclared { - declared[p.Key()] = &declaredProperty{ - proxy: p.Value(), - } - } - } -} - -// getDeclaredPropertyNames returns just the property names from declared properties. -func getDeclaredPropertyNames(declared map[string]*declaredProperty) []string { - if len(declared) == 0 { - return nil - } - names := make([]string, 0, len(declared)) - for name := range declared { - names = append(names, name) - } - return names -} - -// isPropertyDeclared checks if a property name is declared in the schema. -// A property is declared if: -// - It's in the explicit properties map -// - It matches any patternProperties regex -func isPropertyDeclared(name string, declared map[string]*declaredProperty, patterns []*regexp.Regexp) bool { - // check explicit properties - if _, ok := declared[name]; ok { - return true - } - - // check pattern properties - for _, pattern := range patterns { - if pattern.MatchString(name) { - return true - } - } - - return false -} - -// getPropertySchema returns the SchemaProxy for a declared property. -// Returns nil if the property is not declared or is only matched by pattern. -func getPropertySchema(name string, declared map[string]*declaredProperty) *base.SchemaProxy { - // check explicit properties first - if dp, ok := declared[name]; ok && dp.proxy != nil { - return dp.proxy - } - return nil -} - -// shouldSkipProperty checks if a property should be skipped based on -// readOnly/writeOnly and the current validation direction. 
-func (v *Validator) shouldSkipProperty(schema *base.Schema, direction Direction) bool { - if schema == nil { - return false - } - - // readOnly: skip in requests (should not be sent by client) - if direction == DirectionRequest && schema.ReadOnly != nil && *schema.ReadOnly { - return true - } - - // writeOnly: skip in responses (should not be returned by server) - if direction == DirectionResponse && schema.WriteOnly != nil && *schema.WriteOnly { - return true - } - - return false -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/strict/schema_walker.go b/vendor/github.com/pb33f/libopenapi-validator/strict/schema_walker.go deleted file mode 100644 index 042a91ccdd8..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/strict/schema_walker.go +++ /dev/null @@ -1,234 +0,0 @@ -// Copyright 2023-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// SPDX-License-Identifier: MIT - -package strict - -import ( - "github.com/pb33f/libopenapi/datamodel/high/base" -) - -// validateValue is the main entry point for validating a value against a schema. -// It dispatches to the appropriate handler based on the value type. -func (v *Validator) validateValue(ctx *traversalContext, schema *base.Schema, data any) []UndeclaredValue { - if schema == nil || data == nil { - return nil - } - - if ctx.shouldIgnore() { - return nil - } - - if ctx.exceedsDepth() { - return nil - } - - // check for cycles using schema hash - schemaKey := v.getSchemaKey(schema) - if ctx.checkAndMarkVisited(schemaKey) { - return nil - } - - // switch on data type - switch val := data.(type) { - case map[string]any: - return v.validateObject(ctx, schema, val) - case []any: - return v.validateArray(ctx, schema, val) - default: - return nil - } -} - -// validateObject checks an object value against a schema for undeclared properties. 
-func (v *Validator) validateObject(ctx *traversalContext, schema *base.Schema, data map[string]any) []UndeclaredValue { - var undeclared []UndeclaredValue - - if len(schema.AllOf) > 0 || len(schema.OneOf) > 0 || len(schema.AnyOf) > 0 { - return v.validatePolymorphic(ctx, schema, data) - } - - if !v.shouldReportUndeclared(schema) { - // additionalProperties: false - base validation catches this, no strict check needed - // Still need to recurse into declared properties - return v.recurseIntoDeclaredProperties(ctx, schema, data) - } - - declared, patterns := v.collectDeclaredProperties(schema, data) - - // check each property in the data - for propName, propValue := range data { - propPath := buildPath(ctx.path, propName) - propCtx := ctx.withPath(propPath) - - if propCtx.shouldIgnore() { - continue - } - - if !isPropertyDeclared(propName, declared, patterns) { - undeclared = append(undeclared, - newUndeclaredProperty(propPath, propName, propValue, getDeclaredPropertyNames(declared), ctx.direction, schema)) - - // even if undeclared, recurse into additionalProperties schema if present - if schema.AdditionalProperties != nil && schema.AdditionalProperties.IsA() { - addlProxy := schema.AdditionalProperties.A - if addlProxy != nil { - addlSchema := addlProxy.Schema() - if addlSchema != nil { - undeclared = append(undeclared, v.validateValue(propCtx, addlSchema, propValue)...) - } - } - } - continue - } - - // property is declared, recurse into it - propProxy := getPropertySchema(propName, declared) - if propProxy == nil { - propProxy = v.getPatternPropertySchema(schema, propName) - } - - if propProxy != nil { - propSchema := propProxy.Schema() - if propSchema != nil { - // check readOnly/writeOnly - if v.shouldSkipProperty(propSchema, ctx.direction) { - continue - } - undeclared = append(undeclared, v.validateValue(propCtx, propSchema, propValue)...) 
- } - } - } - - return undeclared -} - -// shouldReportUndeclared determines if strict mode should report undeclared -// properties for this schema. -func (v *Validator) shouldReportUndeclared(schema *base.Schema) bool { - if schema == nil { - return false - } - - // SHORT-CIRCUIT: If additionalProperties: false, base validation already catches extras. - if schema.AdditionalProperties != nil && schema.AdditionalProperties.IsB() && !schema.AdditionalProperties.B { - return false - } - - // STRICT OVERRIDE: Even if additionalProperties: true, report undeclared. - if schema.AdditionalProperties != nil { - if schema.AdditionalProperties.IsB() && schema.AdditionalProperties.B { - return true - } - if schema.AdditionalProperties.IsA() { - // additionalProperties with schema - properties matching schema are - // technically "declared" but we still want to flag them as not in - // the explicit schema. They will be recursed into. - return true - } - } - - // STRICT OVERRIDE: unevaluatedProperties: false with implicit additionalProperties: true - // Standard JSON Schema would catch via unevaluatedProperties, but strict reports - // even when additionalProperties: true would normally allow extras. - if schema.UnevaluatedProperties != nil && schema.UnevaluatedProperties.IsB() && !schema.UnevaluatedProperties.B { - // unevaluatedProperties: false means base validation catches extras - // BUT if there's no additionalProperties: false, strict should report - return true - } - - // default: no additionalProperties means implicit true in JSON Schema - // Strict reports undeclared in this case - return true -} - -// getPatternPropertySchema finds the schema for a property that matches -// a patternProperties regex. Uses cached compiled patterns. 
-func (v *Validator) getPatternPropertySchema(schema *base.Schema, propName string) *base.SchemaProxy { - if schema.PatternProperties == nil { - return nil - } - - for pair := schema.PatternProperties.First(); pair != nil; pair = pair.Next() { - pattern := v.getCompiledPattern(pair.Key()) - if pattern == nil { - continue - } - if pattern.MatchString(propName) { - return pair.Value() - } - } - - return nil -} - -// recurseIntoDeclaredProperties recurses into declared properties without -// checking for undeclared (used when additionalProperties: false). -// This includes both explicit properties and patternProperties matches. -func (v *Validator) recurseIntoDeclaredProperties(ctx *traversalContext, schema *base.Schema, data map[string]any) []UndeclaredValue { - var undeclared []UndeclaredValue - - processed := make(map[string]bool) - - // process explicit properties - if schema.Properties != nil { - for pair := schema.Properties.First(); pair != nil; pair = pair.Next() { - propName := pair.Key() - propProxy := pair.Value() - - propValue, exists := data[propName] - if !exists { - continue - } - - processed[propName] = true - - propPath := buildPath(ctx.path, propName) - propCtx := ctx.withPath(propPath) - - if propCtx.shouldIgnore() { - continue - } - - propSchema := propProxy.Schema() - if propSchema != nil { - if v.shouldSkipProperty(propSchema, ctx.direction) { - continue - } - undeclared = append(undeclared, v.validateValue(propCtx, propSchema, propValue)...) 
- } - } - } - - // process patternProperties - recurse into any data properties that match patterns - if schema.PatternProperties != nil { - for propName, propValue := range data { - if processed[propName] { - continue - } - - propProxy := v.getPatternPropertySchema(schema, propName) - if propProxy == nil { - continue - } - - processed[propName] = true - - propPath := buildPath(ctx.path, propName) - propCtx := ctx.withPath(propPath) - - if propCtx.shouldIgnore() { - continue - } - - propSchema := propProxy.Schema() - if propSchema != nil { - if v.shouldSkipProperty(propSchema, ctx.direction) { - continue - } - undeclared = append(undeclared, v.validateValue(propCtx, propSchema, propValue)...) - } - } - } - - return undeclared -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/strict/types.go b/vendor/github.com/pb33f/libopenapi-validator/strict/types.go deleted file mode 100644 index 2d682cc9659..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/strict/types.go +++ /dev/null @@ -1,391 +0,0 @@ -// Copyright 2023-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// SPDX-License-Identifier: MIT - -// Package strict provides strict validation that detects undeclared -// properties in requests and responses, even when additionalProperties -// would normally allow them. -// -// Strict mode is designed for API governance scenarios where you want to -// ensure that clients only send properties that are explicitly documented -// in the OpenAPI specification, regardless of whether additionalProperties -// is set to true. 
-// -// # Key Features -// -// - Detects undeclared properties in request/response bodies (JSON only) -// - Detects undeclared query parameters, headers, and cookies -// - Supports ignore paths with glob patterns (e.g., "$.body.metadata.*") -// - Handles polymorphic schemas (oneOf/anyOf) via per-branch validation -// - Respects readOnly/writeOnly based on request vs response direction -// - Configurable header ignore list with sensible defaults -// -// # Known Limitations -// -// Property names containing single quotes (e.g., {"it's": "value"}) cannot be -// represented in bracket notation and cannot be matched by ignore patterns. -// Such properties will always be reported as undeclared if not in schema. -// This is acceptable because property names with quotes are extremely rare. -package strict - -import ( - "context" - "fmt" - "log/slog" - "regexp" - - "github.com/pb33f/libopenapi/datamodel/high/base" - "github.com/santhosh-tekuri/jsonschema/v6" - - "github.com/pb33f/libopenapi-validator/config" -) - -// Direction indicates whether validation is for a request or response. -// This affects readOnly/writeOnly handling and Set-Cookie behavior. -type Direction int - -const ( - // DirectionRequest indicates validation of an HTTP request. - // readOnly properties are not expected in request bodies. - DirectionRequest Direction = iota - - // DirectionResponse indicates validation of an HTTP response. - // writeOnly properties are not expected in response bodies. - // Set-Cookie headers are ignored (expected in responses). - DirectionResponse -) - -// String returns a human-readable direction name. -func (d Direction) String() string { - if d == DirectionResponse { - return "response" - } - return "request" -} - -// UndeclaredValue represents a value found in data that is not declared -// in the schema. This is the core output of strict validation. -type UndeclaredValue struct { - // Path is the instance JSONPath where the undeclared value was found. 
- // uses bracket notation for property names with special characters. - // examples: "$.body.user.extra", "$.body['a.b'].value", "$.query.debug" - Path string - - // Name is the property, parameter, header, or cookie name. - Name string - - // Value is the actual value found (it may be truncated for display). - Value any - - // Type indicates what kind of value this is. - // one of: "property", "header", "query", "cookie", "item" - Type string - - // DeclaredProperties lists property names that ARE declared at this - // location in the schema. Helps users understand what's expected. - // for headers/query/cookies, this lists declared parameter names. - DeclaredProperties []string - - // Direction indicates whether this was in a request or response. - // used for error message disambiguation when Path is "$.body". - Direction Direction - - // SpecLine is the line number in the OpenAPI spec where the parent - // schema is defined. Zero if unavailable. - SpecLine int - - // SpecCol is the column number in the OpenAPI spec where the parent - // schema is defined. Zero if unavailable. - SpecCol int -} - -// extractSchemaLocation extracts the line and column from a schema's low-level -// representation. returns (0, 0) if the schema is nil or has no low-level info. -func extractSchemaLocation(schema *base.Schema) (line, col int) { - if schema == nil { - return 0, 0 - } - low := schema.GoLow() - if low == nil || low.RootNode == nil { - return 0, 0 - } - return low.RootNode.Line, low.RootNode.Column -} - -// newUndeclaredProperty creates an UndeclaredValue for an undeclared object property. -// the schema parameter is the parent schema where the property would need to be declared. 
-func newUndeclaredProperty(path, name string, value any, declaredNames []string, direction Direction, schema *base.Schema) UndeclaredValue { - line, col := extractSchemaLocation(schema) - return UndeclaredValue{ - Path: path, - Name: name, - Value: TruncateValue(value), - Type: "property", - DeclaredProperties: declaredNames, - Direction: direction, - SpecLine: line, - SpecCol: col, - } -} - -// newUndeclaredParam creates an UndeclaredValue for an undeclared parameter (query/header/cookie). -// note: parameters don't have SpecLine/SpecCol because they're defined in OpenAPI parameter objects, -// not schema objects. the parameter itself is the issue, not a schema definition. -func newUndeclaredParam(path, name string, value any, paramType string, declaredNames []string, direction Direction) UndeclaredValue { - return UndeclaredValue{ - Path: path, - Name: name, - Value: value, - Type: paramType, - DeclaredProperties: declaredNames, - Direction: direction, - } -} - -// newUndeclaredItem creates an UndeclaredValue for an undeclared array item. -func newUndeclaredItem(path, name string, value any, direction Direction) UndeclaredValue { - return UndeclaredValue{ - Path: path, - Name: name, - Value: TruncateValue(value), - Type: "item", - Direction: direction, - } -} - -// Input contains the parameters for strict validation. -type Input struct { - // Schema is the OpenAPI schema to validate against. - Schema *base.Schema - - // Data is the unmarshalled data to validate (from request/response body). - // Should be the result of json.Unmarshal. - Data any - - // Direction indicates request vs response validation. - // affects readOnly/writeOnly and Set-Cookie handling. - Direction Direction - - // Options contains validation configuration including ignore paths. - Options *config.ValidationOptions - - // BasePath is the prefix for generated instance paths. - // typically "$.body" for bodies, "$.query" for query params, etc. 
- BasePath string - - // Version is the OpenAPI version (3.0 or 3.1). - // affects nullable handling in schema matching. - Version float32 -} - -// Result contains the output of strict validation. -type Result struct { - Valid bool - - // UndeclaredValues lists all undeclared properties, parameters, - // headers, or cookies found during validation. - UndeclaredValues []UndeclaredValue -} - -// cycleKey uniquely identifies a schema at a specific validation path. -// Using a struct key avoids string allocation in the hot path. -type cycleKey struct { - path string - schemaKey string -} - -// traversalContext tracks state during schema traversal to detect cycles -// and limit recursion depth. -type traversalContext struct { - // visited tracks schemas already being validated at specific paths. - // key combines instance path + schema key to allow same schema at different paths. - visited map[cycleKey]bool - - // depth tracks current recursion depth for safety limits. - depth int - - // maxDepth is the maximum allowed recursion depth (default: 100). - maxDepth int - - // direction indicates request vs response for readOnly/writeOnly. - direction Direction - - // ignorePaths are compiled regex patterns for paths to skip. - ignorePaths []*regexp.Regexp - - // path is the current instance path being validated. - path string -} - -// newTraversalContext creates a new context for schema traversal. -func newTraversalContext(direction Direction, ignorePaths []*regexp.Regexp, basePath string) *traversalContext { - return &traversalContext{ - visited: make(map[cycleKey]bool), - depth: 0, - maxDepth: 100, - direction: direction, - ignorePaths: ignorePaths, - path: basePath, - } -} - -// withPath returns a new context with an updated path. 
-func (c *traversalContext) withPath(path string) *traversalContext { - return &traversalContext{ - visited: c.visited, - depth: c.depth + 1, - maxDepth: c.maxDepth, - direction: c.direction, - ignorePaths: c.ignorePaths, - path: path, - } -} - -// shouldIgnore checks if the current path matches any ignore pattern. -func (c *traversalContext) shouldIgnore() bool { - for _, pattern := range c.ignorePaths { - if pattern.MatchString(c.path) { - return true - } - } - return false -} - -// exceedsDepth checks if we've exceeded the maximum recursion depth. -func (c *traversalContext) exceedsDepth() bool { - return c.depth > c.maxDepth -} - -// checkAndMarkVisited checks if a schema has been visited at the current path. -// Returns true if this is a cycle (already visited), false otherwise. -// If not a cycle, marks the schema as visited. -func (c *traversalContext) checkAndMarkVisited(schemaKey string) bool { - key := cycleKey{path: c.path, schemaKey: schemaKey} - if c.visited[key] { - return true // Cycle detected - } - c.visited[key] = true - return false -} - -// Validator performs strict property validation against OpenAPI schemas. -// It detects any properties present in data that are not explicitly -// declared in the schema, regardless of additionalProperties settings. -// -// A new Validator should be created for each validation call to ensure -// isolation of internal caches and render contexts. -// -// # Cycle Detection -// -// The Validator uses two distinct cycle detection mechanisms: -// -// 1. traversalContext.visited: Tracks visited (path, schemaKey) combinations -// during the main validation traversal. This prevents infinite recursion -// when the same schema is encountered at the same instance path. The key -// uses a struct for zero-allocation lookups in the hot path. -// -// 2. renderCtx (InlineRenderContext): libopenapi's built-in cycle detection -// for schema rendering. This is used when compiling schemas for oneOf/anyOf -// variant matching. 
It operates at the schema reference level rather than -// instance path level. -// -// These mechanisms serve complementary purposes: visited tracks data traversal -// while renderCtx tracks schema resolution during compilation. -type Validator struct { - options *config.ValidationOptions - logger *slog.Logger - - // localCache stores compiled schemas for reuse within this validation. - // ley is schema hash (as string for map compatibility), value is compiled jsonschema. - localCache map[string]*jsonschema.Schema - - // patternCache stores compiled regex patterns for patternProperties. - // key is the pattern string, value is the compiled regex. - patternCache map[string]*regexp.Regexp - - // renderCtx is used for safe schema rendering with cycle detection. - // see Validator doc comment for how this relates to traversalContext.visited. - renderCtx *base.InlineRenderContext - - // version is the OpenAPI version (3.0 or 3.1). - version float32 - - // compiledIgnorePaths are the pre-compiled regex patterns. - compiledIgnorePaths []*regexp.Regexp -} - -// NewValidator creates a fresh validator for a single validation call. -// The validator should not be reused across concurrent requests. -// Uses the logger from options if available, otherwise logging is silent. -func NewValidator(options *config.ValidationOptions, version float32) *Validator { - var logger *slog.Logger - if options != nil && options.Logger != nil { - logger = options.Logger - } else { - // create a no-op logger that discards all output - logger = slog.New(discardHandler{}) - } - - v := &Validator{ - options: options, - logger: logger, - localCache: make(map[string]*jsonschema.Schema), - patternCache: make(map[string]*regexp.Regexp), - renderCtx: base.NewInlineRenderContext(), - version: version, - } - - if options != nil { - v.compiledIgnorePaths = compileIgnorePaths(options.StrictIgnorePaths) - } - - return v -} - -// discardHandler is a slog.Handler that discards all log records. 
-type discardHandler struct{} - -func (discardHandler) Enabled(context.Context, slog.Level) bool { return false } -func (discardHandler) Handle(context.Context, slog.Record) error { return nil } -func (d discardHandler) WithAttrs([]slog.Attr) slog.Handler { return d } -func (d discardHandler) WithGroup(string) slog.Handler { return d } - -// matchesIgnorePath checks if a path matches any pre-compiled ignore pattern. -func (v *Validator) matchesIgnorePath(path string) bool { - for _, pattern := range v.compiledIgnorePaths { - if pattern.MatchString(path) { - return true - } - } - return false -} - -// getCompiledPattern returns a cached compiled regex for a pattern string. -// If the pattern is not in the cache, it compiles and caches it. -// Returns nil if the pattern is invalid. -func (v *Validator) getCompiledPattern(pattern string) *regexp.Regexp { - if cached, ok := v.patternCache[pattern]; ok { - return cached - } - - compiled, err := regexp.Compile(pattern) - if err != nil { - return nil - } - - v.patternCache[pattern] = compiled - return compiled -} - -// getSchemaKey returns a unique key for a schema used in cycle detection. -// Uses the schema's low-level hash if available, otherwise the pointer address. 
-func (v *Validator) getSchemaKey(schema *base.Schema) string { - if schema == nil { - return "" - } - if low := schema.GoLow(); low != nil { - hash := low.Hash() - return fmt.Sprintf("%x", hash) // uint64 hash as hex string - } - // fallback to pointer address for inline schemas without low-level info - return fmt.Sprintf("%p", schema) -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/strict/utils.go b/vendor/github.com/pb33f/libopenapi-validator/strict/utils.go deleted file mode 100644 index 5bdc4c9622c..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/strict/utils.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2023-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// SPDX-License-Identifier: MIT - -package strict - -import ( - "regexp" - "strconv" - "strings" -) - -// buildPath creates an instance path by appending a property name to a base path. -// Property names containing dots or brackets use bracket notation for clarity. -// -// Examples: -// - buildPath("$.body", "name") → "$.body.name" -// - buildPath("$.body", "a.b") → "$.body['a.b']" -// - buildPath("$.body", "x[0]") → "$.body['x[0]']" -func buildPath(base, propName string) string { - if needsBracketNotation(propName) { - return base + "['" + propName + "']" - } - return base + "." + propName -} - -// needsBracketNotation returns true if a property name contains characters -// that require bracket notation (dots, brackets). -func needsBracketNotation(name string) bool { - return strings.ContainsAny(name, ".[]") -} - -// buildArrayPath creates an instance path for an array element. -func buildArrayPath(base string, index int) string { - return base + "[" + strconv.Itoa(index) + "]" -} - -// compileIgnorePaths converts glob patterns to compiled regular expressions. 
-// Supports: -// - * matches single path segment (no dots or brackets) -// - ** matches any depth (zero or more segments) -// - [*] matches any array index -// - \* escapes literal asterisk -// - \*\* escapes literal double-asterisk -func compileIgnorePaths(patterns []string) []*regexp.Regexp { - if len(patterns) == 0 { - return nil - } - - compiled := make([]*regexp.Regexp, 0, len(patterns)) - for _, pattern := range patterns { - re := compilePattern(pattern) - if re != nil { - compiled = append(compiled, re) - } - } - return compiled -} - -// compilePattern converts a single glob pattern to a regular expression. -func compilePattern(pattern string) *regexp.Regexp { - if pattern == "" { - return nil - } - - var b strings.Builder - b.WriteString("^") - - i := 0 - for i < len(pattern) { - c := pattern[i] - - // handle escape sequences - if c == '\\' && i+1 < len(pattern) { - next := pattern[i+1] - if next == '*' { - // check for escaped ** - if i+2 < len(pattern) && pattern[i+2] == '\\' && i+3 < len(pattern) && pattern[i+3] == '*' { - b.WriteString(`\*\*`) - i += 4 - continue - } - // escaped single * - b.WriteString(`\*`) - i += 2 - continue - } - // other escape - include literally - b.WriteString(regexp.QuoteMeta(string(next))) - i += 2 - continue - } - - // handle ** (any depth) - if c == '*' && i+1 < len(pattern) && pattern[i+1] == '*' { - // ** matches any sequence of segments including none - b.WriteString(`.*`) - i += 2 - continue - } - - // handle single * (single segment) - if c == '*' { - // * matches single path segment (no dots or brackets) - b.WriteString(`[^.\[\]]+`) - i++ - continue - } - - // handle [*] (any array index) - if c == '[' && i+2 < len(pattern) && pattern[i+1] == '*' && pattern[i+2] == ']' { - b.WriteString(`\[\d+\]`) - i += 3 - continue - } - - // handle special regex characters - switch c { - case '.', '[', ']', '(', ')', '{', '}', '+', '?', '^', '$', '|': - b.WriteString(`\`) - b.WriteByte(c) - default: - b.WriteByte(c) - } - i++ - } 
- - b.WriteString("$") - - re, _ := regexp.Compile(b.String()) - return re -} - -// TruncateValue creates a display-friendly version of a value. -// Long strings are truncated, complex objects show type info. -// This is exported for use in error messages. -func TruncateValue(v any) any { - switch val := v.(type) { - case string: - if len(val) > 50 { - return val[:47] + "..." - } - return val - case map[string]any: - if len(val) > 3 { - return "{...}" - } - return val - case []any: - if len(val) > 3 { - return "[...]" - } - return val - default: - return v - } -} - -// truncateValue is an internal alias for TruncateValue. -func truncateValue(v any) any { - return TruncateValue(v) -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/strict/validator.go b/vendor/github.com/pb33f/libopenapi-validator/strict/validator.go deleted file mode 100644 index 1917eff915c..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/strict/validator.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright 2023-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// SPDX-License-Identifier: MIT - -package strict - -import ( - "net/http" - "strings" - - "github.com/pb33f/libopenapi/datamodel/high/base" - v3 "github.com/pb33f/libopenapi/datamodel/high/v3" - - "github.com/pb33f/libopenapi-validator/config" -) - -// Validate performs strict validation on the input data against the schema. -// This is the main entry point for body validation. -// -// It detects undeclared properties even when additionalProperties: true -// would normally allow them. This is useful for API governance scenarios -// where you want to ensure clients only send explicitly documented properties. 
-func (v *Validator) Validate(input Input) *Result { - result := &Result{Valid: true} - - if input.Schema == nil || input.Data == nil { - return result - } - - ctx := newTraversalContext(input.Direction, v.compiledIgnorePaths, input.BasePath) - - undeclared := v.validateValue(ctx, input.Schema, input.Data) - - if len(undeclared) > 0 { - result.Valid = false - result.UndeclaredValues = undeclared - } - - return result -} - -// ValidateBody is a convenience method for validating request/response bodies. -func ValidateBody(schema *base.Schema, data any, direction Direction, options *config.ValidationOptions, version float32) *Result { - v := NewValidator(options, version) - return v.Validate(Input{ - Schema: schema, - Data: data, - Direction: direction, - Options: options, - BasePath: "$.body", - Version: version, - }) -} - -// ValidateQueryParams checks for undeclared query parameters in an HTTP request. -// It compares the query parameters present in the request against those -// declared in the OpenAPI operation. 
-func ValidateQueryParams( - request *http.Request, - declaredParams []*v3.Parameter, - options *config.ValidationOptions, -) []UndeclaredValue { - if request == nil || options == nil || !options.StrictMode { - return nil - } - - v := NewValidator(options, 3.2) - - // build set of declared query params (case-sensitive) - declared := make(map[string]bool) - for _, param := range declaredParams { - if param.In == "query" { - declared[param.Name] = true - } - } - - var undeclared []UndeclaredValue - - // check each query parameter in the request - for paramName := range request.URL.Query() { - if !declared[paramName] { - // build path using proper notation for special characters - path := buildPath("$.query", paramName) - if v.matchesIgnorePath(path) { - continue - } - - undeclared = append(undeclared, - newUndeclaredParam(path, paramName, request.URL.Query().Get(paramName), "query", getParamNames(declaredParams, "query"), DirectionRequest)) - } - } - - return undeclared -} - -// ValidateRequestHeaders checks for undeclared headers in an HTTP request. -// Header names are normalized to lowercase for path generation and pattern matching. -// -// The securityHeaders parameter contains header names that are valid due to security -// scheme definitions (e.g., "X-API-Key" for apiKey schemes, "Authorization" for -// http/oauth2/openIdConnect schemes). These headers are considered "declared" even -// though they don't appear in the operation's parameters array. 
-func ValidateRequestHeaders( - headers http.Header, - declaredParams []*v3.Parameter, - securityHeaders []string, - options *config.ValidationOptions, -) []UndeclaredValue { - if headers == nil || options == nil || !options.StrictMode { - return nil - } - - v := NewValidator(options, 3.2) - - // build set of declared headers (case-insensitive) - declared := make(map[string]bool) - for _, param := range declaredParams { - if param.In == "header" { - declared[strings.ToLower(param.Name)] = true - } - } - - // add security scheme headers (case-insensitive) - for _, h := range securityHeaders { - declared[strings.ToLower(h)] = true - } - - var undeclared []UndeclaredValue - - // check each header - for headerName := range headers { - lowerName := strings.ToLower(headerName) - - // skip if declared (via parameters or security schemes) - if declared[lowerName] { - continue - } - - // skip if in ignored headers list - if v.isHeaderIgnored(headerName, DirectionRequest) { - continue - } - - // build path using lowercase name for case-insensitive pattern matching - path := buildPath("$.headers", lowerName) - if v.matchesIgnorePath(path) { - continue - } - - undeclared = append(undeclared, - newUndeclaredParam(path, headerName, headers.Get(headerName), "header", getParamNames(declaredParams, "header"), DirectionRequest)) - } - - return undeclared -} - -// ValidateCookies checks for undeclared cookies in an HTTP request. 
-func ValidateCookies( - request *http.Request, - declaredParams []*v3.Parameter, - options *config.ValidationOptions, -) []UndeclaredValue { - if request == nil || options == nil || !options.StrictMode { - return nil - } - - v := NewValidator(options, 3.2) - - // build set of declared cookies - declared := make(map[string]bool) - for _, param := range declaredParams { - if param.In == "cookie" { - declared[param.Name] = true - } - } - - var undeclared []UndeclaredValue - - // check each cookie in the request - for _, cookie := range request.Cookies() { - if !declared[cookie.Name] { - // build path using proper notation for special characters - path := buildPath("$.cookies", cookie.Name) - if v.matchesIgnorePath(path) { - continue - } - - undeclared = append(undeclared, - newUndeclaredParam(path, cookie.Name, cookie.Value, "cookie", getParamNames(declaredParams, "cookie"), DirectionRequest)) - } - } - - return undeclared -} - -// getParamNames extracts parameter names of a specific type. -func getParamNames(params []*v3.Parameter, paramType string) []string { - var names []string - for _, param := range params { - if param.In == paramType { - names = append(names, param.Name) - } - } - return names -} - -// ValidateResponseHeaders checks for undeclared headers in an HTTP response. -// Uses the declared headers from the OpenAPI response object. -// Header names are normalized to lowercase for path generation and pattern matching. 
-func ValidateResponseHeaders( - headers http.Header, - declaredHeaders *map[string]*v3.Header, - options *config.ValidationOptions, -) []UndeclaredValue { - if headers == nil || options == nil || !options.StrictMode { - return nil - } - - v := NewValidator(options, 3.2) - - // build set of declared headers (case-insensitive) - declared := make(map[string]bool) - if declaredHeaders != nil { - for name := range *declaredHeaders { - declared[strings.ToLower(name)] = true - } - } - - var undeclared []UndeclaredValue - var declaredNames []string - if declaredHeaders != nil { - for name := range *declaredHeaders { - declaredNames = append(declaredNames, name) - } - } - - for headerName := range headers { - lowerName := strings.ToLower(headerName) - - if declared[lowerName] { - continue - } - - if v.isHeaderIgnored(headerName, DirectionResponse) { - continue - } - - // build path using lowercase name for case-insensitive pattern matching - path := buildPath("$.headers", lowerName) - if v.matchesIgnorePath(path) { - continue - } - - undeclared = append(undeclared, - newUndeclaredParam(path, headerName, headers.Get(headerName), "header", declaredNames, DirectionResponse)) - } - - return undeclared -} diff --git a/vendor/github.com/pb33f/libopenapi-validator/validator.go b/vendor/github.com/pb33f/libopenapi-validator/validator.go deleted file mode 100644 index d84b9004cbb..00000000000 --- a/vendor/github.com/pb33f/libopenapi-validator/validator.go +++ /dev/null @@ -1,572 +0,0 @@ -// Copyright 2023-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// SPDX-License-Identifier: MIT - -package validator - -import ( - "fmt" - "net/http" - "sort" - "sync" - - "github.com/pb33f/libopenapi" - "github.com/pb33f/libopenapi/datamodel/high/base" - "github.com/pb33f/libopenapi/utils" - "go.yaml.in/yaml/v4" - - v3 "github.com/pb33f/libopenapi/datamodel/high/v3" - - "github.com/pb33f/libopenapi-validator/cache" - "github.com/pb33f/libopenapi-validator/config" - 
"github.com/pb33f/libopenapi-validator/errors" - "github.com/pb33f/libopenapi-validator/helpers" - "github.com/pb33f/libopenapi-validator/parameters" - "github.com/pb33f/libopenapi-validator/paths" - "github.com/pb33f/libopenapi-validator/radix" - "github.com/pb33f/libopenapi-validator/requests" - "github.com/pb33f/libopenapi-validator/responses" - "github.com/pb33f/libopenapi-validator/schema_validation" -) - -// Validator provides a coarse grained interface for validating an OpenAPI 3+ documents. -// There are three primary use-cases for validation -// -// Validating *http.Request objects against and OpenAPI 3+ document -// Validating *http.Response objects against an OpenAPI 3+ document -// Validating an OpenAPI 3+ document against the OpenAPI 3+ specification -type Validator interface { - // ValidateHttpRequest will validate an *http.Request object against an OpenAPI 3+ document. - // The path, query, cookie and header parameters and request body are validated. - ValidateHttpRequest(request *http.Request) (bool, []*errors.ValidationError) - // ValidateHttpRequestSync will validate an *http.Request object against an OpenAPI 3+ document synchronously and without spawning any goroutines. - // The path, query, cookie and header parameters and request body are validated. - ValidateHttpRequestSync(request *http.Request) (bool, []*errors.ValidationError) - - // ValidateHttpRequestWithPathItem will validate an *http.Request object against an OpenAPI 3+ document. - // The path, query, cookie and header parameters and request body are validated. - ValidateHttpRequestWithPathItem(request *http.Request, pathItem *v3.PathItem, pathValue string) (bool, []*errors.ValidationError) - - // ValidateHttpRequestSyncWithPathItem will validate an *http.Request object against an OpenAPI 3+ document synchronously and without spawning any goroutines. - // The path, query, cookie and header parameters and request body are validated. 
- ValidateHttpRequestSyncWithPathItem(request *http.Request, pathItem *v3.PathItem, pathValue string) (bool, []*errors.ValidationError) - - // ValidateHttpResponse will an *http.Response object against an OpenAPI 3+ document. - // The response body is validated. The request is only used to extract the correct response from the spec. - ValidateHttpResponse(request *http.Request, response *http.Response) (bool, []*errors.ValidationError) - - // ValidateHttpRequestResponse will validate both the *http.Request and *http.Response objects against an OpenAPI 3+ document. - // The path, query, cookie and header parameters and request and response body are validated. - ValidateHttpRequestResponse(request *http.Request, response *http.Response) (bool, []*errors.ValidationError) - - // ValidateDocument will validate an OpenAPI 3+ document against the 3.0 or 3.1 OpenAPI 3+ specification - ValidateDocument() (bool, []*errors.ValidationError) - - // GetParameterValidator will return a parameters.ParameterValidator instance used to validate parameters - GetParameterValidator() parameters.ParameterValidator - - // GetRequestBodyValidator will return a parameters.RequestBodyValidator instance used to validate request bodies - GetRequestBodyValidator() requests.RequestBodyValidator - - // GetResponseBodyValidator will return a parameters.ResponseBodyValidator instance used to validate response bodies - GetResponseBodyValidator() responses.ResponseBodyValidator - - // SetDocument will set the OpenAPI 3+ document to be validated - SetDocument(document libopenapi.Document) -} - -// NewValidator will create a new Validator from an OpenAPI 3+ document -func NewValidator(document libopenapi.Document, opts ...config.Option) (Validator, []error) { - m, errs := document.BuildV3Model() - if errs != nil { - return nil, []error{errs} - } - v := NewValidatorFromV3Model(&m.Model, opts...) 
- v.(*validator).document = document - return v, nil -} - -// NewValidatorFromV3Model will create a new Validator from an OpenAPI Model -func NewValidatorFromV3Model(m *v3.Document, opts ...config.Option) Validator { - options := config.NewValidationOptions(opts...) - - // Build radix tree for O(k) path lookup (where k = path depth) - // Skip if path tree is disabled or a custom tree was provided - if options.PathTree == nil && !options.IsPathTreeDisabled() { - options.PathTree = radix.BuildPathTree(m) - } - - // warm the schema caches by pre-compiling all schemas in the document - // (warmSchemaCaches checks for nil cache and skips if disabled) - warmSchemaCaches(m, options) - - v := &validator{options: options, v3Model: m} - - // create a new parameter validator - v.paramValidator = parameters.NewParameterValidator(m, config.WithExistingOpts(options)) - - // create aq new request body validator - v.requestValidator = requests.NewRequestBodyValidator(m, config.WithExistingOpts(options)) - - // create a response body validator - v.responseValidator = responses.NewResponseBodyValidator(m, config.WithExistingOpts(options)) - - return v -} - -func (v *validator) SetDocument(document libopenapi.Document) { - v.document = document -} - -func (v *validator) GetParameterValidator() parameters.ParameterValidator { - return v.paramValidator -} - -func (v *validator) GetRequestBodyValidator() requests.RequestBodyValidator { - return v.requestValidator -} - -func (v *validator) GetResponseBodyValidator() responses.ResponseBodyValidator { - return v.responseValidator -} - -func (v *validator) ValidateDocument() (bool, []*errors.ValidationError) { - if v.document == nil { - return false, []*errors.ValidationError{{ - ValidationType: helpers.DocumentValidation, - ValidationSubType: helpers.ValidationMissing, - Message: "Document is not set", - Reason: "The document cannot be validated as it is not set", - SpecLine: 1, - SpecCol: 1, - HowToFix: "Set the document via `SetDocument` 
before validating", - }} - } - var validationOpts []config.Option - if v.options != nil { - validationOpts = append(validationOpts, config.WithRegexEngine(v.options.RegexEngine)) - } - return schema_validation.ValidateOpenAPIDocument(v.document, validationOpts...) -} - -func (v *validator) ValidateHttpResponse( - request *http.Request, - response *http.Response, -) (bool, []*errors.ValidationError) { - var pathItem *v3.PathItem - var pathValue string - var errs []*errors.ValidationError - - pathItem, errs, pathValue = paths.FindPath(request, v.v3Model, v.options) - if pathItem == nil || errs != nil { - return false, errs - } - - responseBodyValidator := v.responseValidator - - // validate response - _, responseErrors := responseBodyValidator.ValidateResponseBodyWithPathItem(request, response, pathItem, pathValue) - - if len(responseErrors) > 0 { - return false, responseErrors - } - return true, nil -} - -func (v *validator) ValidateHttpRequestResponse( - request *http.Request, - response *http.Response, -) (bool, []*errors.ValidationError) { - var pathItem *v3.PathItem - var pathValue string - var errs []*errors.ValidationError - - pathItem, errs, pathValue = paths.FindPath(request, v.v3Model, v.options) - if pathItem == nil || errs != nil { - return false, errs - } - - responseBodyValidator := v.responseValidator - - // validate request and response - _, requestErrors := v.ValidateHttpRequestWithPathItem(request, pathItem, pathValue) - _, responseErrors := responseBodyValidator.ValidateResponseBodyWithPathItem(request, response, pathItem, pathValue) - - if len(requestErrors) > 0 || len(responseErrors) > 0 { - return false, append(requestErrors, responseErrors...) 
- } - return true, nil -} - -func (v *validator) ValidateHttpRequest(request *http.Request) (bool, []*errors.ValidationError) { - pathItem, errs, foundPath := paths.FindPath(request, v.v3Model, v.options) - if len(errs) > 0 { - return false, errs - } - return v.ValidateHttpRequestWithPathItem(request, pathItem, foundPath) -} - -func (v *validator) ValidateHttpRequestWithPathItem(request *http.Request, pathItem *v3.PathItem, pathValue string) (bool, []*errors.ValidationError) { - // create a new parameter validator - paramValidator := v.paramValidator - - // create a new request body validator - reqBodyValidator := v.requestValidator - - // create some channels to handle async validation - doneChan := make(chan struct{}) - errChan := make(chan []*errors.ValidationError) - controlChan := make(chan struct{}) - - // async param validation function. - parameterValidationFunc := func(control chan struct{}, errorChan chan []*errors.ValidationError) { - paramErrs := make(chan []*errors.ValidationError) - paramControlChan := make(chan struct{}) - paramFunctionControlChan := make(chan struct{}) - var paramValidationErrors []*errors.ValidationError - - validations := []validationFunction{ - paramValidator.ValidatePathParamsWithPathItem, - paramValidator.ValidateCookieParamsWithPathItem, - paramValidator.ValidateHeaderParamsWithPathItem, - paramValidator.ValidateQueryParamsWithPathItem, - paramValidator.ValidateSecurityWithPathItem, - } - - // listen for validation errors on parameters. everything will run async. - paramListener := func(control chan struct{}, errorChan chan []*errors.ValidationError) { - completedValidations := 0 - for { - select { - case vErrs := <-errorChan: - paramValidationErrors = append(paramValidationErrors, vErrs...) 
- case <-control: - completedValidations++ - if completedValidations == len(validations) { - paramFunctionControlChan <- struct{}{} - return - } - } - } - } - - validateParamFunction := func( - control chan struct{}, - errorChan chan []*errors.ValidationError, - validatorFunc validationFunction, - ) { - valid, pErrs := validatorFunc(request, pathItem, pathValue) - if !valid { - errorChan <- pErrs - } - control <- struct{}{} - } - go paramListener(paramControlChan, paramErrs) - for i := range validations { - go validateParamFunction(paramControlChan, paramErrs, validations[i]) - } - - // wait for all the validations to complete - <-paramFunctionControlChan - if len(paramValidationErrors) > 0 { - errorChan <- paramValidationErrors - } - - // let runValidation know we are done with this part. - controlChan <- struct{}{} - } - - requestBodyValidationFunc := func(control chan struct{}, errorChan chan []*errors.ValidationError) { - valid, pErrs := reqBodyValidator.ValidateRequestBodyWithPathItem(request, pathItem, pathValue) - if !valid { - errorChan <- pErrs - } - control <- struct{}{} - } - - // build async functions - asyncFunctions := []validationFunctionAsync{ - parameterValidationFunc, - requestBodyValidationFunc, - } - - var validationErrors []*errors.ValidationError - - // sit and wait for everything to report back. 
- go runValidation(controlChan, doneChan, errChan, &validationErrors, len(asyncFunctions)) - - // run async functions - for i := range asyncFunctions { - go asyncFunctions[i](controlChan, errChan) - } - - // wait for all the validations to complete - <-doneChan - - // sort errors for deterministic ordering (async validation can return errors in any order) - sortValidationErrors(validationErrors) - - return len(validationErrors) == 0, validationErrors -} - -func (v *validator) ValidateHttpRequestSync(request *http.Request) (bool, []*errors.ValidationError) { - pathItem, errs, foundPath := paths.FindPath(request, v.v3Model, v.options) - if len(errs) > 0 { - return false, errs - } - return v.ValidateHttpRequestSyncWithPathItem(request, pathItem, foundPath) -} - -func (v *validator) ValidateHttpRequestSyncWithPathItem(request *http.Request, pathItem *v3.PathItem, pathValue string) (bool, []*errors.ValidationError) { - // create a new parameter validator - paramValidator := v.paramValidator - - // create a new request body validator - reqBodyValidator := v.requestValidator - - validationErrors := make([]*errors.ValidationError, 0) - - paramValidationErrors := make([]*errors.ValidationError, 0) - for _, validateFunc := range []validationFunction{ - paramValidator.ValidatePathParamsWithPathItem, - paramValidator.ValidateCookieParamsWithPathItem, - paramValidator.ValidateHeaderParamsWithPathItem, - paramValidator.ValidateQueryParamsWithPathItem, - paramValidator.ValidateSecurityWithPathItem, - } { - valid, pErrs := validateFunc(request, pathItem, pathValue) - if !valid { - paramValidationErrors = append(paramValidationErrors, pErrs...) - } - } - - valid, pErrs := reqBodyValidator.ValidateRequestBodyWithPathItem(request, pathItem, pathValue) - if !valid { - paramValidationErrors = append(paramValidationErrors, pErrs...) - } - - validationErrors = append(validationErrors, paramValidationErrors...) 
- return len(validationErrors) == 0, validationErrors -} - -type validator struct { - options *config.ValidationOptions - v3Model *v3.Document - document libopenapi.Document - paramValidator parameters.ParameterValidator - requestValidator requests.RequestBodyValidator - responseValidator responses.ResponseBodyValidator -} - -func runValidation(control, doneChan chan struct{}, - errorChan chan []*errors.ValidationError, - validationErrors *[]*errors.ValidationError, - total int, -) { - var validationLock sync.Mutex - completedValidations := 0 - for { - select { - case vErrs := <-errorChan: - validationLock.Lock() - *validationErrors = append(*validationErrors, vErrs...) - validationLock.Unlock() - case <-control: - completedValidations++ - if completedValidations == total { - doneChan <- struct{}{} - return - } - } - } -} - -type ( - validationFunction func(request *http.Request, pathItem *v3.PathItem, pathValue string) (bool, []*errors.ValidationError) - validationFunctionAsync func(control chan struct{}, errorChan chan []*errors.ValidationError) -) - -// sortValidationErrors sorts validation errors for deterministic ordering. -// Errors are sorted by validation type first, then by message. -func sortValidationErrors(errs []*errors.ValidationError) { - sort.Slice(errs, func(i, j int) bool { - if errs[i].ValidationType != errs[j].ValidationType { - return errs[i].ValidationType < errs[j].ValidationType - } - return errs[i].Message < errs[j].Message - }) -} - -// warmSchemaCaches pre-compiles all schemas in the OpenAPI document and stores them in the validator caches. -// This frontloads the compilation cost so that runtime validation doesn't need to compile schemas. 
-func warmSchemaCaches( - doc *v3.Document, - options *config.ValidationOptions, -) { - // Skip warming if cache is nil (explicitly disabled via WithSchemaCache(nil)) - if doc == nil || doc.Paths == nil || doc.Paths.PathItems == nil || options.SchemaCache == nil { - return - } - - schemaCache := options.SchemaCache - - // Walk through all paths and operations - for pathPair := doc.Paths.PathItems.First(); pathPair != nil; pathPair = pathPair.Next() { - pathItem := pathPair.Value() - - // Get all operations for this path (handles all HTTP methods including OpenAPI 3.2+ extensions) - operations := pathItem.GetOperations() - if operations == nil { - continue - } - - for opPair := operations.First(); opPair != nil; opPair = opPair.Next() { - operation := opPair.Value() - if operation == nil { - continue - } - - // Warm request body schemas - if operation.RequestBody != nil && operation.RequestBody.Content != nil { - for contentPair := operation.RequestBody.Content.First(); contentPair != nil; contentPair = contentPair.Next() { - mediaType := contentPair.Value() - if mediaType.Schema != nil { - warmMediaTypeSchema(mediaType, schemaCache, options) - } - } - } - - // Warm response body schemas - if operation.Responses != nil { - // Warm status code responses - if operation.Responses.Codes != nil { - for codePair := operation.Responses.Codes.First(); codePair != nil; codePair = codePair.Next() { - response := codePair.Value() - if response != nil && response.Content != nil { - for contentPair := response.Content.First(); contentPair != nil; contentPair = contentPair.Next() { - mediaType := contentPair.Value() - if mediaType.Schema != nil { - warmMediaTypeSchema(mediaType, schemaCache, options) - } - } - } - } - } - - // Warm default response schemas - if operation.Responses.Default != nil && operation.Responses.Default.Content != nil { - for contentPair := operation.Responses.Default.Content.First(); contentPair != nil; contentPair = contentPair.Next() { - mediaType := 
contentPair.Value() - if mediaType.Schema != nil { - warmMediaTypeSchema(mediaType, schemaCache, options) - } - } - } - } - - // Warm parameter schemas - if operation.Parameters != nil { - for _, param := range operation.Parameters { - if param != nil { - warmParameterSchema(param, schemaCache, options) - } - } - } - } - - // Warm path-level parameters - if pathItem.Parameters != nil { - for _, param := range pathItem.Parameters { - if param != nil { - warmParameterSchema(param, schemaCache, options) - } - } - } - } -} - -// warmMediaTypeSchema warms the cache for a media type schema -func warmMediaTypeSchema(mediaType *v3.MediaType, schemaCache cache.SchemaCache, options *config.ValidationOptions) { - if mediaType != nil && mediaType.Schema != nil { - hash := mediaType.GoLow().Schema.Value.Hash() - - if _, exists := schemaCache.Load(hash); !exists { - schema := mediaType.Schema.Schema() - if schema != nil { - renderCtx := base.NewInlineRenderContext() - renderedInline, _ := schema.RenderInlineWithContext(renderCtx) - referenceSchema := string(renderedInline) - renderedJSON, _ := utils.ConvertYAMLtoJSON(renderedInline) - if len(renderedInline) > 0 { - compiledSchema, _ := helpers.NewCompiledSchema(fmt.Sprintf("%x", hash), renderedJSON, options) - - // Pre-parse YAML node for error reporting (avoids re-parsing on each error) - var renderedNode yaml.Node - _ = yaml.Unmarshal(renderedInline, &renderedNode) - - schemaCache.Store(hash, &cache.SchemaCacheEntry{ - Schema: schema, - RenderedInline: renderedInline, - ReferenceSchema: referenceSchema, - RenderedJSON: renderedJSON, - CompiledSchema: compiledSchema, - RenderedNode: &renderedNode, - }) - } - } - } - } -} - -// warmParameterSchema warms the cache for a parameter schema -func warmParameterSchema(param *v3.Parameter, schemaCache cache.SchemaCache, options *config.ValidationOptions) { - if param != nil { - var schema *base.Schema - var hash uint64 - - // Parameters can have schemas in two places: schema property or 
content property - if param.Schema != nil { - schema = param.Schema.Schema() - if schema != nil { - hash = param.GoLow().Schema.Value.Hash() - } - } else if param.Content != nil { - // Check content for schema - for contentPair := param.Content.First(); contentPair != nil; contentPair = contentPair.Next() { - mediaType := contentPair.Value() - if mediaType.Schema != nil { - schema = mediaType.Schema.Schema() - if schema != nil { - hash = mediaType.GoLow().Schema.Value.Hash() - } - break // Only process first content type - } - } - } - - if schema != nil { - if _, exists := schemaCache.Load(hash); !exists { - renderCtx := base.NewInlineRenderContext() - renderedInline, _ := schema.RenderInlineWithContext(renderCtx) - referenceSchema := string(renderedInline) - renderedJSON, _ := utils.ConvertYAMLtoJSON(renderedInline) - if len(renderedInline) > 0 { - compiledSchema, _ := helpers.NewCompiledSchema(fmt.Sprintf("%x", hash), renderedJSON, options) - - // Pre-parse YAML node for error reporting (avoids re-parsing on each error) - var renderedNode yaml.Node - _ = yaml.Unmarshal(renderedInline, &renderedNode) - - // Store in cache using the shared SchemaCache type - schemaCache.Store(hash, &cache.SchemaCacheEntry{ - Schema: schema, - RenderedInline: renderedInline, - ReferenceSchema: referenceSchema, - RenderedJSON: renderedJSON, - CompiledSchema: compiledSchema, - RenderedNode: &renderedNode, - }) - } - } - } - } -} diff --git a/vendor/github.com/pb33f/libopenapi/.gitignore b/vendor/github.com/pb33f/libopenapi/.gitignore deleted file mode 100644 index 069611e7d75..00000000000 --- a/vendor/github.com/pb33f/libopenapi/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -test-operation.yaml -.idea/ -*.iml \ No newline at end of file diff --git a/vendor/github.com/pb33f/libopenapi/README.md b/vendor/github.com/pb33f/libopenapi/README.md deleted file mode 100644 index a2fccfdc0e6..00000000000 --- a/vendor/github.com/pb33f/libopenapi/README.md +++ /dev/null @@ -1,152 +0,0 @@ -

- libopenapi -

- -# libopenapi - enterprise grade OpenAPI tools for golang. - - -![Pipeline](https://github.com/pb33f/libopenapi/workflows/Build/badge.svg) -[![GoReportCard](https://goreportcard.com/badge/github.com/pb33f/libopenapi)](https://goreportcard.com/report/github.com/pb33f/libopenapi) -[![codecov](https://codecov.io/gh/pb33f/libopenapi/branch/main/graph/badge.svg?)](https://codecov.io/gh/pb33f/libopenapi) -[![discord](https://img.shields.io/discord/923258363540815912)](https://discord.gg/x7VACVuEGP) -[![Docs](https://img.shields.io/badge/godoc-reference-5fafd7)](https://pkg.go.dev/github.com/pb33f/libopenapi) - -libopenapi has full support for OpenAPI 3, 3.1 and 3.2. It can handle the largest and most -complex specifications you can think of. - -Overlays and Arazzo are also fully supported. - ---- - -## Sponsors & users -If your company is using `libopenapi`, please considering [supporting this project](https://github.com/sponsors/daveshanley), -like our _very kind_ sponsors: - - - - - speakeasy' - - - -[Speakeasy](https://speakeasy.com/editor?utm_source=libopenapi+repo&utm_medium=github+sponsorship) - - - - - scalar' - - - -[scalar](https://scalar.com) - - - - - apideck' - - - -[apideck](https://apideck.com) - ---- - -## Come chat with us - -Need help? Have a question? Want to share your work? [Join our discord](https://discord.gg/x7VACVuEGP) and -come say hi! - -## Check out the `libopenapi-validator` - -Need to validate requests, responses, parameters or schemas? Use the new -[libopenapi-validator](https://github.com/pb33f/libopenapi-validator) module. 
- -## Documentation - -See all the documentation at https://pb33f.io/libopenapi/ - -- [Installing libopenapi](https://pb33f.io/libopenapi/installing/) -- [Using OpenAPI](https://pb33f.io/libopenapi/openapi/) -- [Using Swagger](https://pb33f.io/libopenapi/swagger/) -- [The Data Model](https://pb33f.io/libopenapi/model/) -- [Validation](https://pb33f.io/libopenapi/validation/) -- [Modifying / Mutating the OpenAPI Model](https://pb33f.io/libopenapi/modifying/) -- [Mocking / Creating Examples](https://pb33f.io/libopenapi/mocks/) -- [Using Vendor Extensions](https://pb33f.io/libopenapi/extensions/) -- [The Index](https://pb33f.io/libopenapi/index/) -- [The Resolver](https://pb33f.io/libopenapi/resolver/) -- [The Rolodex](https://pb33f.io/libopenapi/rolodex/) -- [Circular References](https://pb33f.io/libopenapi/circular-references/) -- [Bundling Specs](https://pb33f.io/libopenapi/bundling/) -- [What Changed / Diff Engine](https://pb33f.io/libopenapi/what-changed/) -- [Overlays](https://pb33f.io/libopenapi/overlays/) -- [Arazzo](https://pb33f.io/libopenapi/arazzo/) -- [FAQ](https://pb33f.io/libopenapi/faq/) -- [About libopenapi](https://pb33f.io/libopenapi/about/) ---- - -### Quick-start tutorial - -👀 **Get rolling fast using `libopenapi` with the -[Parsing OpenAPI files using go](https://quobix.com/articles/parsing-openapi-using-go/)** guide 👀 - -Or, follow these steps and see something in a few seconds. - -#### Step 1: Grab the petstore - -```bash -curl https://raw.githubusercontent.com/OAI/OpenAPI-Specification/main/_archive_/schemas/v3.0/pass/petstore.yaml > petstorev3.json -``` - -#### Step 2: Grab libopenapi - -```bash -go get github.com/pb33f/libopenapi -``` - -#### Step 3: Parse the petstore using libopenapi - -Copy and paste this code into a `main.go` file. 
- -```go -package main - -import ( - "fmt" - "os" - "github.com/pb33f/libopenapi" -) - -func main() { - petstore, _ := os.ReadFile("petstorev3.json") - document, err := libopenapi.NewDocument(petstore) - if err != nil { - panic(fmt.Sprintf("cannot create new document: %e", err)) - } - docModel, err := document.BuildV3Model() - if err != nil { - panic(fmt.Sprintf("cannot create v3 model from document: %e", err)) - } - - // The following fails after the first iteration - for schemaName, schema := range docModel.Model.Components.Schemas.FromOldest() { - if schema.Schema().Properties != nil { - fmt.Printf("Schema '%s' has %d properties\n", schemaName, schema.Schema().Properties.Len()) - } - } -} -``` - -Run it, which should print out: - -```bash -Schema 'Pet' has 3 properties -Schema 'Error' has 2 properties -``` - - -> Read the full docs at [https://pb33f.io/libopenapi/](https://pb33f.io/libopenapi/) - ---- - -Logo gopher is modified, originally from [egonelbre](https://github.com/egonelbre/gophers) diff --git a/vendor/github.com/pb33f/libopenapi/arazzo.go b/vendor/github.com/pb33f/libopenapi/arazzo.go deleted file mode 100644 index c732db4db68..00000000000 --- a/vendor/github.com/pb33f/libopenapi/arazzo.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2022-2026 Princess Beef Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package libopenapi - -import ( - gocontext "context" - "fmt" - - high "github.com/pb33f/libopenapi/datamodel/high/arazzo" - "github.com/pb33f/libopenapi/datamodel/low" - lowArazzo "github.com/pb33f/libopenapi/datamodel/low/arazzo" - "go.yaml.in/yaml/v4" -) - -// NewArazzoDocument parses raw bytes into a high-level Arazzo document. 
-func NewArazzoDocument(arazzoBytes []byte) (*high.Arazzo, error) { - var rootNode yaml.Node - if err := yaml.Unmarshal(arazzoBytes, &rootNode); err != nil { - return nil, fmt.Errorf("failed to parse YAML: %w", err) - } - - if rootNode.Kind != yaml.DocumentNode || len(rootNode.Content) == 0 { - return nil, fmt.Errorf("invalid YAML document structure") - } - - mappingNode := rootNode.Content[0] - if mappingNode.Kind != yaml.MappingNode { - return nil, fmt.Errorf("expected YAML mapping, got %v", mappingNode.Kind) - } - - // Build the low-level model - lowDoc := &lowArazzo.Arazzo{} - if err := low.BuildModel(mappingNode, lowDoc); err != nil { - return nil, fmt.Errorf("failed to build low-level model: %w", err) - } - - ctx := gocontext.Background() - if err := lowDoc.Build(ctx, nil, mappingNode, nil); err != nil { - return nil, fmt.Errorf("failed to build arazzo document: %w", err) - } - - // Build the high-level model - highDoc := high.NewArazzo(lowDoc) - return highDoc, nil -} diff --git a/vendor/github.com/pb33f/libopenapi/cache.go b/vendor/github.com/pb33f/libopenapi/cache.go deleted file mode 100644 index bf440c639d5..00000000000 --- a/vendor/github.com/pb33f/libopenapi/cache.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2022-2026 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package libopenapi - -import ( - highbase "github.com/pb33f/libopenapi/datamodel/high/base" - "github.com/pb33f/libopenapi/datamodel/low" - lowbase "github.com/pb33f/libopenapi/datamodel/low/base" - "github.com/pb33f/libopenapi/index" - "github.com/pb33f/libopenapi/utils" -) - -// ClearAllCaches resets every global in-process cache in libopenapi. -// Call this between document lifecycles in long-running processes -// (servers, CLI tools that process many specs) to release memory that -// would otherwise accumulate and never be garbage-collected. 
-func ClearAllCaches() { - low.ClearHashCache() // hashCache + indexCollectionCache - lowbase.ClearSchemaQuickHashMap() // SchemaQuickHashMap - index.ClearHashCache() // nodeHashCache - index.ClearContentDetectionCache() - highbase.ClearInlineRenderingTracker() - utils.ClearJSONPathCache() - - // Drain sync.Pool instances that hold *yaml.Node pointers. - // Pooled slices/maps keep the entire YAML parse tree alive. - index.ClearNodePools() - low.ClearNodePools() -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/document_config.go b/vendor/github.com/pb33f/libopenapi/datamodel/document_config.go index a0fe469f194..5da94ef3129 100644 --- a/vendor/github.com/pb33f/libopenapi/datamodel/document_config.go +++ b/vendor/github.com/pb33f/libopenapi/datamodel/document_config.go @@ -209,6 +209,10 @@ type DocumentConfiguration struct { // - OverwriteWithRemote: Referenced properties overwrite local properties // - RejectConflicts: Throw error when properties conflict PropertyMergeStrategy PropertyMergeStrategy + + // ResolveNestedRefsWithDocumentContext uses the referenced document's path/index as the base for nested refs. + // This controls how nested relative references are interpreted during reference resolution. 
+ ResolveNestedRefsWithDocumentContext bool } func NewDocumentConfiguration() *DocumentConfiguration { diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/arazzo.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/arazzo.go deleted file mode 100644 index b830738b76d..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/arazzo.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2022-2026 Princess Beef Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package arazzo - -import ( - "github.com/pb33f/libopenapi/datamodel/high" - v3 "github.com/pb33f/libopenapi/datamodel/high/v3" - low "github.com/pb33f/libopenapi/datamodel/low/arazzo" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// Arazzo represents a high-level Arazzo document. -// https://spec.openapis.org/arazzo/v1.0.1 -type Arazzo struct { - Arazzo string `json:"arazzo,omitempty" yaml:"arazzo,omitempty"` - Info *Info `json:"info,omitempty" yaml:"info,omitempty"` - SourceDescriptions []*SourceDescription `json:"sourceDescriptions,omitempty" yaml:"sourceDescriptions,omitempty"` - Workflows []*Workflow `json:"workflows,omitempty" yaml:"workflows,omitempty"` - Components *Components `json:"components,omitempty" yaml:"components,omitempty"` - Extensions *orderedmap.Map[string, *yaml.Node] `json:"-" yaml:"-"` - openAPISourceDocs []*v3.Document - low *low.Arazzo -} - -// NewArazzo creates a new high-level Arazzo instance from a low-level one. 
-func NewArazzo(a *low.Arazzo) *Arazzo { - h := new(Arazzo) - h.low = a - if !a.Arazzo.IsEmpty() { - h.Arazzo = a.Arazzo.Value - } - if !a.Info.IsEmpty() { - h.Info = NewInfo(a.Info.Value) - } - if !a.SourceDescriptions.IsEmpty() { - h.SourceDescriptions = buildSlice(a.SourceDescriptions.Value, NewSourceDescription) - } - if !a.Workflows.IsEmpty() { - h.Workflows = buildSlice(a.Workflows.Value, NewWorkflow) - } - if !a.Components.IsEmpty() { - h.Components = NewComponents(a.Components.Value) - } - h.Extensions = high.ExtractExtensions(a.Extensions) - return h -} - -// GoLow returns the low-level Arazzo instance used to create the high-level one. -func (a *Arazzo) GoLow() *low.Arazzo { - return a.low -} - -// GoLowUntyped returns the low-level Arazzo instance with no type. -func (a *Arazzo) GoLowUntyped() any { - return a.low -} - -// AddOpenAPISourceDocument attaches one or more OpenAPI source documents to this Arazzo model. -// Attached documents are runtime metadata and are not rendered or serialized. -func (a *Arazzo) AddOpenAPISourceDocument(docs ...*v3.Document) { - if a == nil || len(docs) == 0 { - return - } - for _, doc := range docs { - if doc != nil { - a.openAPISourceDocs = append(a.openAPISourceDocs, doc) - } - } -} - -// GetOpenAPISourceDocuments returns attached OpenAPI source documents. -func (a *Arazzo) GetOpenAPISourceDocuments() []*v3.Document { - if a == nil || len(a.openAPISourceDocs) == 0 { - return nil - } - docs := make([]*v3.Document, len(a.openAPISourceDocs)) - copy(docs, a.openAPISourceDocs) - return docs -} - -// Render returns a YAML representation of the Arazzo object as a byte slice. -func (a *Arazzo) Render() ([]byte, error) { - return yaml.Marshal(a) -} - -// MarshalYAML creates a ready to render YAML representation of the Arazzo object. 
-func (a *Arazzo) MarshalYAML() (any, error) { - m := orderedmap.New[string, any]() - if a.Arazzo != "" { - m.Set(low.ArazzoLabel, a.Arazzo) - } - if a.Info != nil { - m.Set(low.InfoLabel, a.Info) - } - if len(a.SourceDescriptions) > 0 { - m.Set(low.SourceDescriptionsLabel, a.SourceDescriptions) - } - if len(a.Workflows) > 0 { - m.Set(low.WorkflowsLabel, a.Workflows) - } - if a.Components != nil { - m.Set(low.ComponentsLabel, a.Components) - } - marshalExtensions(m, a.Extensions) - return m, nil -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/build_helpers.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/build_helpers.go deleted file mode 100644 index 505ea2a578f..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/build_helpers.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2022-2026 Princess Beef Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package arazzo - -import ( - "github.com/pb33f/libopenapi/datamodel/low" -) - -// buildSlice converts a slice of low.ValueReference[L] to a slice of H using a conversion function. -func buildSlice[L any, H any](refs []low.ValueReference[L], convert func(L) H) []H { - if len(refs) == 0 { - return nil - } - out := make([]H, 0, len(refs)) - for _, ref := range refs { - out = append(out, convert(ref.Value)) - } - return out -} - -// buildValueSlice extracts the Value from each low.ValueReference into a plain slice. 
-func buildValueSlice[T any](refs []low.ValueReference[T]) []T { - if len(refs) == 0 { - return nil - } - out := make([]T, 0, len(refs)) - for _, ref := range refs { - out = append(out, ref.Value) - } - return out -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/components.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/components.go deleted file mode 100644 index 1d507db2b4f..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/components.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2022-2026 Princess Beef Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package arazzo - -import ( - "github.com/pb33f/libopenapi/datamodel/high" - lowmodel "github.com/pb33f/libopenapi/datamodel/low" - low "github.com/pb33f/libopenapi/datamodel/low/arazzo" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// Components represents a high-level Arazzo Components Object. -// https://spec.openapis.org/arazzo/v1.0.1#components-object -type Components struct { - Inputs *orderedmap.Map[string, *yaml.Node] `json:"inputs,omitempty" yaml:"inputs,omitempty"` - Parameters *orderedmap.Map[string, *Parameter] `json:"parameters,omitempty" yaml:"parameters,omitempty"` - SuccessActions *orderedmap.Map[string, *SuccessAction] `json:"successActions,omitempty" yaml:"successActions,omitempty"` - FailureActions *orderedmap.Map[string, *FailureAction] `json:"failureActions,omitempty" yaml:"failureActions,omitempty"` - Extensions *orderedmap.Map[string, *yaml.Node] `json:"-" yaml:"-"` - low *low.Components -} - -// NewComponents creates a new high-level Components instance from a low-level one. 
-func NewComponents(comp *low.Components) *Components { - c := new(Components) - c.low = comp - - if !comp.Inputs.IsEmpty() && comp.Inputs.Value != nil { - c.Inputs = lowmodel.FromReferenceMap[string, *yaml.Node](comp.Inputs.Value) - } - if !comp.Parameters.IsEmpty() && comp.Parameters.Value != nil { - c.Parameters = lowmodel.FromReferenceMapWithFunc(comp.Parameters.Value, func(v *low.Parameter) *Parameter { - return NewParameter(v) - }) - } - if !comp.SuccessActions.IsEmpty() && comp.SuccessActions.Value != nil { - c.SuccessActions = lowmodel.FromReferenceMapWithFunc(comp.SuccessActions.Value, func(v *low.SuccessAction) *SuccessAction { - return NewSuccessAction(v) - }) - } - if !comp.FailureActions.IsEmpty() && comp.FailureActions.Value != nil { - c.FailureActions = lowmodel.FromReferenceMapWithFunc(comp.FailureActions.Value, func(v *low.FailureAction) *FailureAction { - return NewFailureAction(v) - }) - } - c.Extensions = high.ExtractExtensions(comp.Extensions) - return c -} - -// GoLow returns the low-level Components instance used to create the high-level one. -func (c *Components) GoLow() *low.Components { - return c.low -} - -// GoLowUntyped returns the low-level Components instance with no type. -func (c *Components) GoLowUntyped() any { - return c.low -} - -// Render returns a YAML representation of the Components object as a byte slice. -func (c *Components) Render() ([]byte, error) { - return yaml.Marshal(c) -} - -// MarshalYAML creates a ready to render YAML representation of the Components object. 
-func (c *Components) MarshalYAML() (any, error) { - m := orderedmap.New[string, any]() - if c.Inputs != nil && c.Inputs.Len() > 0 { - m.Set(low.InputsLabel, c.Inputs) - } - if c.Parameters != nil && c.Parameters.Len() > 0 { - m.Set(low.ParametersLabel, c.Parameters) - } - if c.SuccessActions != nil && c.SuccessActions.Len() > 0 { - m.Set(low.SuccessActionsLabel, c.SuccessActions) - } - if c.FailureActions != nil && c.FailureActions.Len() > 0 { - m.Set(low.FailureActionsLabel, c.FailureActions) - } - marshalExtensions(m, c.Extensions) - return m, nil -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/criterion.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/criterion.go deleted file mode 100644 index d2c0907555d..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/criterion.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2022-2026 Princess Beef Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package arazzo - -import ( - "context" - - "github.com/pb33f/libopenapi/datamodel/high" - lowmodel "github.com/pb33f/libopenapi/datamodel/low" - low "github.com/pb33f/libopenapi/datamodel/low/arazzo" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// Criterion represents a high-level Arazzo Criterion Object. -// https://spec.openapis.org/arazzo/v1.0.1#criterion-object -type Criterion struct { - Context string `json:"context,omitempty" yaml:"context,omitempty"` - Condition string `json:"condition,omitempty" yaml:"condition,omitempty"` - Type string `json:"-" yaml:"-"` - ExpressionType *CriterionExpressionType `json:"-" yaml:"-"` - Extensions *orderedmap.Map[string, *yaml.Node] `json:"-" yaml:"-"` - low *low.Criterion -} - -// GetEffectiveType returns the effective criterion type. Returns "simple" when Type is empty, -// the string value when set as a scalar, or ExpressionType.Type when the type field is an object. 
-func (c *Criterion) GetEffectiveType() string { - if c.ExpressionType != nil { - return c.ExpressionType.Type - } - if c.Type != "" { - return c.Type - } - return "simple" -} - -// NewCriterion creates a new high-level Criterion instance from a low-level one. -func NewCriterion(criterion *low.Criterion) *Criterion { - c := new(Criterion) - c.low = criterion - if !criterion.Context.IsEmpty() { - c.Context = criterion.Context.Value - } - if !criterion.Condition.IsEmpty() { - c.Condition = criterion.Condition.Value - } - // Type is a union: scalar string or CriterionExpressionType mapping - if !criterion.Type.IsEmpty() && criterion.Type.Value != nil { - node := criterion.Type.Value - switch node.Kind { - case yaml.ScalarNode: - c.Type = node.Value - case yaml.MappingNode: - cet := &low.CriterionExpressionType{} - if err := lowmodel.BuildModel(node, cet); err == nil { - if err = cet.Build(context.Background(), nil, node, nil); err == nil { - c.ExpressionType = NewCriterionExpressionType(cet) - } - } - } - } - c.Extensions = high.ExtractExtensions(criterion.Extensions) - return c -} - -// GoLow returns the low-level Criterion instance used to create the high-level one. -func (c *Criterion) GoLow() *low.Criterion { - return c.low -} - -// GoLowUntyped returns the low-level Criterion instance with no type. -func (c *Criterion) GoLowUntyped() any { - return c.low -} - -// Render returns a YAML representation of the Criterion object as a byte slice. -func (c *Criterion) Render() ([]byte, error) { - return yaml.Marshal(c) -} - -// MarshalYAML creates a ready to render YAML representation of the Criterion object. 
-func (c *Criterion) MarshalYAML() (any, error) { - m := orderedmap.New[string, any]() - if c.Context != "" { - m.Set(low.ContextLabel, c.Context) - } - if c.Condition != "" { - m.Set(low.ConditionLabel, c.Condition) - } - if c.ExpressionType != nil { - m.Set(low.TypeLabel, c.ExpressionType) - } else if c.Type != "" { - m.Set(low.TypeLabel, c.Type) - } - marshalExtensions(m, c.Extensions) - return m, nil -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/criterion_expression_type.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/criterion_expression_type.go deleted file mode 100644 index 903160a5def..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/criterion_expression_type.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2022-2026 Princess Beef Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package arazzo - -import ( - "github.com/pb33f/libopenapi/datamodel/high" - low "github.com/pb33f/libopenapi/datamodel/low/arazzo" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// CriterionExpressionType represents a high-level Arazzo Criterion Expression Type Object. -// https://spec.openapis.org/arazzo/v1.0.1#criterion-expression-type-object -type CriterionExpressionType struct { - Type string `json:"type,omitempty" yaml:"type,omitempty"` - Version string `json:"version,omitempty" yaml:"version,omitempty"` - Extensions *orderedmap.Map[string, *yaml.Node] `json:"-" yaml:"-"` - low *low.CriterionExpressionType -} - -// NewCriterionExpressionType creates a new high-level CriterionExpressionType instance from a low-level one. 
-func NewCriterionExpressionType(cet *low.CriterionExpressionType) *CriterionExpressionType { - c := new(CriterionExpressionType) - c.low = cet - if !cet.Type.IsEmpty() { - c.Type = cet.Type.Value - } - if !cet.Version.IsEmpty() { - c.Version = cet.Version.Value - } - c.Extensions = high.ExtractExtensions(cet.Extensions) - return c -} - -// GoLow returns the low-level CriterionExpressionType instance used to create the high-level one. -func (c *CriterionExpressionType) GoLow() *low.CriterionExpressionType { - return c.low -} - -// GoLowUntyped returns the low-level CriterionExpressionType instance with no type. -func (c *CriterionExpressionType) GoLowUntyped() any { - return c.low -} - -// Render returns a YAML representation of the CriterionExpressionType object as a byte slice. -func (c *CriterionExpressionType) Render() ([]byte, error) { - return yaml.Marshal(c) -} - -// MarshalYAML creates a ready to render YAML representation of the CriterionExpressionType object. -func (c *CriterionExpressionType) MarshalYAML() (any, error) { - m := orderedmap.New[string, any]() - if c.Type != "" { - m.Set("type", c.Type) - } - if c.Version != "" { - m.Set("version", c.Version) - } - marshalExtensions(m, c.Extensions) - return m, nil -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/failure_action.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/failure_action.go deleted file mode 100644 index 1a5c69d26ac..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/failure_action.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2022-2026 Princess Beef Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package arazzo - -import ( - "github.com/pb33f/libopenapi/datamodel/high" - low "github.com/pb33f/libopenapi/datamodel/low/arazzo" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// FailureAction represents a high-level Arazzo Failure Action Object. 
-// A failure action can be a full definition or a Reusable Object with a $components reference. -// https://spec.openapis.org/arazzo/v1.0.1#failure-action-object -type FailureAction struct { - Name string `json:"name,omitempty" yaml:"name,omitempty"` - Type string `json:"type,omitempty" yaml:"type,omitempty"` - WorkflowId string `json:"workflowId,omitempty" yaml:"workflowId,omitempty"` - StepId string `json:"stepId,omitempty" yaml:"stepId,omitempty"` - RetryAfter *float64 `json:"retryAfter,omitempty" yaml:"retryAfter,omitempty"` - RetryLimit *int64 `json:"retryLimit,omitempty" yaml:"retryLimit,omitempty"` - Criteria []*Criterion `json:"criteria,omitempty" yaml:"criteria,omitempty"` - Reference string `json:"reference,omitempty" yaml:"reference,omitempty"` - Extensions *orderedmap.Map[string, *yaml.Node] `json:"-" yaml:"-"` - low *low.FailureAction -} - -// IsReusable returns true if this failure action is a Reusable Object (has a reference field). -func (f *FailureAction) IsReusable() bool { - return f.Reference != "" -} - -// NewFailureAction creates a new high-level FailureAction instance from a low-level one. -func NewFailureAction(fa *low.FailureAction) *FailureAction { - f := new(FailureAction) - f.low = fa - if !fa.Name.IsEmpty() { - f.Name = fa.Name.Value - } - if !fa.Type.IsEmpty() { - f.Type = fa.Type.Value - } - if !fa.WorkflowId.IsEmpty() { - f.WorkflowId = fa.WorkflowId.Value - } - if !fa.StepId.IsEmpty() { - f.StepId = fa.StepId.Value - } - if !fa.RetryAfter.IsEmpty() { - v := fa.RetryAfter.Value - f.RetryAfter = &v - } - if !fa.RetryLimit.IsEmpty() { - v := fa.RetryLimit.Value - f.RetryLimit = &v - } - if !fa.ComponentRef.IsEmpty() { - f.Reference = fa.ComponentRef.Value - } - if !fa.Criteria.IsEmpty() { - f.Criteria = buildSlice(fa.Criteria.Value, NewCriterion) - } - f.Extensions = high.ExtractExtensions(fa.Extensions) - return f -} - -// GoLow returns the low-level FailureAction instance used to create the high-level one. 
-func (f *FailureAction) GoLow() *low.FailureAction { - return f.low -} - -// GoLowUntyped returns the low-level FailureAction instance with no type. -func (f *FailureAction) GoLowUntyped() any { - return f.low -} - -// Render returns a YAML representation of the FailureAction object as a byte slice. -func (f *FailureAction) Render() ([]byte, error) { - return yaml.Marshal(f) -} - -// MarshalYAML creates a ready to render YAML representation of the FailureAction object. -func (f *FailureAction) MarshalYAML() (any, error) { - m := orderedmap.New[string, any]() - if f.Reference != "" { - m.Set(low.ReferenceLabel, f.Reference) - return m, nil - } - if f.Name != "" { - m.Set(low.NameLabel, f.Name) - } - if f.Type != "" { - m.Set(low.TypeLabel, f.Type) - } - if f.WorkflowId != "" { - m.Set(low.WorkflowIdLabel, f.WorkflowId) - } - if f.StepId != "" { - m.Set(low.StepIdLabel, f.StepId) - } - if f.RetryAfter != nil { - m.Set(low.RetryAfterLabel, *f.RetryAfter) - } - if f.RetryLimit != nil { - m.Set(low.RetryLimitLabel, *f.RetryLimit) - } - if len(f.Criteria) > 0 { - m.Set(low.CriteriaLabel, f.Criteria) - } - marshalExtensions(m, f.Extensions) - return m, nil -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/info.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/info.go deleted file mode 100644 index 387d8cc9583..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/info.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2022-2026 Princess Beef Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package arazzo - -import ( - "github.com/pb33f/libopenapi/datamodel/high" - low "github.com/pb33f/libopenapi/datamodel/low/arazzo" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// Info represents a high-level Arazzo Info Object. 
-// https://spec.openapis.org/arazzo/v1.0.1#info-object -type Info struct { - Title string `json:"title,omitempty" yaml:"title,omitempty"` - Summary string `json:"summary,omitempty" yaml:"summary,omitempty"` - Description string `json:"description,omitempty" yaml:"description,omitempty"` - Version string `json:"version,omitempty" yaml:"version,omitempty"` - Extensions *orderedmap.Map[string, *yaml.Node] `json:"-" yaml:"-"` - low *low.Info -} - -// NewInfo creates a new high-level Info instance from a low-level one. -func NewInfo(info *low.Info) *Info { - i := new(Info) - i.low = info - if !info.Title.IsEmpty() { - i.Title = info.Title.Value - } - if !info.Summary.IsEmpty() { - i.Summary = info.Summary.Value - } - if !info.Description.IsEmpty() { - i.Description = info.Description.Value - } - if !info.Version.IsEmpty() { - i.Version = info.Version.Value - } - i.Extensions = high.ExtractExtensions(info.Extensions) - return i -} - -// GoLow returns the low-level Info instance used to create the high-level one. -func (i *Info) GoLow() *low.Info { - return i.low -} - -// GoLowUntyped returns the low-level Info instance with no type. -func (i *Info) GoLowUntyped() any { - return i.low -} - -// Render returns a YAML representation of the Info object as a byte slice. -func (i *Info) Render() ([]byte, error) { - return yaml.Marshal(i) -} - -// MarshalYAML creates a ready to render YAML representation of the Info object. 
-func (i *Info) MarshalYAML() (any, error) { - m := orderedmap.New[string, any]() - if i.Title != "" { - m.Set(low.TitleLabel, i.Title) - } - if i.Summary != "" { - m.Set(low.SummaryLabel, i.Summary) - } - if i.Description != "" { - m.Set(low.DescriptionLabel, i.Description) - } - if i.Version != "" { - m.Set(low.VersionLabel, i.Version) - } - marshalExtensions(m, i.Extensions) - return m, nil -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/marshal_helpers.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/marshal_helpers.go deleted file mode 100644 index cefc627553e..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/marshal_helpers.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2022-2026 Princess Beef Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package arazzo - -import ( - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// marshalExtensions appends extension key-value pairs from ext into the ordered map m. -func marshalExtensions(m *orderedmap.Map[string, any], ext *orderedmap.Map[string, *yaml.Node]) { - if ext == nil { - return - } - for pair := ext.First(); pair != nil; pair = pair.Next() { - m.Set(pair.Key(), pair.Value()) - } -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/parameter.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/parameter.go deleted file mode 100644 index a398a5c990d..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/parameter.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2022-2026 Princess Beef Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package arazzo - -import ( - "github.com/pb33f/libopenapi/datamodel/high" - low "github.com/pb33f/libopenapi/datamodel/low/arazzo" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// Parameter represents a high-level Arazzo Parameter Object. 
-// A parameter can be a full parameter definition or a Reusable Object with a $components reference. -// https://spec.openapis.org/arazzo/v1.0.1#parameter-object -type Parameter struct { - Name string `json:"name,omitempty" yaml:"name,omitempty"` - In string `json:"in,omitempty" yaml:"in,omitempty"` - Value *yaml.Node `json:"value,omitempty" yaml:"value,omitempty"` - Reference string `json:"reference,omitempty" yaml:"reference,omitempty"` - Extensions *orderedmap.Map[string, *yaml.Node] `json:"-" yaml:"-"` - low *low.Parameter -} - -// IsReusable returns true if this parameter is a Reusable Object (has a reference field). -func (p *Parameter) IsReusable() bool { - return p.Reference != "" -} - -// NewParameter creates a new high-level Parameter instance from a low-level one. -func NewParameter(param *low.Parameter) *Parameter { - p := new(Parameter) - p.low = param - if !param.Name.IsEmpty() { - p.Name = param.Name.Value - } - if !param.In.IsEmpty() { - p.In = param.In.Value - } - if !param.Value.IsEmpty() { - p.Value = param.Value.Value - } - if !param.ComponentRef.IsEmpty() { - p.Reference = param.ComponentRef.Value - } - p.Extensions = high.ExtractExtensions(param.Extensions) - return p -} - -// GoLow returns the low-level Parameter instance used to create the high-level one. -func (p *Parameter) GoLow() *low.Parameter { - return p.low -} - -// GoLowUntyped returns the low-level Parameter instance with no type. -func (p *Parameter) GoLowUntyped() any { - return p.low -} - -// Render returns a YAML representation of the Parameter object as a byte slice. -func (p *Parameter) Render() ([]byte, error) { - return yaml.Marshal(p) -} - -// MarshalYAML creates a ready to render YAML representation of the Parameter object. 
-func (p *Parameter) MarshalYAML() (any, error) { - m := orderedmap.New[string, any]() - if p.Reference != "" { - m.Set(low.ReferenceLabel, p.Reference) - if p.Value != nil { - m.Set(low.ValueLabel, p.Value) - } - return m, nil - } - if p.Name != "" { - m.Set(low.NameLabel, p.Name) - } - if p.In != "" { - m.Set(low.InLabel, p.In) - } - if p.Value != nil { - m.Set(low.ValueLabel, p.Value) - } - marshalExtensions(m, p.Extensions) - return m, nil -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/payload_replacement.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/payload_replacement.go deleted file mode 100644 index a168fc84529..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/payload_replacement.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2022-2026 Princess Beef Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package arazzo - -import ( - "github.com/pb33f/libopenapi/datamodel/high" - low "github.com/pb33f/libopenapi/datamodel/low/arazzo" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// PayloadReplacement represents a high-level Arazzo Payload Replacement Object. -// https://spec.openapis.org/arazzo/v1.0.1#payload-replacement-object -type PayloadReplacement struct { - Target string `json:"target,omitempty" yaml:"target,omitempty"` - Value *yaml.Node `json:"value,omitempty" yaml:"value,omitempty"` - Extensions *orderedmap.Map[string, *yaml.Node] `json:"-" yaml:"-"` - low *low.PayloadReplacement -} - -// NewPayloadReplacement creates a new high-level PayloadReplacement instance from a low-level one. 
-func NewPayloadReplacement(pr *low.PayloadReplacement) *PayloadReplacement { - p := new(PayloadReplacement) - p.low = pr - if !pr.Target.IsEmpty() { - p.Target = pr.Target.Value - } - if !pr.Value.IsEmpty() { - p.Value = pr.Value.Value - } - p.Extensions = high.ExtractExtensions(pr.Extensions) - return p -} - -// GoLow returns the low-level PayloadReplacement instance used to create the high-level one. -func (p *PayloadReplacement) GoLow() *low.PayloadReplacement { - return p.low -} - -// GoLowUntyped returns the low-level PayloadReplacement instance with no type. -func (p *PayloadReplacement) GoLowUntyped() any { - return p.low -} - -// Render returns a YAML representation of the PayloadReplacement object as a byte slice. -func (p *PayloadReplacement) Render() ([]byte, error) { - return yaml.Marshal(p) -} - -// MarshalYAML creates a ready to render YAML representation of the PayloadReplacement object. -func (p *PayloadReplacement) MarshalYAML() (any, error) { - m := orderedmap.New[string, any]() - if p.Target != "" { - m.Set(low.TargetLabel, p.Target) - } - if p.Value != nil { - m.Set(low.ValueLabel, p.Value) - } - marshalExtensions(m, p.Extensions) - return m, nil -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/request_body.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/request_body.go deleted file mode 100644 index 712069cee40..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/request_body.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2022-2026 Princess Beef Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package arazzo - -import ( - "github.com/pb33f/libopenapi/datamodel/high" - low "github.com/pb33f/libopenapi/datamodel/low/arazzo" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// RequestBody represents a high-level Arazzo Request Body Object. 
-// https://spec.openapis.org/arazzo/v1.0.1#request-body-object -type RequestBody struct { - ContentType string `json:"contentType,omitempty" yaml:"contentType,omitempty"` - Payload *yaml.Node `json:"payload,omitempty" yaml:"payload,omitempty"` - Replacements []*PayloadReplacement `json:"replacements,omitempty" yaml:"replacements,omitempty"` - Extensions *orderedmap.Map[string, *yaml.Node] `json:"-" yaml:"-"` - low *low.RequestBody -} - -// NewRequestBody creates a new high-level RequestBody instance from a low-level one. -func NewRequestBody(rb *low.RequestBody) *RequestBody { - r := new(RequestBody) - r.low = rb - if !rb.ContentType.IsEmpty() { - r.ContentType = rb.ContentType.Value - } - if !rb.Payload.IsEmpty() { - r.Payload = rb.Payload.Value - } - if !rb.Replacements.IsEmpty() { - r.Replacements = buildSlice(rb.Replacements.Value, NewPayloadReplacement) - } - r.Extensions = high.ExtractExtensions(rb.Extensions) - return r -} - -// GoLow returns the low-level RequestBody instance used to create the high-level one. -func (r *RequestBody) GoLow() *low.RequestBody { - return r.low -} - -// GoLowUntyped returns the low-level RequestBody instance with no type. -func (r *RequestBody) GoLowUntyped() any { - return r.low -} - -// Render returns a YAML representation of the RequestBody object as a byte slice. -func (r *RequestBody) Render() ([]byte, error) { - return yaml.Marshal(r) -} - -// MarshalYAML creates a ready to render YAML representation of the RequestBody object. 
-func (r *RequestBody) MarshalYAML() (any, error) { - m := orderedmap.New[string, any]() - if r.ContentType != "" { - m.Set(low.ContentTypeLabel, r.ContentType) - } - if r.Payload != nil { - m.Set(low.PayloadLabel, r.Payload) - } - if len(r.Replacements) > 0 { - m.Set(low.ReplacementsLabel, r.Replacements) - } - marshalExtensions(m, r.Extensions) - return m, nil -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/source_description.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/source_description.go deleted file mode 100644 index c8c40e7ff49..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/source_description.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2022-2026 Princess Beef Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package arazzo - -import ( - "github.com/pb33f/libopenapi/datamodel/high" - low "github.com/pb33f/libopenapi/datamodel/low/arazzo" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// SourceDescription represents a high-level Arazzo Source Description Object. -// https://spec.openapis.org/arazzo/v1.0.1#source-description-object -type SourceDescription struct { - Name string `json:"name,omitempty" yaml:"name,omitempty"` - URL string `json:"url,omitempty" yaml:"url,omitempty"` - Type string `json:"type,omitempty" yaml:"type,omitempty"` - Extensions *orderedmap.Map[string, *yaml.Node] `json:"-" yaml:"-"` - low *low.SourceDescription -} - -// NewSourceDescription creates a new high-level SourceDescription instance from a low-level one. 
-func NewSourceDescription(sd *low.SourceDescription) *SourceDescription { - s := new(SourceDescription) - s.low = sd - if !sd.Name.IsEmpty() { - s.Name = sd.Name.Value - } - if !sd.URL.IsEmpty() { - s.URL = sd.URL.Value - } - if !sd.Type.IsEmpty() { - s.Type = sd.Type.Value - } - s.Extensions = high.ExtractExtensions(sd.Extensions) - return s -} - -// GoLow returns the low-level SourceDescription instance used to create the high-level one. -func (s *SourceDescription) GoLow() *low.SourceDescription { - return s.low -} - -// GoLowUntyped returns the low-level SourceDescription instance with no type. -func (s *SourceDescription) GoLowUntyped() any { - return s.low -} - -// Render returns a YAML representation of the SourceDescription object as a byte slice. -func (s *SourceDescription) Render() ([]byte, error) { - return yaml.Marshal(s) -} - -// MarshalYAML creates a ready to render YAML representation of the SourceDescription object. -func (s *SourceDescription) MarshalYAML() (any, error) { - m := orderedmap.New[string, any]() - if s.Name != "" { - m.Set(low.NameLabel, s.Name) - } - if s.URL != "" { - m.Set(low.URLLabel, s.URL) - } - if s.Type != "" { - m.Set(low.TypeLabel, s.Type) - } - marshalExtensions(m, s.Extensions) - return m, nil -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/step.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/step.go deleted file mode 100644 index a6745cbf16c..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/step.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2022-2026 Princess Beef Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package arazzo - -import ( - "github.com/pb33f/libopenapi/datamodel/high" - lowmodel "github.com/pb33f/libopenapi/datamodel/low" - low "github.com/pb33f/libopenapi/datamodel/low/arazzo" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// Step represents a high-level Arazzo Step Object. 
-// https://spec.openapis.org/arazzo/v1.0.1#step-object -type Step struct { - StepId string `json:"stepId,omitempty" yaml:"stepId,omitempty"` - Description string `json:"description,omitempty" yaml:"description,omitempty"` - OperationId string `json:"operationId,omitempty" yaml:"operationId,omitempty"` - OperationPath string `json:"operationPath,omitempty" yaml:"operationPath,omitempty"` - WorkflowId string `json:"workflowId,omitempty" yaml:"workflowId,omitempty"` - Parameters []*Parameter `json:"parameters,omitempty" yaml:"parameters,omitempty"` - RequestBody *RequestBody `json:"requestBody,omitempty" yaml:"requestBody,omitempty"` - SuccessCriteria []*Criterion `json:"successCriteria,omitempty" yaml:"successCriteria,omitempty"` - OnSuccess []*SuccessAction `json:"onSuccess,omitempty" yaml:"onSuccess,omitempty"` - OnFailure []*FailureAction `json:"onFailure,omitempty" yaml:"onFailure,omitempty"` - Outputs *orderedmap.Map[string, string] `json:"outputs,omitempty" yaml:"outputs,omitempty"` - Extensions *orderedmap.Map[string, *yaml.Node] `json:"-" yaml:"-"` - low *low.Step -} - -// NewStep creates a new high-level Step instance from a low-level one. 
-func NewStep(step *low.Step) *Step { - s := new(Step) - s.low = step - if !step.StepId.IsEmpty() { - s.StepId = step.StepId.Value - } - if !step.Description.IsEmpty() { - s.Description = step.Description.Value - } - if !step.OperationId.IsEmpty() { - s.OperationId = step.OperationId.Value - } - if !step.OperationPath.IsEmpty() { - s.OperationPath = step.OperationPath.Value - } - if !step.WorkflowId.IsEmpty() { - s.WorkflowId = step.WorkflowId.Value - } - if !step.Parameters.IsEmpty() { - s.Parameters = buildSlice(step.Parameters.Value, NewParameter) - } - if !step.RequestBody.IsEmpty() { - s.RequestBody = NewRequestBody(step.RequestBody.Value) - } - if !step.SuccessCriteria.IsEmpty() { - s.SuccessCriteria = buildSlice(step.SuccessCriteria.Value, NewCriterion) - } - if !step.OnSuccess.IsEmpty() { - s.OnSuccess = buildSlice(step.OnSuccess.Value, NewSuccessAction) - } - if !step.OnFailure.IsEmpty() { - s.OnFailure = buildSlice(step.OnFailure.Value, NewFailureAction) - } - if !step.Outputs.IsEmpty() { - s.Outputs = lowmodel.FromReferenceMap[string, string](step.Outputs.Value) - } - s.Extensions = high.ExtractExtensions(step.Extensions) - return s -} - -// GoLow returns the low-level Step instance used to create the high-level one. -func (s *Step) GoLow() *low.Step { - return s.low -} - -// GoLowUntyped returns the low-level Step instance with no type. -func (s *Step) GoLowUntyped() any { - return s.low -} - -// Render returns a YAML representation of the Step object as a byte slice. -func (s *Step) Render() ([]byte, error) { - return yaml.Marshal(s) -} - -// MarshalYAML creates a ready to render YAML representation of the Step object. 
-func (s *Step) MarshalYAML() (any, error) { - m := orderedmap.New[string, any]() - if s.StepId != "" { - m.Set(low.StepIdLabel, s.StepId) - } - if s.Description != "" { - m.Set(low.DescriptionLabel, s.Description) - } - if s.OperationId != "" { - m.Set(low.OperationIdLabel, s.OperationId) - } - if s.OperationPath != "" { - m.Set(low.OperationPathLabel, s.OperationPath) - } - if s.WorkflowId != "" { - m.Set(low.WorkflowIdLabel, s.WorkflowId) - } - if len(s.Parameters) > 0 { - m.Set(low.ParametersLabel, s.Parameters) - } - if s.RequestBody != nil { - m.Set(low.RequestBodyLabel, s.RequestBody) - } - if len(s.SuccessCriteria) > 0 { - m.Set(low.SuccessCriteriaLabel, s.SuccessCriteria) - } - if len(s.OnSuccess) > 0 { - m.Set(low.OnSuccessLabel, s.OnSuccess) - } - if len(s.OnFailure) > 0 { - m.Set(low.OnFailureLabel, s.OnFailure) - } - if s.Outputs != nil && s.Outputs.Len() > 0 { - m.Set(low.OutputsLabel, s.Outputs) - } - marshalExtensions(m, s.Extensions) - return m, nil -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/success_action.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/success_action.go deleted file mode 100644 index 9c50798d22e..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/success_action.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2022-2026 Princess Beef Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package arazzo - -import ( - "github.com/pb33f/libopenapi/datamodel/high" - low "github.com/pb33f/libopenapi/datamodel/low/arazzo" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// SuccessAction represents a high-level Arazzo Success Action Object. -// A success action can be a full definition or a Reusable Object with a $components reference. 
-// https://spec.openapis.org/arazzo/v1.0.1#success-action-object -type SuccessAction struct { - Name string `json:"name,omitempty" yaml:"name,omitempty"` - Type string `json:"type,omitempty" yaml:"type,omitempty"` - WorkflowId string `json:"workflowId,omitempty" yaml:"workflowId,omitempty"` - StepId string `json:"stepId,omitempty" yaml:"stepId,omitempty"` - Criteria []*Criterion `json:"criteria,omitempty" yaml:"criteria,omitempty"` - Reference string `json:"reference,omitempty" yaml:"reference,omitempty"` - Extensions *orderedmap.Map[string, *yaml.Node] `json:"-" yaml:"-"` - low *low.SuccessAction -} - -// IsReusable returns true if this success action is a Reusable Object (has a reference field). -func (s *SuccessAction) IsReusable() bool { - return s.Reference != "" -} - -// NewSuccessAction creates a new high-level SuccessAction instance from a low-level one. -func NewSuccessAction(sa *low.SuccessAction) *SuccessAction { - s := new(SuccessAction) - s.low = sa - if !sa.Name.IsEmpty() { - s.Name = sa.Name.Value - } - if !sa.Type.IsEmpty() { - s.Type = sa.Type.Value - } - if !sa.WorkflowId.IsEmpty() { - s.WorkflowId = sa.WorkflowId.Value - } - if !sa.StepId.IsEmpty() { - s.StepId = sa.StepId.Value - } - if !sa.ComponentRef.IsEmpty() { - s.Reference = sa.ComponentRef.Value - } - if !sa.Criteria.IsEmpty() { - s.Criteria = buildSlice(sa.Criteria.Value, NewCriterion) - } - s.Extensions = high.ExtractExtensions(sa.Extensions) - return s -} - -// GoLow returns the low-level SuccessAction instance used to create the high-level one. -func (s *SuccessAction) GoLow() *low.SuccessAction { - return s.low -} - -// GoLowUntyped returns the low-level SuccessAction instance with no type. -func (s *SuccessAction) GoLowUntyped() any { - return s.low -} - -// Render returns a YAML representation of the SuccessAction object as a byte slice. 
-func (s *SuccessAction) Render() ([]byte, error) { - return yaml.Marshal(s) -} - -// MarshalYAML creates a ready to render YAML representation of the SuccessAction object. -func (s *SuccessAction) MarshalYAML() (any, error) { - m := orderedmap.New[string, any]() - if s.Reference != "" { - m.Set(low.ReferenceLabel, s.Reference) - return m, nil - } - if s.Name != "" { - m.Set(low.NameLabel, s.Name) - } - if s.Type != "" { - m.Set(low.TypeLabel, s.Type) - } - if s.WorkflowId != "" { - m.Set(low.WorkflowIdLabel, s.WorkflowId) - } - if s.StepId != "" { - m.Set(low.StepIdLabel, s.StepId) - } - if len(s.Criteria) > 0 { - m.Set(low.CriteriaLabel, s.Criteria) - } - marshalExtensions(m, s.Extensions) - return m, nil -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/workflow.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/workflow.go deleted file mode 100644 index 277774e0377..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/arazzo/workflow.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2022-2026 Princess Beef Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package arazzo - -import ( - "github.com/pb33f/libopenapi/datamodel/high" - lowmodel "github.com/pb33f/libopenapi/datamodel/low" - low "github.com/pb33f/libopenapi/datamodel/low/arazzo" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// Workflow represents a high-level Arazzo Workflow Object. 
-// https://spec.openapis.org/arazzo/v1.0.1#workflow-object -type Workflow struct { - WorkflowId string `json:"workflowId,omitempty" yaml:"workflowId,omitempty"` - Summary string `json:"summary,omitempty" yaml:"summary,omitempty"` - Description string `json:"description,omitempty" yaml:"description,omitempty"` - Inputs *yaml.Node `json:"inputs,omitempty" yaml:"inputs,omitempty"` - DependsOn []string `json:"dependsOn,omitempty" yaml:"dependsOn,omitempty"` - Steps []*Step `json:"steps,omitempty" yaml:"steps,omitempty"` - SuccessActions []*SuccessAction `json:"successActions,omitempty" yaml:"successActions,omitempty"` - FailureActions []*FailureAction `json:"failureActions,omitempty" yaml:"failureActions,omitempty"` - Outputs *orderedmap.Map[string, string] `json:"outputs,omitempty" yaml:"outputs,omitempty"` - Parameters []*Parameter `json:"parameters,omitempty" yaml:"parameters,omitempty"` - Extensions *orderedmap.Map[string, *yaml.Node] `json:"-" yaml:"-"` - low *low.Workflow -} - -// NewWorkflow creates a new high-level Workflow instance from a low-level one. 
-func NewWorkflow(wf *low.Workflow) *Workflow { - w := new(Workflow) - w.low = wf - if !wf.WorkflowId.IsEmpty() { - w.WorkflowId = wf.WorkflowId.Value - } - if !wf.Summary.IsEmpty() { - w.Summary = wf.Summary.Value - } - if !wf.Description.IsEmpty() { - w.Description = wf.Description.Value - } - if !wf.Inputs.IsEmpty() { - w.Inputs = wf.Inputs.Value - } - if !wf.DependsOn.IsEmpty() { - w.DependsOn = buildValueSlice(wf.DependsOn.Value) - } - if !wf.Steps.IsEmpty() { - w.Steps = buildSlice(wf.Steps.Value, NewStep) - } - if !wf.SuccessActions.IsEmpty() { - w.SuccessActions = buildSlice(wf.SuccessActions.Value, NewSuccessAction) - } - if !wf.FailureActions.IsEmpty() { - w.FailureActions = buildSlice(wf.FailureActions.Value, NewFailureAction) - } - if !wf.Outputs.IsEmpty() { - w.Outputs = lowmodel.FromReferenceMap[string, string](wf.Outputs.Value) - } - if !wf.Parameters.IsEmpty() { - w.Parameters = buildSlice(wf.Parameters.Value, NewParameter) - } - w.Extensions = high.ExtractExtensions(wf.Extensions) - return w -} - -// GoLow returns the low-level Workflow instance used to create the high-level one. -func (w *Workflow) GoLow() *low.Workflow { - return w.low -} - -// GoLowUntyped returns the low-level Workflow instance with no type. -func (w *Workflow) GoLowUntyped() any { - return w.low -} - -// Render returns a YAML representation of the Workflow object as a byte slice. -func (w *Workflow) Render() ([]byte, error) { - return yaml.Marshal(w) -} - -// MarshalYAML creates a ready to render YAML representation of the Workflow object. 
-func (w *Workflow) MarshalYAML() (any, error) { - m := orderedmap.New[string, any]() - if w.WorkflowId != "" { - m.Set(low.WorkflowIdLabel, w.WorkflowId) - } - if w.Summary != "" { - m.Set(low.SummaryLabel, w.Summary) - } - if w.Description != "" { - m.Set(low.DescriptionLabel, w.Description) - } - if w.Inputs != nil { - m.Set(low.InputsLabel, w.Inputs) - } - if len(w.DependsOn) > 0 { - m.Set(low.DependsOnLabel, w.DependsOn) - } - if len(w.Steps) > 0 { - m.Set(low.StepsLabel, w.Steps) - } - if len(w.SuccessActions) > 0 { - m.Set(low.SuccessActionsLabel, w.SuccessActions) - } - if len(w.FailureActions) > 0 { - m.Set(low.FailureActionsLabel, w.FailureActions) - } - if w.Outputs != nil && w.Outputs.Len() > 0 { - m.Set(low.OutputsLabel, w.Outputs) - } - if len(w.Parameters) > 0 { - m.Set(low.ParametersLabel, w.Parameters) - } - marshalExtensions(m, w.Extensions) - return m, nil -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/overlay/action.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/overlay/action.go deleted file mode 100644 index ce88026a152..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/overlay/action.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2022-2025 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package overlay - -import ( - "github.com/pb33f/libopenapi/datamodel/high" - low "github.com/pb33f/libopenapi/datamodel/low/overlay" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// Action represents a high-level Overlay Action Object. 
-// https://spec.openapis.org/overlay/v1.1.0#action-object -type Action struct { - Target string `json:"target,omitempty" yaml:"target,omitempty"` - Description string `json:"description,omitempty" yaml:"description,omitempty"` - Update *yaml.Node `json:"update,omitempty" yaml:"update,omitempty"` - Remove bool `json:"remove,omitempty" yaml:"remove,omitempty"` - Copy string `json:"copy,omitempty" yaml:"copy,omitempty"` - Extensions *orderedmap.Map[string, *yaml.Node] `json:"-" yaml:"-"` - low *low.Action -} - -// NewAction creates a new high-level Action instance from a low-level one. -func NewAction(action *low.Action) *Action { - a := new(Action) - a.low = action - if !action.Target.IsEmpty() { - a.Target = action.Target.Value - } - if !action.Description.IsEmpty() { - a.Description = action.Description.Value - } - if !action.Update.IsEmpty() { - a.Update = action.Update.Value - } - if !action.Remove.IsEmpty() { - a.Remove = action.Remove.Value - } - if !action.Copy.IsEmpty() { - a.Copy = action.Copy.Value - } - a.Extensions = high.ExtractExtensions(action.Extensions) - return a -} - -// GoLow returns the low-level Action instance used to create the high-level one. -func (a *Action) GoLow() *low.Action { - return a.low -} - -// GoLowUntyped returns the low-level Action instance with no type. -func (a *Action) GoLowUntyped() any { - return a.low -} - -// Render returns a YAML representation of the Action object as a byte slice. -func (a *Action) Render() ([]byte, error) { - return yaml.Marshal(a) -} - -// MarshalYAML creates a ready to render YAML representation of the Action object. 
-func (a *Action) MarshalYAML() (any, error) { - m := orderedmap.New[string, any]() - if a.Target != "" { - m.Set(low.TargetLabel, a.Target) - } - if a.Description != "" { - m.Set(low.DescriptionLabel, a.Description) - } - if a.Copy != "" { - m.Set(low.CopyLabel, a.Copy) - } - if a.Update != nil { - m.Set(low.UpdateLabel, a.Update) - } - if a.Remove { - m.Set(low.RemoveLabel, a.Remove) - } - for pair := a.Extensions.First(); pair != nil; pair = pair.Next() { - m.Set(pair.Key(), pair.Value()) - } - return m, nil -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/overlay/info.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/overlay/info.go deleted file mode 100644 index dc3eeb5f333..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/overlay/info.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2022-2025 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package overlay - -import ( - "github.com/pb33f/libopenapi/datamodel/high" - low "github.com/pb33f/libopenapi/datamodel/low/overlay" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// Info represents a high-level Overlay Info Object. -// https://spec.openapis.org/overlay/v1.1.0#info-object -type Info struct { - Title string `json:"title,omitempty" yaml:"title,omitempty"` - Version string `json:"version,omitempty" yaml:"version,omitempty"` - Description string `json:"description,omitempty" yaml:"description,omitempty"` - Extensions *orderedmap.Map[string, *yaml.Node] `json:"-" yaml:"-"` - low *low.Info -} - -// NewInfo creates a new high-level Info instance from a low-level one. 
-func NewInfo(info *low.Info) *Info { - i := new(Info) - i.low = info - if !info.Title.IsEmpty() { - i.Title = info.Title.Value - } - if !info.Version.IsEmpty() { - i.Version = info.Version.Value - } - if !info.Description.IsEmpty() { - i.Description = info.Description.Value - } - i.Extensions = high.ExtractExtensions(info.Extensions) - return i -} - -// GoLow returns the low-level Info instance used to create the high-level one. -func (i *Info) GoLow() *low.Info { - return i.low -} - -// GoLowUntyped returns the low-level Info instance with no type. -func (i *Info) GoLowUntyped() any { - return i.low -} - -// Render returns a YAML representation of the Info object as a byte slice. -func (i *Info) Render() ([]byte, error) { - return yaml.Marshal(i) -} - -// MarshalYAML creates a ready to render YAML representation of the Info object. -func (i *Info) MarshalYAML() (any, error) { - m := orderedmap.New[string, any]() - if i.Title != "" { - m.Set("title", i.Title) - } - if i.Version != "" { - m.Set("version", i.Version) - } - if i.Description != "" { - m.Set("description", i.Description) - } - for pair := i.Extensions.First(); pair != nil; pair = pair.Next() { - m.Set(pair.Key(), pair.Value()) - } - return m, nil -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/overlay/overlay.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/overlay/overlay.go deleted file mode 100644 index 7a6288a8ea9..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/overlay/overlay.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2022-2025 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package overlay - -import ( - "github.com/pb33f/libopenapi/datamodel/high" - low "github.com/pb33f/libopenapi/datamodel/low/overlay" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// Overlay represents a high-level OpenAPI Overlay document. 
-// https://spec.openapis.org/overlay/v1.0.0 -type Overlay struct { - Overlay string `json:"overlay,omitempty" yaml:"overlay,omitempty"` - Info *Info `json:"info,omitempty" yaml:"info,omitempty"` - Extends string `json:"extends,omitempty" yaml:"extends,omitempty"` - Actions []*Action `json:"actions,omitempty" yaml:"actions,omitempty"` - Extensions *orderedmap.Map[string, *yaml.Node] `json:"-" yaml:"-"` - low *low.Overlay -} - -// NewOverlay creates a new high-level Overlay instance from a low-level one. -func NewOverlay(overlay *low.Overlay) *Overlay { - o := new(Overlay) - o.low = overlay - if !overlay.Overlay.IsEmpty() { - o.Overlay = overlay.Overlay.Value - } - if !overlay.Info.IsEmpty() { - o.Info = NewInfo(overlay.Info.Value) - } - if !overlay.Extends.IsEmpty() { - o.Extends = overlay.Extends.Value - } - if !overlay.Actions.IsEmpty() { - actions := make([]*Action, 0, len(overlay.Actions.Value)) - for _, action := range overlay.Actions.Value { - actions = append(actions, NewAction(action.Value)) - } - o.Actions = actions - } - o.Extensions = high.ExtractExtensions(overlay.Extensions) - return o -} - -// GoLow returns the low-level Overlay instance used to create the high-level one. -func (o *Overlay) GoLow() *low.Overlay { - return o.low -} - -// GoLowUntyped returns the low-level Overlay instance with no type. -func (o *Overlay) GoLowUntyped() any { - return o.low -} - -// Render returns a YAML representation of the Overlay object as a byte slice. -func (o *Overlay) Render() ([]byte, error) { - return yaml.Marshal(o) -} - -// MarshalYAML creates a ready to render YAML representation of the Overlay object. 
-func (o *Overlay) MarshalYAML() (interface{}, error) { - m := orderedmap.New[string, any]() - if o.Overlay != "" { - m.Set("overlay", o.Overlay) - } - if o.Info != nil { - m.Set("info", o.Info) - } - if o.Extends != "" { - m.Set("extends", o.Extends) - } - if len(o.Actions) > 0 { - m.Set("actions", o.Actions) - } - for pair := o.Extensions.First(); pair != nil; pair = pair.Next() { - m.Set(pair.Key(), pair.Value()) - } - return m, nil -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/asyncresult.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/asyncresult.go deleted file mode 100644 index d9193d7f58f..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/asyncresult.go +++ /dev/null @@ -1,6 +0,0 @@ -package v2 - -type asyncResult[T any] struct { - key string - result T -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/definitions.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/definitions.go deleted file mode 100644 index 081b778cc2a..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/definitions.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package v2 - -import ( - "github.com/pb33f/libopenapi/datamodel" - highbase "github.com/pb33f/libopenapi/datamodel/high/base" - lowmodel "github.com/pb33f/libopenapi/datamodel/low" - lowbase "github.com/pb33f/libopenapi/datamodel/low/base" - low "github.com/pb33f/libopenapi/datamodel/low/v2" - "github.com/pb33f/libopenapi/orderedmap" -) - -// Definitions is a high-level represents of a Swagger / OpenAPI 2 Definitions object, backed by a low-level one. -// -// An object to hold data types that can be consumed and produced by operations. These data types can be primitives, -// arrays or models. 
-// - https://swagger.io/specification/v2/#definitionsObject -type Definitions struct { - Definitions *orderedmap.Map[string, *highbase.SchemaProxy] - low *low.Definitions -} - -// NewDefinitions will create a new high-level instance of a Definition from a low-level one. -func NewDefinitions(definitions *low.Definitions) *Definitions { - rd := new(Definitions) - rd.low = definitions - defs := orderedmap.New[string, *highbase.SchemaProxy]() - translateFunc := func(pair orderedmap.Pair[lowmodel.KeyReference[string], lowmodel.ValueReference[*lowbase.SchemaProxy]]) (asyncResult[*highbase.SchemaProxy], error) { - return asyncResult[*highbase.SchemaProxy]{ - key: pair.Key().Value, - result: highbase.NewSchemaProxy(&lowmodel.NodeReference[*lowbase.SchemaProxy]{ - Value: pair.Value().Value, - }), - }, nil - } - resultFunc := func(value asyncResult[*highbase.SchemaProxy]) error { - defs.Set(value.key, value.result) - return nil - } - _ = datamodel.TranslateMapParallel(definitions.Schemas, translateFunc, resultFunc) - rd.Definitions = defs - return rd -} - -// GoLow returns the low-level Definitions object used to create the high-level one. -func (d *Definitions) GoLow() *low.Definitions { - return d.low -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/examples.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/examples.go deleted file mode 100644 index 131fbb6b086..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/examples.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package v2 - -import ( - "github.com/pb33f/libopenapi/datamodel/low" - lowv2 "github.com/pb33f/libopenapi/datamodel/low/v2" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// Example represents a high-level Swagger / OpenAPI 2 Example object, backed by a low level one. 
-// Allows sharing examples for operation responses -// - https://swagger.io/specification/v2/#exampleObject -type Example struct { - Values *orderedmap.Map[string, *yaml.Node] - low *lowv2.Examples -} - -// NewExample creates a new high-level Example instance from a low-level one. -func NewExample(examples *lowv2.Examples) *Example { - e := new(Example) - e.low = examples - if orderedmap.Len(examples.Values) > 0 { - e.Values = low.FromReferenceMap(examples.Values) - } - return e -} - -// GoLow returns the low-level Example used to create the high-level one. -func (e *Example) GoLow() *lowv2.Examples { - return e.low -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/header.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/header.go deleted file mode 100644 index bb356aed568..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/header.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package v2 - -import ( - "github.com/pb33f/libopenapi/datamodel/high" - low "github.com/pb33f/libopenapi/datamodel/low/v2" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// Header Represents a high-level Swagger / OpenAPI 2 Header object, backed by a low-level one. -// A Header is essentially identical to a Parameter, except it does not contain 'name' or 'in' properties. -// - https://swagger.io/specification/v2/#headerObject -type Header struct { - Type string - Format string - Description string - Items *Items - CollectionFormat string - Default any - Maximum int - ExclusiveMaximum bool - Minimum int - ExclusiveMinimum bool - MaxLength int - MinLength int - Pattern string - MaxItems int - MinItems int - UniqueItems bool - Enum []any - MultipleOf int - Extensions *orderedmap.Map[string, *yaml.Node] - low *low.Header -} - -// NewHeader will create a new high-level Swagger / OpenAPI 2 Header instance, from a low-level one. 
-func NewHeader(header *low.Header) *Header { - h := new(Header) - h.low = header - h.Extensions = high.ExtractExtensions(header.Extensions) - if !header.Type.IsEmpty() { - h.Type = header.Type.Value - } - if !header.Format.IsEmpty() { - h.Format = header.Type.Value - } - if !header.Description.IsEmpty() { - h.Description = header.Description.Value - } - if !header.Items.IsEmpty() { - h.Items = NewItems(header.Items.Value) - } - if !header.CollectionFormat.IsEmpty() { - h.CollectionFormat = header.CollectionFormat.Value - } - if !header.Default.IsEmpty() { - h.Default = header.Default.Value - } - if !header.Maximum.IsEmpty() { - h.Maximum = header.Maximum.Value - } - if !header.ExclusiveMaximum.IsEmpty() { - h.ExclusiveMaximum = header.ExclusiveMaximum.Value - } - if !header.Minimum.IsEmpty() { - h.Minimum = header.Minimum.Value - } - if !header.ExclusiveMinimum.Value { - h.ExclusiveMinimum = header.ExclusiveMinimum.Value - } - if !header.MaxLength.IsEmpty() { - h.MaxLength = header.MaxLength.Value - } - if !header.MinLength.IsEmpty() { - h.MinLength = header.MinLength.Value - } - if !header.Pattern.IsEmpty() { - h.Pattern = header.Pattern.Value - } - if !header.MinItems.IsEmpty() { - h.MinItems = header.MinItems.Value - } - if !header.MaxItems.IsEmpty() { - h.MaxItems = header.MaxItems.Value - } - if !header.UniqueItems.IsEmpty() { - h.UniqueItems = header.UniqueItems.IsEmpty() - } - if !header.Enum.IsEmpty() { - var enums []any - for e := range header.Enum.Value { - enums = append(enums, header.Enum.Value[e].Value) - } - h.Enum = enums - } - if !header.MultipleOf.IsEmpty() { - h.MultipleOf = header.MultipleOf.Value - } - return h -} - -// GoLow returns the low-level header used to create the high-level one. 
-func (h *Header) GoLow() *low.Header { - return h.low -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/items.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/items.go deleted file mode 100644 index 2b6e1eda2af..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/items.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package v2 - -import ( - low "github.com/pb33f/libopenapi/datamodel/low/v2" - "go.yaml.in/yaml/v4" -) - -// Items is a high-level representation of a Swagger / OpenAPI 2 Items object, backed by a low level one. -// Items is a limited subset of JSON-Schema's items object. It is used by parameter definitions that are not -// located in "body" -// - https://swagger.io/specification/v2/#itemsObject -type Items struct { - Type string - Format string - CollectionFormat string - Items *Items - Default *yaml.Node - Maximum int - ExclusiveMaximum bool - Minimum int - ExclusiveMinimum bool - MaxLength int - MinLength int - Pattern string - MaxItems int - MinItems int - UniqueItems bool - Enum []*yaml.Node - MultipleOf int - low *low.Items -} - -// NewItems creates a new high-level Items instance from a low-level one. 
-func NewItems(items *low.Items) *Items { - i := new(Items) - i.low = items - if !items.Type.IsEmpty() { - i.Type = items.Type.Value - } - if !items.Format.IsEmpty() { - i.Format = items.Format.Value - } - if !items.Items.IsEmpty() { - i.Items = NewItems(items.Items.Value) - } - if !items.CollectionFormat.IsEmpty() { - i.CollectionFormat = items.CollectionFormat.Value - } - if !items.Default.IsEmpty() { - i.Default = items.Default.Value - } - if !items.Maximum.IsEmpty() { - i.Maximum = items.Maximum.Value - } - if !items.ExclusiveMaximum.IsEmpty() { - i.ExclusiveMaximum = items.ExclusiveMaximum.Value - } - if !items.Minimum.IsEmpty() { - i.Minimum = items.Minimum.Value - } - if !items.ExclusiveMinimum.IsEmpty() { - i.ExclusiveMinimum = items.ExclusiveMinimum.Value - } - if !items.MaxLength.IsEmpty() { - i.MaxLength = items.MaxLength.Value - } - if !items.MinLength.IsEmpty() { - i.MinLength = items.MinLength.Value - } - if !items.Pattern.IsEmpty() { - i.Pattern = items.Pattern.Value - } - if !items.MinItems.IsEmpty() { - i.MinItems = items.MinItems.Value - } - if !items.MaxItems.IsEmpty() { - i.MaxItems = items.MaxItems.Value - } - if !items.UniqueItems.IsEmpty() { - i.UniqueItems = items.UniqueItems.Value - } - if !items.Enum.IsEmpty() { - var enums []*yaml.Node - for e := range items.Enum.Value { - enums = append(enums, items.Enum.Value[e].Value) - } - i.Enum = enums - } - if !items.MultipleOf.IsEmpty() { - i.MultipleOf = items.MultipleOf.Value - } - return i -} - -// GoLow returns the low-level Items object that was used to create the high-level one. 
-func (i *Items) GoLow() *low.Items { - return i.low -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/operation.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/operation.go deleted file mode 100644 index dd2a157b03a..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/operation.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package v2 - -import ( - "github.com/pb33f/libopenapi/datamodel/high" - "github.com/pb33f/libopenapi/datamodel/high/base" - low "github.com/pb33f/libopenapi/datamodel/low/v2" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// Operation represents a high-level Swagger / OpenAPI 2 Operation object, backed by a low-level one. -// It describes a single API operation on a path. -// - https://swagger.io/specification/v2/#operationObject -type Operation struct { - Tags []string - Summary string - Description string - ExternalDocs *base.ExternalDoc - OperationId string - Consumes []string - Produces []string - Parameters []*Parameter - Responses *Responses - Schemes []string - Deprecated bool - Security []*base.SecurityRequirement - Extensions *orderedmap.Map[string, *yaml.Node] - low *low.Operation -} - -// NewOperation creates a new high-level Operation instance from a low-level one. 
-func NewOperation(operation *low.Operation) *Operation { - o := new(Operation) - o.low = operation - o.Extensions = high.ExtractExtensions(operation.Extensions) - if !operation.Tags.IsEmpty() { - var tags []string - for t := range operation.Tags.Value { - tags = append(tags, operation.Tags.Value[t].Value) - } - o.Tags = tags - } - if !operation.Summary.IsEmpty() { - o.Summary = operation.Summary.Value - } - if !operation.Description.IsEmpty() { - o.Description = operation.Description.Value - } - if !operation.ExternalDocs.IsEmpty() { - o.ExternalDocs = base.NewExternalDoc(operation.ExternalDocs.Value) - } - if !operation.OperationId.IsEmpty() { - o.OperationId = operation.OperationId.Value - } - if !operation.Consumes.IsEmpty() { - var cons []string - for c := range operation.Consumes.Value { - cons = append(cons, operation.Consumes.Value[c].Value) - } - o.Consumes = cons - } - if !operation.Produces.IsEmpty() { - var prods []string - for p := range operation.Produces.Value { - prods = append(prods, operation.Produces.Value[p].Value) - } - o.Produces = prods - } - if !operation.Parameters.IsEmpty() { - var params []*Parameter - for p := range operation.Parameters.Value { - params = append(params, NewParameter(operation.Parameters.Value[p].Value)) - } - o.Parameters = params - } - if !operation.Responses.IsEmpty() { - o.Responses = NewResponses(operation.Responses.Value) - } - if !operation.Schemes.IsEmpty() { - var schemes []string - for s := range operation.Schemes.Value { - schemes = append(schemes, operation.Schemes.Value[s].Value) - } - o.Schemes = schemes - } - if !operation.Deprecated.IsEmpty() { - o.Deprecated = operation.Deprecated.Value - } - if !operation.Security.IsEmpty() { - var sec []*base.SecurityRequirement - for s := range operation.Security.Value { - sec = append(sec, base.NewSecurityRequirement(operation.Security.Value[s].Value)) - } - o.Security = sec - } - return o -} - -// GoLow returns the low-level operation used to create the high-level 
one. -func (o *Operation) GoLow() *low.Operation { - return o.low -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/parameter.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/parameter.go deleted file mode 100644 index c064f4b0d39..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/parameter.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package v2 - -import ( - "github.com/pb33f/libopenapi/datamodel/high" - "github.com/pb33f/libopenapi/datamodel/high/base" - low "github.com/pb33f/libopenapi/datamodel/low/v2" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// Parameter represents a high-level Swagger / OpenAPI 2 Parameter object, backed by a low-level one. -// -// A unique parameter is defined by a combination of a name and location. -// -// There are five possible parameter types. -// -// Path -// -// Used together with Path Templating, where the parameter value is actually part of the operation's URL. -// This does not include the host or base path of the API. For example, in /items/{itemId}, the path parameter is itemId. -// -// Query -// -// Parameters that are appended to the URL. For example, in /items?id=###, the query parameter is id. -// -// Header -// -// Custom headers that are expected as part of the request. -// -// Body -// -// The payload that's appended to the HTTP request. Since there can only be one payload, there can only be one body parameter. -// The name of the body parameter has no effect on the parameter itself and is used for documentation purposes only. -// Since Form parameters are also in the payload, body and form parameters cannot exist together for the same operation. 
-// -// Form -// -// Used to describe the payload of an HTTP request when either application/x-www-form-urlencoded, multipart/form-data -// or both are used as the content type of the request (in Swagger's definition, the consumes property of an operation). -// This is the only parameter type that can be used to send files, thus supporting the file type. Since form parameters -// are sent in the payload, they cannot be declared together with a body parameter for the same operation. Form -// parameters have a different format based on the content-type used (for further details, -// consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4): -// application/x-www-form-urlencoded - Similar to the format of Query parameters but as a payload. For example, -// foo=1&bar=swagger - both foo and bar are form parameters. This is normally used for simple parameters that are -// being transferred. -// multipart/form-data - each parameter takes a section in the payload with an internal header. For example, for -// the header Content-Disposition: form-data; name="submit-name" the name of the parameter is -// submit-name. This type of form parameters is more commonly used for file transfers -// -// https://swagger.io/specification/v2/#parameterObject -type Parameter struct { - Name string - In string - Type string - Format string - Description string - Required *bool - AllowEmptyValue *bool - Schema *base.SchemaProxy - Items *Items - CollectionFormat string - Default *yaml.Node - Maximum *int - ExclusiveMaximum *bool - Minimum *int - ExclusiveMinimum *bool - MaxLength *int - MinLength *int - Pattern string - MaxItems *int - MinItems *int - UniqueItems *bool - Enum []*yaml.Node - MultipleOf *int - Extensions *orderedmap.Map[string, *yaml.Node] - low *low.Parameter -} - -// NewParameter creates a new high-level instance of a Parameter from a low-level one. 
-func NewParameter(parameter *low.Parameter) *Parameter { - p := new(Parameter) - p.low = parameter - p.Extensions = high.ExtractExtensions(parameter.Extensions) - if !parameter.Name.IsEmpty() { - p.Name = parameter.Name.Value - } - if !parameter.In.IsEmpty() { - p.In = parameter.In.Value - } - if !parameter.Type.IsEmpty() { - p.Type = parameter.Type.Value - } - if !parameter.Format.IsEmpty() { - p.Format = parameter.Format.Value - } - if !parameter.Description.IsEmpty() { - p.Description = parameter.Description.Value - } - if !parameter.Required.IsEmpty() { - p.Required = ¶meter.Required.Value - } - if !parameter.AllowEmptyValue.IsEmpty() { - p.AllowEmptyValue = ¶meter.AllowEmptyValue.Value - } - if !parameter.Schema.IsEmpty() { - p.Schema = base.NewSchemaProxy(¶meter.Schema) - } - if !parameter.Items.IsEmpty() { - p.Items = NewItems(parameter.Items.Value) - } - if !parameter.CollectionFormat.IsEmpty() { - p.CollectionFormat = parameter.CollectionFormat.Value - } - if !parameter.Default.IsEmpty() { - p.Default = parameter.Default.Value - } - if !parameter.Maximum.IsEmpty() { - p.Maximum = ¶meter.Maximum.Value - } - if !parameter.ExclusiveMaximum.IsEmpty() { - p.ExclusiveMaximum = ¶meter.ExclusiveMaximum.Value - } - if !parameter.Minimum.IsEmpty() { - p.Minimum = ¶meter.Minimum.Value - } - if !parameter.ExclusiveMinimum.IsEmpty() { - p.ExclusiveMinimum = ¶meter.ExclusiveMinimum.Value - } - if !parameter.MaxLength.IsEmpty() { - p.MaxLength = ¶meter.MaxLength.Value - } - if !parameter.MinLength.IsEmpty() { - p.MinLength = ¶meter.MinLength.Value - } - if !parameter.Pattern.IsEmpty() { - p.Pattern = parameter.Pattern.Value - } - if !parameter.MinItems.IsEmpty() { - p.MinItems = ¶meter.MinItems.Value - } - if !parameter.MaxItems.IsEmpty() { - p.MaxItems = ¶meter.MaxItems.Value - } - if !parameter.UniqueItems.IsEmpty() { - p.UniqueItems = ¶meter.UniqueItems.Value - } - if !parameter.Enum.IsEmpty() { - var enums []*yaml.Node - for e := range parameter.Enum.Value { - enums 
= append(enums, parameter.Enum.Value[e].Value) - } - p.Enum = enums - } - if !parameter.MultipleOf.IsEmpty() { - p.MultipleOf = ¶meter.MultipleOf.Value - } - return p -} - -// GoLow returns the low-level Parameter used to create the high-level one. -func (p *Parameter) GoLow() *low.Parameter { - return p.low -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/parameter_definitions.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/parameter_definitions.go deleted file mode 100644 index 7fede3822d2..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/parameter_definitions.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package v2 - -import ( - "github.com/pb33f/libopenapi/datamodel" - lowmodel "github.com/pb33f/libopenapi/datamodel/low" - low "github.com/pb33f/libopenapi/datamodel/low/v2" - "github.com/pb33f/libopenapi/orderedmap" -) - -// ParameterDefinitions is a high-level representation of a Swagger / OpenAPI 2 Parameters Definitions object -// that is backed by a low-level one. -// -// ParameterDefinitions holds parameters to be reused across operations. Parameter definitions can be -// referenced to the ones defined here. It does not define global operation parameters -// - https://swagger.io/specification/v2/#parametersDefinitionsObject -type ParameterDefinitions struct { - Definitions *orderedmap.Map[string, *Parameter] - low *low.ParameterDefinitions -} - -// NewParametersDefinitions creates a new instance of a high-level ParameterDefinitions, from a low-level one. 
-// Every parameter is extracted asynchronously due to the potential depth -func NewParametersDefinitions(parametersDefinitions *low.ParameterDefinitions) *ParameterDefinitions { - pd := new(ParameterDefinitions) - pd.low = parametersDefinitions - params := orderedmap.New[string, *Parameter]() - translateFunc := func(pair orderedmap.Pair[lowmodel.KeyReference[string], lowmodel.ValueReference[*low.Parameter]]) (asyncResult[*Parameter], error) { - return asyncResult[*Parameter]{ - key: pair.Key().Value, - result: NewParameter(pair.Value().Value), - }, nil - } - resultFunc := func(value asyncResult[*Parameter]) error { - params.Set(value.key, value.result) - return nil - } - _ = datamodel.TranslateMapParallel(parametersDefinitions.Definitions, translateFunc, resultFunc) - pd.Definitions = params - return pd -} - -// GoLow returns the low-level ParameterDefinitions instance that backs the low-level one. -func (p *ParameterDefinitions) GoLow() *low.ParameterDefinitions { - return p.low -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/path_item.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/path_item.go deleted file mode 100644 index ee7b13b76c1..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/path_item.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package v2 - -import ( - "reflect" - "slices" - "sync" - - "github.com/pb33f/libopenapi/datamodel/high" - "github.com/pb33f/libopenapi/datamodel/low" - lowV2 "github.com/pb33f/libopenapi/datamodel/low/v2" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// PathItem represents a high-level Swagger / OpenAPI 2 PathItem object backed by a low-level one. -// -// Describes the operations available on a single path. A Path Item may be empty, due to ACL constraints. 
-// The path itself is still exposed to the tooling, but will not know which operations and parameters -// are available. -// - https://swagger.io/specification/v2/#pathItemObject -type PathItem struct { - Ref string - Get *Operation - Put *Operation - Post *Operation - Delete *Operation - Options *Operation - Head *Operation - Patch *Operation - Parameters []*Parameter - Extensions *orderedmap.Map[string, *yaml.Node] - low *lowV2.PathItem -} - -// NewPathItem will create a new high-level PathItem from a low-level one. All paths are built out asynchronously. -func NewPathItem(pathItem *lowV2.PathItem) *PathItem { - p := new(PathItem) - p.low = pathItem - p.Extensions = high.ExtractExtensions(pathItem.Extensions) - if !pathItem.Parameters.IsEmpty() { - var params []*Parameter - for k := range pathItem.Parameters.Value { - params = append(params, NewParameter(pathItem.Parameters.Value[k].Value)) - } - p.Parameters = params - } - buildOperation := func(method string, op *lowV2.Operation) *Operation { - return NewOperation(op) - } - - var wg sync.WaitGroup - if !pathItem.Get.IsEmpty() { - wg.Add(1) - go func() { - p.Get = buildOperation(lowV2.GetLabel, pathItem.Get.Value) - wg.Done() - }() - } - if !pathItem.Put.IsEmpty() { - wg.Add(1) - go func() { - p.Put = buildOperation(lowV2.PutLabel, pathItem.Put.Value) - wg.Done() - }() - } - if !pathItem.Post.IsEmpty() { - wg.Add(1) - go func() { - p.Post = buildOperation(lowV2.PostLabel, pathItem.Post.Value) - wg.Done() - }() - } - if !pathItem.Patch.IsEmpty() { - wg.Add(1) - go func() { - p.Patch = buildOperation(lowV2.PatchLabel, pathItem.Patch.Value) - wg.Done() - }() - } - if !pathItem.Delete.IsEmpty() { - wg.Add(1) - go func() { - p.Delete = buildOperation(lowV2.DeleteLabel, pathItem.Delete.Value) - wg.Done() - }() - } - if !pathItem.Head.IsEmpty() { - wg.Add(1) - go func() { - p.Head = buildOperation(lowV2.HeadLabel, pathItem.Head.Value) - wg.Done() - }() - } - if !pathItem.Options.IsEmpty() { - wg.Add(1) - go func() { - 
p.Options = buildOperation(lowV2.OptionsLabel, pathItem.Options.Value) - wg.Done() - }() - } - wg.Wait() - return p -} - -// GoLow returns the low-level PathItem used to create the high-level one. -func (p *PathItem) GoLow() *lowV2.PathItem { - return p.low -} - -func (p *PathItem) GetOperations() *orderedmap.Map[string, *Operation] { - o := orderedmap.New[string, *Operation]() - - // TODO: this is a bit of a hack, but it works for now. We might just want to actually pull the data out of the document as a map and split it into the individual operations - - type op struct { - name string - op *Operation - line int - } - - getLine := func(field string, idx int) int { - if p.GoLow() == nil { - return idx - } - - l, ok := reflect.ValueOf(p.GoLow()).Elem().FieldByName(field).Interface().(low.NodeReference[*lowV2.Operation]) - if !ok || l.GetKeyNode() == nil { - return idx - } - - return l.GetKeyNode().Line - } - - ops := []op{} - - if p.Get != nil { - ops = append(ops, op{name: lowV2.GetLabel, op: p.Get, line: getLine("Get", -7)}) - } - if p.Put != nil { - ops = append(ops, op{name: lowV2.PutLabel, op: p.Put, line: getLine("Put", -6)}) - } - if p.Post != nil { - ops = append(ops, op{name: lowV2.PostLabel, op: p.Post, line: getLine("Post", -5)}) - } - if p.Delete != nil { - ops = append(ops, op{name: lowV2.DeleteLabel, op: p.Delete, line: getLine("Delete", -4)}) - } - if p.Options != nil { - ops = append(ops, op{name: lowV2.OptionsLabel, op: p.Options, line: getLine("Options", -3)}) - } - if p.Head != nil { - ops = append(ops, op{name: lowV2.HeadLabel, op: p.Head, line: getLine("Head", -2)}) - } - if p.Patch != nil { - ops = append(ops, op{name: lowV2.PatchLabel, op: p.Patch, line: getLine("Patch", -1)}) - } - - slices.SortStableFunc(ops, func(a op, b op) int { - return a.line - b.line - }) - - for _, op := range ops { - o.Set(op.name, op.op) - } - - return o -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/paths.go 
b/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/paths.go deleted file mode 100644 index ed271d24c2c..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/paths.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package v2 - -import ( - "github.com/pb33f/libopenapi/datamodel" - "github.com/pb33f/libopenapi/datamodel/high" - "github.com/pb33f/libopenapi/datamodel/low" - v2low "github.com/pb33f/libopenapi/datamodel/low/v2" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// Paths represents a high-level Swagger / OpenAPI Paths object, backed by a low-level one. -type Paths struct { - PathItems *orderedmap.Map[string, *PathItem] - Extensions *orderedmap.Map[string, *yaml.Node] - low *v2low.Paths -} - -// NewPaths creates a new high-level instance of Paths from a low-level one. -func NewPaths(paths *v2low.Paths) *Paths { - p := new(Paths) - p.low = paths - p.Extensions = high.ExtractExtensions(paths.Extensions) - pathItems := orderedmap.New[string, *PathItem]() - - translateFunc := func(pair orderedmap.Pair[low.KeyReference[string], low.ValueReference[*v2low.PathItem]]) (asyncResult[*PathItem], error) { - return asyncResult[*PathItem]{ - key: pair.Key().Value, - result: NewPathItem(pair.Value().Value), - }, nil - } - resultFunc := func(result asyncResult[*PathItem]) error { - pathItems.Set(result.key, result.result) - return nil - } - _ = datamodel.TranslateMapParallel[low.KeyReference[string], low.ValueReference[*v2low.PathItem], asyncResult[*PathItem]]( - paths.PathItems, translateFunc, resultFunc, - ) - p.PathItems = pathItems - return p -} - -// GoLow returns the low-level Paths instance that backs the high level one. 
-func (p *Paths) GoLow() *v2low.Paths { - return p.low -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/response.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/response.go deleted file mode 100644 index 9478c1e72aa..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/response.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package v2 - -import ( - "github.com/pb33f/libopenapi/datamodel/high" - "github.com/pb33f/libopenapi/datamodel/high/base" - "github.com/pb33f/libopenapi/datamodel/low" - lowv2 "github.com/pb33f/libopenapi/datamodel/low/v2" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// Response is a representation of a high-level Swagger / OpenAPI 2 Response object, backed by a low-level one. -// Response describes a single response from an API Operation -// - https://swagger.io/specification/v2/#responseObject -type Response struct { - Description string - Schema *base.SchemaProxy - Headers *orderedmap.Map[string, *Header] - Examples *Example - Extensions *orderedmap.Map[string, *yaml.Node] - low *lowv2.Response -} - -// NewResponse creates a new high-level instance of Response from a low level one. -func NewResponse(response *lowv2.Response) *Response { - r := new(Response) - r.low = response - r.Extensions = high.ExtractExtensions(response.Extensions) - if !response.Description.IsEmpty() { - r.Description = response.Description.Value - } - if !response.Schema.IsEmpty() { - r.Schema = base.NewSchemaProxy(&response.Schema) - } - if !response.Headers.IsEmpty() { - r.Headers = low.FromReferenceMapWithFunc(response.Headers.Value, NewHeader) - } - if !response.Examples.IsEmpty() { - r.Examples = NewExample(response.Examples.Value) - } - return r -} - -// GoLow will return the low-level Response instance used to create the high level one. 
-func (r *Response) GoLow() *lowv2.Response { - return r.low -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/responses.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/responses.go deleted file mode 100644 index 0da1e063534..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/responses.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package v2 - -import ( - "github.com/pb33f/libopenapi/datamodel" - "github.com/pb33f/libopenapi/datamodel/high" - lowmodel "github.com/pb33f/libopenapi/datamodel/low" - low "github.com/pb33f/libopenapi/datamodel/low/v2" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// Responses is a high-level representation of a Swagger / OpenAPI 2 Responses object, backed by a low level one. -type Responses struct { - Codes *orderedmap.Map[string, *Response] - Default *Response - Extensions *orderedmap.Map[string, *yaml.Node] - low *low.Responses -} - -// NewResponses will create a new high-level instance of Responses from a low-level one. 
-func NewResponses(responses *low.Responses) *Responses { - r := new(Responses) - r.low = responses - r.Extensions = high.ExtractExtensions(responses.Extensions) - - if !responses.Default.IsEmpty() { - r.Default = NewResponse(responses.Default.Value) - } - - if orderedmap.Len(responses.Codes) > 0 { - resp := orderedmap.New[string, *Response]() - translateFunc := func(pair orderedmap.Pair[lowmodel.KeyReference[string], lowmodel.ValueReference[*low.Response]]) (asyncResult[*Response], error) { - return asyncResult[*Response]{ - key: pair.Key().Value, - result: NewResponse(pair.Value().Value), - }, nil - } - resultFunc := func(value asyncResult[*Response]) error { - resp.Set(value.key, value.result) - return nil - } - _ = datamodel.TranslateMapParallel(responses.Codes, translateFunc, resultFunc) - r.Codes = resp - } - - return r -} - -// GoLow will return the low-level object used to create the high-level one. -func (r *Responses) GoLow() *low.Responses { - return r.low -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/responses_definitions.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/responses_definitions.go deleted file mode 100644 index 141b7be56dc..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/responses_definitions.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package v2 - -import ( - "github.com/pb33f/libopenapi/datamodel" - lowmodel "github.com/pb33f/libopenapi/datamodel/low" - low "github.com/pb33f/libopenapi/datamodel/low/v2" - "github.com/pb33f/libopenapi/orderedmap" -) - -// ResponsesDefinitions is a high-level representation of a Swagger / OpenAPI 2 Responses Definitions object. -// that is backed by a low-level one. -// -// ResponsesDefinitions is an object to hold responses to be reused across operations. Response definitions can be -// referenced to the ones defined here. 
It does not define global operation responses -// - https://swagger.io/specification/v2/#responsesDefinitionsObject -type ResponsesDefinitions struct { - Definitions *orderedmap.Map[string, *Response] - low *low.ResponsesDefinitions -} - -// NewResponsesDefinitions will create a new high-level instance of ResponsesDefinitions from a low-level one. -func NewResponsesDefinitions(responsesDefinitions *low.ResponsesDefinitions) *ResponsesDefinitions { - rd := new(ResponsesDefinitions) - rd.low = responsesDefinitions - responses := orderedmap.New[string, *Response]() - translateFunc := func(pair orderedmap.Pair[lowmodel.KeyReference[string], lowmodel.ValueReference[*low.Response]]) (asyncResult[*Response], error) { - return asyncResult[*Response]{ - key: pair.Key().Value, - result: NewResponse(pair.Value().Value), - }, nil - } - resultFunc := func(value asyncResult[*Response]) error { - responses.Set(value.key, value.result) - return nil - } - - _ = datamodel.TranslateMapParallel(responsesDefinitions.Definitions, translateFunc, resultFunc) - rd.Definitions = responses - return rd -} - -// GoLow returns the low-level ResponsesDefinitions used to create the high-level one. -func (r *ResponsesDefinitions) GoLow() *low.ResponsesDefinitions { - return r.low -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/scopes.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/scopes.go deleted file mode 100644 index 52ec296cb4e..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/scopes.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package v2 - -import ( - "github.com/pb33f/libopenapi/datamodel/low" - lowv2 "github.com/pb33f/libopenapi/datamodel/low/v2" - "github.com/pb33f/libopenapi/orderedmap" -) - -// Scopes is a high-level representation of a Swagger / OpenAPI 2 OAuth2 Scopes object, that is backed by a low-level one. 
-// -// Scopes lists the available scopes for an OAuth2 security scheme. -// - https://swagger.io/specification/v2/#scopesObject -type Scopes struct { - Values *orderedmap.Map[string, string] - low *lowv2.Scopes -} - -// NewScopes creates a new high-level instance of Scopes from a low-level one. -func NewScopes(scopes *lowv2.Scopes) *Scopes { - s := new(Scopes) - s.low = scopes - s.Values = low.FromReferenceMap(scopes.Values) - return s -} - -// GoLow returns the low-level instance of Scopes used to create the high-level one. -func (s *Scopes) GoLow() *lowv2.Scopes { - return s.low -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/security_definitions.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/security_definitions.go deleted file mode 100644 index 67ad0697ce0..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/security_definitions.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package v2 - -import ( - "github.com/pb33f/libopenapi/datamodel" - lowmodel "github.com/pb33f/libopenapi/datamodel/low" - low "github.com/pb33f/libopenapi/datamodel/low/v2" - "github.com/pb33f/libopenapi/orderedmap" -) - -// SecurityDefinitions is a high-level representation of a Swagger / OpenAPI 2 Security Definitions object, that -// is backed by a low-level one. -// -// A declaration of the security schemes available to be used in the specification. This does not enforce the security -// schemes on the operations and only serves to provide the relevant details for each scheme -// - https://swagger.io/specification/v2/#securityDefinitionsObject -type SecurityDefinitions struct { - Definitions *orderedmap.Map[string, *SecurityScheme] - low *low.SecurityDefinitions -} - -// NewSecurityDefinitions creates a new high-level instance of a SecurityDefinitions from a low-level one. 
-func NewSecurityDefinitions(definitions *low.SecurityDefinitions) *SecurityDefinitions { - sd := new(SecurityDefinitions) - sd.low = definitions - schemes := orderedmap.New[string, *SecurityScheme]() - translateFunc := func(pair orderedmap.Pair[lowmodel.KeyReference[string], lowmodel.ValueReference[*low.SecurityScheme]]) (asyncResult[*SecurityScheme], error) { - return asyncResult[*SecurityScheme]{ - key: pair.Key().Value, - result: NewSecurityScheme(pair.Value().Value), - }, nil - } - resultFunc := func(value asyncResult[*SecurityScheme]) error { - schemes.Set(value.key, value.result) - return nil - } - _ = datamodel.TranslateMapParallel(definitions.Definitions, translateFunc, resultFunc) - - sd.Definitions = schemes - return sd -} - -// GoLow returns the low-level SecurityDefinitions instance used to create the high-level one. -func (sd *SecurityDefinitions) GoLow() *low.SecurityDefinitions { - return sd.low -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/security_scheme.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/security_scheme.go deleted file mode 100644 index 55606f3757d..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/security_scheme.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package v2 - -import ( - "github.com/pb33f/libopenapi/datamodel/high" - low "github.com/pb33f/libopenapi/datamodel/low/v2" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// SecurityScheme is a high-level representation of a Swagger / OpenAPI 2 SecurityScheme object -// backed by a low-level one. -// -// SecurityScheme allows the definition of a security scheme that can be used by the operations. 
Supported schemes are -// basic authentication, an API key (either as a header or as a query parameter) and OAuth2's common flows -// (implicit, password, application and access code) -// - https://swagger.io/specification/v2/#securityDefinitionsObject -type SecurityScheme struct { - Type string - Description string - Name string - In string - Flow string - AuthorizationUrl string - TokenUrl string - Scopes *Scopes - Extensions *orderedmap.Map[string, *yaml.Node] - low *low.SecurityScheme -} - -// NewSecurityScheme creates a new instance of SecurityScheme from a low-level one. -func NewSecurityScheme(securityScheme *low.SecurityScheme) *SecurityScheme { - s := new(SecurityScheme) - s.low = securityScheme - s.Extensions = high.ExtractExtensions(securityScheme.Extensions) - if !securityScheme.Type.IsEmpty() { - s.Type = securityScheme.Type.Value - } - if !securityScheme.Description.IsEmpty() { - s.Description = securityScheme.Description.Value - } - if !securityScheme.Name.IsEmpty() { - s.Name = securityScheme.Name.Value - } - if !securityScheme.In.IsEmpty() { - s.In = securityScheme.In.Value - } - if !securityScheme.Flow.IsEmpty() { - s.Flow = securityScheme.Flow.Value - } - if !securityScheme.AuthorizationUrl.IsEmpty() { - s.AuthorizationUrl = securityScheme.AuthorizationUrl.Value - } - if !securityScheme.TokenUrl.IsEmpty() { - s.TokenUrl = securityScheme.TokenUrl.Value - } - if !securityScheme.Scopes.IsEmpty() { - s.Scopes = NewScopes(securityScheme.Scopes.Value) - } - return s -} - -// GoLow returns the low-level SecurityScheme that was used to create the high-level one. 
-func (s *SecurityScheme) GoLow() *low.SecurityScheme { - return s.low -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/swagger.go b/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/swagger.go deleted file mode 100644 index bd06ae4f0fd..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/high/v2/swagger.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -// Package v2 represents all Swagger / OpenAPI 2 high-level models. High-level models are easy to navigate -// and simple to extract what ever is required from a specification. -// -// High-level models are backed by low-level ones. There is a 'GoLow()' method available on every high level -// object. 'Going Low' allows engineers to transition from a high-level or 'porcelain' API, to a low-level 'plumbing' -// API, which provides fine grain detail to the underlying AST powering the data, lines, columns, raw nodes etc. -// -// IMPORTANT: As a general rule, Swagger / OpenAPI 2 should be avoided for new projects. -// VERY IMPORTANT: pb33f is no longer maintaining the v2 model. It's a commercial product (Swagger) by a company (SmartBear) and not OpenAPI. -// PLEASE DO NOT USE THIS MODEL UNLESS YOU HAVE TO. IT'S HERE FOR LEGACY SUPPORT ONLY. Upgrade to 3x! -package v2 - -import ( - "github.com/pb33f/libopenapi/datamodel/high" - "github.com/pb33f/libopenapi/datamodel/high/base" - low "github.com/pb33f/libopenapi/datamodel/low/v2" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// Swagger represents a high-level Swagger / OpenAPI 2 document. An instance of Swagger is the root of the specification. -type Swagger struct { - // Swagger is the version of Swagger / OpenAPI being used, extracted from the 'swagger: 2.x' definition. - Swagger string - - // Info represents a specification Info definition. - // Provides metadata about the API. 
The metadata can be used by the clients if needed. - // - https://swagger.io/specification/v2/#infoObject - Info *base.Info - - // Host is The host (name or ip) serving the API. This MUST be the host only and does not include the scheme nor - // sub-paths. It MAY include a port. If the host is not included, the host serving the documentation is to be used - // (including the port). The host does not support path templating. - Host string - - // BasePath is The base path on which the API is served, which is relative to the host. If it is not included, the API is - // served directly under the host. The value MUST start with a leading slash (/). - // The basePath does not support path templating. - BasePath string - - // Schemes represents the transfer protocol of the API. Requirements MUST be from the list: "http", "https", "ws", "wss". - // If the schemes is not included, the default scheme to be used is the one used to access - // the Swagger definition itself. - Schemes []string - - // Consumes is a list of MIME types the APIs can consume. This is global to all APIs but can be overridden on - // specific API calls. Value MUST be as described under Mime Types. - Consumes []string - - // Produces is a list of MIME types the APIs can produce. This is global to all APIs but can be overridden on - // specific API calls. Value MUST be as described under Mime Types. - Produces []string - - // Paths are the paths and operations for the API. Perhaps the most important part of the specification. - // - https://swagger.io/specification/v2/#pathsObject - Paths *Paths - - // Definitions is an object to hold data types produced and consumed by operations. It's composed of Schema instances - // - https://swagger.io/specification/v2/#definitionsObject - Definitions *Definitions - - // Parameters is an object to hold parameters that can be used across operations. - // This property does not define global parameters for all operations. 
- // - https://swagger.io/specification/v2/#parametersDefinitionsObject - Parameters *ParameterDefinitions - - // Responses is an object to hold responses that can be used across operations. - // This property does not define global responses for all operations. - // - https://swagger.io/specification/v2/#responsesDefinitionsObject - Responses *ResponsesDefinitions - - // SecurityDefinitions represents security scheme definitions that can be used across the specification. - // - https://swagger.io/specification/v2/#securityDefinitionsObject - SecurityDefinitions *SecurityDefinitions - - // Security is a declaration of which security schemes are applied for the API as a whole. The list of values - // describes alternative security schemes that can be used (that is, there is a logical OR between the security - // requirements). Individual operations can override this definition. - // - https://swagger.io/specification/v2/#securityRequirementObject - Security []*base.SecurityRequirement - - // Tags are A list of tags used by the specification with additional metadata. - // The order of the tags can be used to reflect on their order by the parsing tools. Not all tags that are used - // by the Operation Object must be declared. The tags that are not declared may be organized randomly or based - // on the tools' logic. Each tag name in the list MUST be unique. - // - https://swagger.io/specification/v2/#tagObject - Tags []*base.Tag - - // ExternalDocs is an instance of base.ExternalDoc for.. well, obvious really, innit. - ExternalDocs *base.ExternalDoc - - // Extensions contains all custom extensions defined for the top-level document. - Extensions *orderedmap.Map[string, *yaml.Node] - low *low.Swagger -} - -// NewSwaggerDocument will create a new high-level Swagger document from a low-level one. 
-func NewSwaggerDocument(document *low.Swagger) *Swagger { - d := new(Swagger) - d.low = document - d.Extensions = high.ExtractExtensions(document.Extensions) - if !document.Info.IsEmpty() { - d.Info = base.NewInfo(document.Info.Value) - } - if !document.Swagger.IsEmpty() { - d.Swagger = document.Swagger.Value - } - if !document.Host.IsEmpty() { - d.Host = document.Host.Value - } - if !document.BasePath.IsEmpty() { - d.BasePath = document.BasePath.Value - } - - if !document.Schemes.IsEmpty() { - var schemes []string - for s := range document.Schemes.Value { - schemes = append(schemes, document.Schemes.Value[s].Value) - } - d.Schemes = schemes - } - if !document.Consumes.IsEmpty() { - var consumes []string - for c := range document.Consumes.Value { - consumes = append(consumes, document.Consumes.Value[c].Value) - } - d.Consumes = consumes - } - if !document.Produces.IsEmpty() { - var produces []string - for p := range document.Produces.Value { - produces = append(produces, document.Produces.Value[p].Value) - } - d.Produces = produces - } - if !document.Paths.IsEmpty() { - d.Paths = NewPaths(document.Paths.Value) - } - if !document.Definitions.IsEmpty() { - d.Definitions = NewDefinitions(document.Definitions.Value) - } - if !document.Parameters.IsEmpty() { - d.Parameters = NewParametersDefinitions(document.Parameters.Value) - } - - if !document.Responses.IsEmpty() { - d.Responses = NewResponsesDefinitions(document.Responses.Value) - } - if !document.SecurityDefinitions.IsEmpty() { - d.SecurityDefinitions = NewSecurityDefinitions(document.SecurityDefinitions.Value) - } - if !document.Security.IsEmpty() { - var security []*base.SecurityRequirement - for s := range document.Security.Value { - security = append(security, base.NewSecurityRequirement(document.Security.Value[s].Value)) - } - d.Security = security - } - if !document.Tags.IsEmpty() { - var tags []*base.Tag - for t := range document.Tags.Value { - tags = append(tags, base.NewTag(document.Tags.Value[t].Value)) 
- } - d.Tags = tags - } - if !document.ExternalDocs.IsEmpty() { - d.ExternalDocs = base.NewExternalDoc(document.ExternalDocs.Value) - } - return d -} - -// GoLow returns the low-level Swagger instance that was used to create the high-level one. -func (s *Swagger) GoLow() *low.Swagger { - return s.low -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/arazzo.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/arazzo.go deleted file mode 100644 index cf65e40fc68..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/arazzo.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2022-2026 Princess Beef Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package arazzo - -import ( - "context" - "hash/maphash" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/index" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// Arazzo represents a low-level Arazzo document. -// https://spec.openapis.org/arazzo/v1.0.1 -type Arazzo struct { - Arazzo low.NodeReference[string] - Info low.NodeReference[*Info] - SourceDescriptions low.NodeReference[[]low.ValueReference[*SourceDescription]] - Workflows low.NodeReference[[]low.ValueReference[*Workflow]] - Components low.NodeReference[*Components] - Extensions *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] - KeyNode *yaml.Node - RootNode *yaml.Node - index *index.SpecIndex - context context.Context - *low.Reference - low.NodeMap -} - -var extractArazzoSourceDescriptions = extractArray[SourceDescription] - -// GetIndex returns the index.SpecIndex instance attached to the Arazzo object. -// For Arazzo low models this is typically nil, because Arazzo parsing does not build a SpecIndex. -// The index parameter is still required to satisfy the shared low.Buildable interface and generic extractors. 
-func (a *Arazzo) GetIndex() *index.SpecIndex { - return a.index -} - -// GetContext returns the context.Context instance used when building the Arazzo object. -func (a *Arazzo) GetContext() context.Context { - return a.context -} - -// FindExtension returns a ValueReference containing the extension value, if found. -func (a *Arazzo) FindExtension(ext string) *low.ValueReference[*yaml.Node] { - return low.FindItemInOrderedMap(ext, a.Extensions) -} - -// GetRootNode returns the root yaml node of the Arazzo object. -func (a *Arazzo) GetRootNode() *yaml.Node { - return a.RootNode -} - -// GetKeyNode returns the key yaml node of the Arazzo object. -func (a *Arazzo) GetKeyNode() *yaml.Node { - return a.KeyNode -} - -// Build will extract all properties of the Arazzo document. -func (a *Arazzo) Build(ctx context.Context, keyNode, root *yaml.Node, idx *index.SpecIndex) error { - root = initBuild(&arazzoBase{ - KeyNode: &a.KeyNode, - RootNode: &a.RootNode, - Reference: &a.Reference, - NodeMap: &a.NodeMap, - Extensions: &a.Extensions, - Index: &a.index, - Context: &a.context, - }, ctx, keyNode, root, idx) - - info, err := low.ExtractObject[*Info](ctx, InfoLabel, root, idx) - if err != nil { - return err - } - a.Info = info - - sourceDescs, err := extractArazzoSourceDescriptions(ctx, SourceDescriptionsLabel, root, idx) - if err != nil { - return err - } - a.SourceDescriptions = sourceDescs - - workflows, err := extractArray[Workflow](ctx, WorkflowsLabel, root, idx) - if err != nil { - return err - } - a.Workflows = workflows - - components, err := low.ExtractObject[*Components](ctx, ComponentsLabel, root, idx) - if err != nil { - return err - } - a.Components = components - - return nil -} - -// GetExtensions returns all Arazzo extensions and satisfies the low.HasExtensions interface. 
-func (a *Arazzo) GetExtensions() *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] { - return a.Extensions -} - -// Hash will return a consistent hash of the Arazzo object. -func (a *Arazzo) Hash() uint64 { - return low.WithHasher(func(h *maphash.Hash) uint64 { - if !a.Arazzo.IsEmpty() { - h.WriteString(a.Arazzo.Value) - h.WriteByte(low.HASH_PIPE) - } - if !a.Info.IsEmpty() { - low.HashUint64(h, a.Info.Value.Hash()) - } - if !a.SourceDescriptions.IsEmpty() { - for _, sd := range a.SourceDescriptions.Value { - low.HashUint64(h, sd.Value.Hash()) - } - } - if !a.Workflows.IsEmpty() { - for _, w := range a.Workflows.Value { - low.HashUint64(h, w.Value.Hash()) - } - } - if !a.Components.IsEmpty() { - low.HashUint64(h, a.Components.Value.Hash()) - } - hashExtensionsInto(h, a.Extensions) - return h.Sum64() - }) -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/components.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/components.go deleted file mode 100644 index 73eb1522075..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/components.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2022-2026 Princess Beef Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package arazzo - -import ( - "context" - "hash/maphash" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/index" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// Components represents a low-level Arazzo Components Object. 
-// https://spec.openapis.org/arazzo/v1.0.1#components-object -type Components struct { - Inputs low.NodeReference[*orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]]] - Parameters low.NodeReference[*orderedmap.Map[low.KeyReference[string], low.ValueReference[*Parameter]]] - SuccessActions low.NodeReference[*orderedmap.Map[low.KeyReference[string], low.ValueReference[*SuccessAction]]] - FailureActions low.NodeReference[*orderedmap.Map[low.KeyReference[string], low.ValueReference[*FailureAction]]] - Extensions *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] - KeyNode *yaml.Node - RootNode *yaml.Node - index *index.SpecIndex - context context.Context - *low.Reference - low.NodeMap -} - -var extractComponentsParametersMap = extractObjectMap[Parameter] -var extractComponentsSuccessActionsMap = extractObjectMap[SuccessAction] - -// GetIndex returns the index.SpecIndex instance attached to the Components object. -// For Arazzo low models this is typically nil, because Arazzo parsing does not build a SpecIndex. -// The index parameter is still required to satisfy the shared low.Buildable interface and generic extractors. -func (c *Components) GetIndex() *index.SpecIndex { - return c.index -} - -// GetContext returns the context.Context instance used when building the Components object. -func (c *Components) GetContext() context.Context { - return c.context -} - -// FindExtension returns a ValueReference containing the extension value, if found. -func (c *Components) FindExtension(ext string) *low.ValueReference[*yaml.Node] { - return low.FindItemInOrderedMap(ext, c.Extensions) -} - -// GetRootNode returns the root yaml node of the Components object. -func (c *Components) GetRootNode() *yaml.Node { - return c.RootNode -} - -// GetKeyNode returns the key yaml node of the Components object. -func (c *Components) GetKeyNode() *yaml.Node { - return c.KeyNode -} - -// Build will extract all properties of the Components object. 
-func (c *Components) Build(ctx context.Context, keyNode, root *yaml.Node, idx *index.SpecIndex) error { - root = initBuild(&arazzoBase{ - KeyNode: &c.KeyNode, - RootNode: &c.RootNode, - Reference: &c.Reference, - NodeMap: &c.NodeMap, - Extensions: &c.Extensions, - Index: &c.index, - Context: &c.context, - }, ctx, keyNode, root, idx) - - // Extract inputs as raw node map (JSON Schema objects keyed by name) - c.Inputs = extractRawNodeMap(InputsLabel, root) - - // Extract parameters map - params, err := extractComponentsParametersMap(ctx, ParametersLabel, root, idx) - if err != nil { - return err - } - c.Parameters = params - - // Extract successActions map - successActions, err := extractComponentsSuccessActionsMap(ctx, SuccessActionsLabel, root, idx) - if err != nil { - return err - } - c.SuccessActions = successActions - - // Extract failureActions map - failureActions, err := extractObjectMap[FailureAction](ctx, FailureActionsLabel, root, idx) - if err != nil { - return err - } - c.FailureActions = failureActions - - return nil -} - -// GetExtensions returns all Components extensions and satisfies the low.HasExtensions interface. -func (c *Components) GetExtensions() *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] { - return c.Extensions -} - -// Hash will return a consistent hash of the Components object. 
-func (c *Components) Hash() uint64 { - return low.WithHasher(func(h *maphash.Hash) uint64 { - if !c.Inputs.IsEmpty() && c.Inputs.Value != nil { - for pair := c.Inputs.Value.First(); pair != nil; pair = pair.Next() { - h.WriteString(pair.Key().Value) - h.WriteByte(low.HASH_PIPE) - hashYAMLNode(h, pair.Value().Value) - } - } - if !c.Parameters.IsEmpty() && c.Parameters.Value != nil { - for pair := c.Parameters.Value.First(); pair != nil; pair = pair.Next() { - h.WriteString(pair.Key().Value) - h.WriteByte(low.HASH_PIPE) - low.HashUint64(h, pair.Value().Value.Hash()) - } - } - if !c.SuccessActions.IsEmpty() && c.SuccessActions.Value != nil { - for pair := c.SuccessActions.Value.First(); pair != nil; pair = pair.Next() { - h.WriteString(pair.Key().Value) - h.WriteByte(low.HASH_PIPE) - low.HashUint64(h, pair.Value().Value.Hash()) - } - } - if !c.FailureActions.IsEmpty() && c.FailureActions.Value != nil { - for pair := c.FailureActions.Value.First(); pair != nil; pair = pair.Next() { - h.WriteString(pair.Key().Value) - h.WriteByte(low.HASH_PIPE) - low.HashUint64(h, pair.Value().Value.Hash()) - } - } - hashExtensionsInto(h, c.Extensions) - return h.Sum64() - }) -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/constants.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/constants.go deleted file mode 100644 index 21007ce42a8..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/constants.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2022-2026 Princess Beef Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package arazzo - -// Constants for labels used to look up values within Arazzo specifications. 
-// https://spec.openapis.org/arazzo/v1.0.1 -const ( - ArazzoLabel = "arazzo" - InfoLabel = "info" - SourceDescriptionsLabel = "sourceDescriptions" - WorkflowsLabel = "workflows" - ComponentsLabel = "components" - TitleLabel = "title" - SummaryLabel = "summary" - DescriptionLabel = "description" - VersionLabel = "version" - NameLabel = "name" - URLLabel = "url" - TypeLabel = "type" - WorkflowIdLabel = "workflowId" - StepsLabel = "steps" - InputsLabel = "inputs" - DependsOnLabel = "dependsOn" - SuccessActionsLabel = "successActions" - FailureActionsLabel = "failureActions" - OutputsLabel = "outputs" - ParametersLabel = "parameters" - StepIdLabel = "stepId" - OperationIdLabel = "operationId" - OperationPathLabel = "operationPath" - RequestBodyLabel = "requestBody" - SuccessCriteriaLabel = "successCriteria" - OnSuccessLabel = "onSuccess" - OnFailureLabel = "onFailure" - InLabel = "in" - ValueLabel = "value" - ReferenceLabel = "reference" - CriteriaLabel = "criteria" - RetryAfterLabel = "retryAfter" - RetryLimitLabel = "retryLimit" - ContextLabel = "context" - ConditionLabel = "condition" - ContentTypeLabel = "contentType" - PayloadLabel = "payload" - ReplacementsLabel = "replacements" - TargetLabel = "target" -) diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/criterion.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/criterion.go deleted file mode 100644 index fa12978e3b0..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/criterion.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2022-2026 Princess Beef Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package arazzo - -import ( - "context" - "hash/maphash" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/index" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// Criterion represents a low-level Arazzo Criterion Object. 
-// https://spec.openapis.org/arazzo/v1.0.1#criterion-object -type Criterion struct { - Context low.NodeReference[string] - Condition low.NodeReference[string] - Type low.NodeReference[*yaml.Node] - Extensions *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] - KeyNode *yaml.Node - RootNode *yaml.Node - index *index.SpecIndex - context context.Context - *low.Reference - low.NodeMap -} - -// GetIndex returns the index.SpecIndex instance attached to the Criterion object. -// For Arazzo low models this is typically nil, because Arazzo parsing does not build a SpecIndex. -// The index parameter is still required to satisfy the shared low.Buildable interface and generic extractors. -func (c *Criterion) GetIndex() *index.SpecIndex { - return c.index -} - -// GetContext returns the context.Context instance used when building the Criterion object. -func (c *Criterion) GetContext() context.Context { - return c.context -} - -// FindExtension returns a ValueReference containing the extension value, if found. -func (c *Criterion) FindExtension(ext string) *low.ValueReference[*yaml.Node] { - return low.FindItemInOrderedMap(ext, c.Extensions) -} - -// GetRootNode returns the root yaml node of the Criterion object. -func (c *Criterion) GetRootNode() *yaml.Node { - return c.RootNode -} - -// GetKeyNode returns the key yaml node of the Criterion object. -func (c *Criterion) GetKeyNode() *yaml.Node { - return c.KeyNode -} - -// Build will extract all properties of the Criterion object. -// The Type field is a union: it can be a scalar string ("simple", "regex") or a mapping node -// (CriterionExpressionType). We store it as a raw *yaml.Node for the high-level to interpret. 
-func (c *Criterion) Build(ctx context.Context, keyNode, root *yaml.Node, idx *index.SpecIndex) error { - root = initBuild(&arazzoBase{ - KeyNode: &c.KeyNode, - RootNode: &c.RootNode, - Reference: &c.Reference, - NodeMap: &c.NodeMap, - Extensions: &c.Extensions, - Index: &c.index, - Context: &c.context, - }, ctx, keyNode, root, idx) - - // Extract type as raw node since it's a union type - c.Type = extractRawNode(TypeLabel, root) - return nil -} - -// GetExtensions returns all Criterion extensions and satisfies the low.HasExtensions interface. -func (c *Criterion) GetExtensions() *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] { - return c.Extensions -} - -// Hash will return a consistent hash of the Criterion object. -func (c *Criterion) Hash() uint64 { - return low.WithHasher(func(h *maphash.Hash) uint64 { - if !c.Context.IsEmpty() { - h.WriteString(c.Context.Value) - h.WriteByte(low.HASH_PIPE) - } - if !c.Condition.IsEmpty() { - h.WriteString(c.Condition.Value) - h.WriteByte(low.HASH_PIPE) - } - if !c.Type.IsEmpty() { - hashYAMLNode(h, c.Type.Value) - } - hashExtensionsInto(h, c.Extensions) - return h.Sum64() - }) -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/criterion_expression_type.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/criterion_expression_type.go deleted file mode 100644 index 3f7c20c2c04..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/criterion_expression_type.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2022-2026 Princess Beef Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package arazzo - -import ( - "context" - "hash/maphash" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/index" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// CriterionExpressionType represents a low-level Arazzo Criterion Expression Type Object. 
-// https://spec.openapis.org/arazzo/v1.0.1#criterion-expression-type-object -type CriterionExpressionType struct { - Type low.NodeReference[string] - Version low.NodeReference[string] - Extensions *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] - KeyNode *yaml.Node - RootNode *yaml.Node - index *index.SpecIndex - context context.Context - *low.Reference - low.NodeMap -} - -// GetIndex returns the index.SpecIndex instance attached to the CriterionExpressionType object. -// For Arazzo low models this is typically nil, because Arazzo parsing does not build a SpecIndex. -// The index parameter is still required to satisfy the shared low.Buildable interface and generic extractors. -func (c *CriterionExpressionType) GetIndex() *index.SpecIndex { - return c.index -} - -// GetContext returns the context.Context instance used when building the CriterionExpressionType object. -func (c *CriterionExpressionType) GetContext() context.Context { - return c.context -} - -// FindExtension returns a ValueReference containing the extension value, if found. -func (c *CriterionExpressionType) FindExtension(ext string) *low.ValueReference[*yaml.Node] { - return low.FindItemInOrderedMap(ext, c.Extensions) -} - -// GetRootNode returns the root yaml node of the CriterionExpressionType object. -func (c *CriterionExpressionType) GetRootNode() *yaml.Node { - return c.RootNode -} - -// GetKeyNode returns the key yaml node of the CriterionExpressionType object. -func (c *CriterionExpressionType) GetKeyNode() *yaml.Node { - return c.KeyNode -} - -// Build will extract all properties of the CriterionExpressionType object. 
-func (c *CriterionExpressionType) Build(ctx context.Context, keyNode, root *yaml.Node, idx *index.SpecIndex) error { - root = initBuild(&arazzoBase{ - KeyNode: &c.KeyNode, - RootNode: &c.RootNode, - Reference: &c.Reference, - NodeMap: &c.NodeMap, - Extensions: &c.Extensions, - Index: &c.index, - Context: &c.context, - }, ctx, keyNode, root, idx) - return nil -} - -// GetExtensions returns all CriterionExpressionType extensions and satisfies the low.HasExtensions interface. -func (c *CriterionExpressionType) GetExtensions() *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] { - return c.Extensions -} - -// Hash will return a consistent hash of the CriterionExpressionType object. -func (c *CriterionExpressionType) Hash() uint64 { - return low.WithHasher(func(h *maphash.Hash) uint64 { - if !c.Type.IsEmpty() { - h.WriteString(c.Type.Value) - h.WriteByte(low.HASH_PIPE) - } - if !c.Version.IsEmpty() { - h.WriteString(c.Version.Value) - h.WriteByte(low.HASH_PIPE) - } - hashExtensionsInto(h, c.Extensions) - return h.Sum64() - }) -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/doc.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/doc.go deleted file mode 100644 index 9db7e5f6f20..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/doc.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2022-2026 Princess Beef Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -// Package arazzo contains low-level Arazzo models. -// -// Arazzo low models include an *index.SpecIndex in Build signatures to remain -// compatible with the shared low.Buildable interface and generic extraction -// pipeline used across low-level model packages. -// -// In current Arazzo parsing paths, no SpecIndex is built and nil is passed for -// idx (for example via libopenapi.NewArazzoDocument), so GetIndex() will -// typically return nil unless callers explicitly provide an index. 
-package arazzo diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/failure_action.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/failure_action.go deleted file mode 100644 index e7d73366a7d..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/failure_action.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright 2022-2026 Princess Beef Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package arazzo - -import ( - "context" - "fmt" - "hash/maphash" - "strconv" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/index" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// FailureAction represents a low-level Arazzo Failure Action Object. -// A failure action can be a full definition or a Reusable Object with a $components reference. -// https://spec.openapis.org/arazzo/v1.0.1#failure-action-object -type FailureAction struct { - Name low.NodeReference[string] - Type low.NodeReference[string] - WorkflowId low.NodeReference[string] - StepId low.NodeReference[string] - RetryAfter low.NodeReference[float64] - RetryLimit low.NodeReference[int64] - Criteria low.NodeReference[[]low.ValueReference[*Criterion]] - ComponentRef low.NodeReference[string] - Extensions *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] - KeyNode *yaml.Node - RootNode *yaml.Node - index *index.SpecIndex - context context.Context - *low.Reference - low.NodeMap -} - -var extractFailureActionCriteria = extractArray[Criterion] - -// IsReusable returns true if this failure action is a Reusable Object (has a reference field). -func (f *FailureAction) IsReusable() bool { - return !f.ComponentRef.IsEmpty() -} - -// GetIndex returns the index.SpecIndex instance attached to the FailureAction object. -// For Arazzo low models this is typically nil, because Arazzo parsing does not build a SpecIndex. 
-// The index parameter is still required to satisfy the shared low.Buildable interface and generic extractors. -func (f *FailureAction) GetIndex() *index.SpecIndex { - return f.index -} - -// GetContext returns the context.Context instance used when building the FailureAction object. -func (f *FailureAction) GetContext() context.Context { - return f.context -} - -// FindExtension returns a ValueReference containing the extension value, if found. -func (f *FailureAction) FindExtension(ext string) *low.ValueReference[*yaml.Node] { - return low.FindItemInOrderedMap(ext, f.Extensions) -} - -// GetRootNode returns the root yaml node of the FailureAction object. -func (f *FailureAction) GetRootNode() *yaml.Node { - return f.RootNode -} - -// GetKeyNode returns the key yaml node of the FailureAction object. -func (f *FailureAction) GetKeyNode() *yaml.Node { - return f.KeyNode -} - -// Build will extract all properties of the FailureAction object. -func (f *FailureAction) Build(ctx context.Context, keyNode, root *yaml.Node, idx *index.SpecIndex) error { - root = initBuild(&arazzoBase{ - KeyNode: &f.KeyNode, - RootNode: &f.RootNode, - Reference: &f.Reference, - NodeMap: &f.NodeMap, - Extensions: &f.Extensions, - Index: &f.index, - Context: &f.context, - }, ctx, keyNode, root, idx) - - f.ComponentRef = extractComponentRef(ReferenceLabel, root) - - // Extract numeric fields (retryAfter, retryLimit) which need special parsing - for i := 0; i < len(root.Content); i += 2 { - if i+1 >= len(root.Content) { - break - } - k := root.Content[i] - v := root.Content[i+1] - switch k.Value { - case RetryAfterLabel: - val, err := strconv.ParseFloat(v.Value, 64) - if err != nil { - return fmt.Errorf("invalid retryAfter value %q: %w", v.Value, err) - } - f.RetryAfter = low.NodeReference[float64]{ - Value: val, - KeyNode: k, - ValueNode: v, - } - case RetryLimitLabel: - val, err := strconv.ParseInt(v.Value, 10, 64) - if err != nil { - return fmt.Errorf("invalid retryLimit value %q: %w", 
v.Value, err) - } - f.RetryLimit = low.NodeReference[int64]{ - Value: val, - KeyNode: k, - ValueNode: v, - } - } - } - - // Extract criteria array - criteria, err := extractFailureActionCriteria(ctx, CriteriaLabel, root, idx) - if err != nil { - return err - } - f.Criteria = criteria - return nil -} - -// GetExtensions returns all FailureAction extensions and satisfies the low.HasExtensions interface. -func (f *FailureAction) GetExtensions() *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] { - return f.Extensions -} - -// Hash will return a consistent hash of the FailureAction object. -func (f *FailureAction) Hash() uint64 { - return low.WithHasher(func(h *maphash.Hash) uint64 { - if !f.ComponentRef.IsEmpty() { - h.WriteString(f.ComponentRef.Value) - h.WriteByte(low.HASH_PIPE) - } - if !f.Name.IsEmpty() { - h.WriteString(f.Name.Value) - h.WriteByte(low.HASH_PIPE) - } - if !f.Type.IsEmpty() { - h.WriteString(f.Type.Value) - h.WriteByte(low.HASH_PIPE) - } - if !f.WorkflowId.IsEmpty() { - h.WriteString(f.WorkflowId.Value) - h.WriteByte(low.HASH_PIPE) - } - if !f.StepId.IsEmpty() { - h.WriteString(f.StepId.Value) - h.WriteByte(low.HASH_PIPE) - } - if !f.RetryAfter.IsEmpty() { - h.WriteString(strconv.FormatFloat(f.RetryAfter.Value, 'f', -1, 64)) - h.WriteByte(low.HASH_PIPE) - } - if !f.RetryLimit.IsEmpty() { - low.HashInt64(h, f.RetryLimit.Value) - h.WriteByte(low.HASH_PIPE) - } - if !f.Criteria.IsEmpty() { - for _, c := range f.Criteria.Value { - low.HashUint64(h, c.Value.Hash()) - } - } - hashExtensionsInto(h, f.Extensions) - return h.Sum64() - }) -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/helpers.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/helpers.go deleted file mode 100644 index 6430a002daf..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/helpers.go +++ /dev/null @@ -1,299 +0,0 @@ -// Copyright 2022-2026 Princess Beef Heavy Industries / Dave Shanley -// 
SPDX-License-Identifier: MIT - -package arazzo - -import ( - "context" - "hash/maphash" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/index" - "github.com/pb33f/libopenapi/orderedmap" - "github.com/pb33f/libopenapi/utils" - "go.yaml.in/yaml/v4" -) - -// arazzoBase bundles the common fields found in every Arazzo low-level struct -// so they can be initialized in a single helper call. -type arazzoBase struct { - KeyNode **yaml.Node - RootNode **yaml.Node - Reference **low.Reference - NodeMap *low.NodeMap - Extensions **orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] - Index **index.SpecIndex - Context *context.Context -} - -// initBuild performs the common preamble shared by every Arazzo low-level Build method. -// It returns the resolved root node (after alias/merge processing) for further extraction. -func initBuild(b *arazzoBase, ctx context.Context, keyNode, root *yaml.Node, idx *index.SpecIndex) *yaml.Node { - *b.KeyNode = keyNode - root = utils.NodeAlias(root) - *b.RootNode = root - utils.CheckForMergeNodes(root) - *b.Reference = new(low.Reference) - b.NodeMap.Nodes = low.ExtractNodes(ctx, root) - ext := low.ExtractExtensions(root) - *b.Extensions = ext - *b.Index = idx - *b.Context = ctx - low.ExtractExtensionNodes(ctx, ext, b.NodeMap.Nodes) - return root -} - -// findLabeledNode searches root's Content pairs for a key matching label. -// Returns the key node, value node, and whether the label was found. -func findLabeledNode(label string, root *yaml.Node) (key, value *yaml.Node, found bool) { - for i := 0; i < len(root.Content); i += 2 { - if i+1 >= len(root.Content) { - break - } - if root.Content[i].Value == label { - return root.Content[i], root.Content[i+1], true - } - } - return nil, nil, false -} - -// assignNodeReference centralizes the common "if err return; set field" pattern -// used by Build methods when extracting nested NodeReferences. 
-func assignNodeReference[T any]( - ref low.NodeReference[T], - err error, - assign func(low.NodeReference[T]), -) error { - if err != nil { - return err - } - assign(ref) - return nil -} - -// extractArray extracts a YAML sequence node into a slice of ValueReferences for the given label. -func extractArray[N any, T interface { - *N - Build(context.Context, *yaml.Node, *yaml.Node, *index.SpecIndex) error -}]( - ctx context.Context, label string, root *yaml.Node, idx *index.SpecIndex, -) (low.NodeReference[[]low.ValueReference[T]], error) { - var result low.NodeReference[[]low.ValueReference[T]] - key, value, found := findLabeledNode(label, root) - if !found { - return result, nil - } - result.KeyNode = key - result.ValueNode = value - if value.Kind != yaml.SequenceNode { - return result, nil - } - items := make([]low.ValueReference[T], 0, len(value.Content)) - for _, itemNode := range value.Content { - obj := T(new(N)) - if err := low.BuildModel(itemNode, obj); err != nil { - return result, err - } - if err := obj.Build(ctx, nil, itemNode, idx); err != nil { - return result, err - } - items = append(items, low.ValueReference[T]{ - Value: obj, - ValueNode: itemNode, - }) - } - result.Value = items - return result, nil -} - -// extractObjectMap extracts a YAML mapping node into an ordered map of string keys to built objects. 
-func extractObjectMap[N any, T interface { - *N - Build(context.Context, *yaml.Node, *yaml.Node, *index.SpecIndex) error -}]( - ctx context.Context, label string, root *yaml.Node, idx *index.SpecIndex, -) (low.NodeReference[*orderedmap.Map[low.KeyReference[string], low.ValueReference[T]]], error) { - var result low.NodeReference[*orderedmap.Map[low.KeyReference[string], low.ValueReference[T]]] - key, value, found := findLabeledNode(label, root) - if !found { - return result, nil - } - result.KeyNode = key - result.ValueNode = value - if value.Kind != yaml.MappingNode { - return result, nil - } - m := orderedmap.New[low.KeyReference[string], low.ValueReference[T]]() - for j := 0; j < len(value.Content); j += 2 { - if j+1 >= len(value.Content) { - break - } - mapKey := value.Content[j] - mapVal := value.Content[j+1] - obj := T(new(N)) - if err := low.BuildModel(mapVal, obj); err != nil { - return result, err - } - if err := obj.Build(ctx, mapKey, mapVal, idx); err != nil { - return result, err - } - m.Set(low.KeyReference[string]{ - Value: mapKey.Value, - KeyNode: mapKey, - }, low.ValueReference[T]{ - Value: obj, - ValueNode: mapVal, - }) - } - result.Value = m - return result, nil -} - -// extractStringArray extracts a YAML sequence of scalar strings into a NodeReference. 
-func extractStringArray(label string, root *yaml.Node) low.NodeReference[[]low.ValueReference[string]] { - var result low.NodeReference[[]low.ValueReference[string]] - key, value, found := findLabeledNode(label, root) - if !found { - return result - } - result.KeyNode = key - result.ValueNode = value - if value.Kind != yaml.SequenceNode { - return result - } - items := make([]low.ValueReference[string], 0, len(value.Content)) - for _, itemNode := range value.Content { - items = append(items, low.ValueReference[string]{ - Value: itemNode.Value, - ValueNode: itemNode, - }) - } - result.Value = items - return result -} - -// extractRawNode extracts a raw *yaml.Node for a given label without further processing. -func extractRawNode(label string, root *yaml.Node) low.NodeReference[*yaml.Node] { - var result low.NodeReference[*yaml.Node] - key, value, found := findLabeledNode(label, root) - if !found { - return result - } - result.KeyNode = key - result.ValueNode = value - result.Value = value - return result -} - -// extractExpressionsMap extracts a YAML mapping node into an ordered map of string keys to string values. 
-func extractExpressionsMap(label string, root *yaml.Node) low.NodeReference[*orderedmap.Map[low.KeyReference[string], low.ValueReference[string]]] { - var result low.NodeReference[*orderedmap.Map[low.KeyReference[string], low.ValueReference[string]]] - key, value, found := findLabeledNode(label, root) - if !found { - return result - } - result.KeyNode = key - result.ValueNode = value - if value.Kind != yaml.MappingNode { - return result - } - m := orderedmap.New[low.KeyReference[string], low.ValueReference[string]]() - for j := 0; j < len(value.Content); j += 2 { - if j+1 >= len(value.Content) { - break - } - mapKey := value.Content[j] - mapVal := value.Content[j+1] - m.Set(low.KeyReference[string]{ - Value: mapKey.Value, - KeyNode: mapKey, - }, low.ValueReference[string]{ - Value: mapVal.Value, - ValueNode: mapVal, - }) - } - result.Value = m - return result -} - -// extractRawNodeMap extracts a YAML mapping node into an ordered map of string keys to raw *yaml.Node values. -func extractRawNodeMap(label string, root *yaml.Node) low.NodeReference[*orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]]] { - var result low.NodeReference[*orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]]] - key, value, found := findLabeledNode(label, root) - if !found { - return result - } - result.KeyNode = key - result.ValueNode = value - if value.Kind != yaml.MappingNode { - return result - } - m := orderedmap.New[low.KeyReference[string], low.ValueReference[*yaml.Node]]() - for j := 0; j < len(value.Content); j += 2 { - if j+1 >= len(value.Content) { - break - } - mapKey := value.Content[j] - mapVal := value.Content[j+1] - m.Set(low.KeyReference[string]{ - Value: mapKey.Value, - KeyNode: mapKey, - }, low.ValueReference[*yaml.Node]{ - Value: mapVal, - ValueNode: mapVal, - }) - } - result.Value = m - return result -} - -// extractComponentRef extracts a string field from root.Content by label, returning it as a NodeReference. 
-// Used for the 'reference' field which is renamed to ComponentRef in structs to avoid collision -// with the embedded *low.Reference. -func extractComponentRef(label string, root *yaml.Node) low.NodeReference[string] { - key, value, found := findLabeledNode(label, root) - if !found { - return low.NodeReference[string]{} - } - return low.NodeReference[string]{ - Value: value.Value, - KeyNode: key, - ValueNode: value, - } -} - -// hashYAMLNode writes a yaml.Node tree directly into a maphash.Hash for efficient hashing. -func hashYAMLNode(h *maphash.Hash, node *yaml.Node) { - if node == nil { - return - } - switch node.Kind { - case yaml.ScalarNode: - h.WriteString(node.Value) - h.WriteByte(low.HASH_PIPE) - case yaml.MappingNode, yaml.SequenceNode: - for _, child := range node.Content { - hashYAMLNode(h, child) - } - case yaml.DocumentNode: - for _, child := range node.Content { - hashYAMLNode(h, child) - } - case yaml.AliasNode: - if node.Alias != nil { - hashYAMLNode(h, node.Alias) - } - } -} - -// hashExtensionsInto writes extension hashes directly into the hasher without intermediate allocations. 
-func hashExtensionsInto(h *maphash.Hash, ext *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]]) { - if ext == nil { - return - } - for pair := ext.First(); pair != nil; pair = pair.Next() { - h.WriteString(pair.Key().Value) - h.WriteByte(low.HASH_PIPE) - hashYAMLNode(h, pair.Value().Value) - } -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/info.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/info.go deleted file mode 100644 index f332e0e6c96..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/info.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2022-2026 Princess Beef Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package arazzo - -import ( - "context" - "hash/maphash" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/index" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// Info represents a low-level Arazzo Info Object. -// https://spec.openapis.org/arazzo/v1.0.1#info-object -type Info struct { - Title low.NodeReference[string] - Summary low.NodeReference[string] - Description low.NodeReference[string] - Version low.NodeReference[string] - Extensions *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] - KeyNode *yaml.Node - RootNode *yaml.Node - index *index.SpecIndex - context context.Context - *low.Reference - low.NodeMap -} - -// GetIndex returns the index.SpecIndex instance attached to the Info object. -// For Arazzo low models this is typically nil, because Arazzo parsing does not build a SpecIndex. -// The index parameter is still required to satisfy the shared low.Buildable interface and generic extractors. -func (i *Info) GetIndex() *index.SpecIndex { - return i.index -} - -// GetContext returns the context.Context instance used when building the Info object. 
-func (i *Info) GetContext() context.Context { - return i.context -} - -// FindExtension returns a ValueReference containing the extension value, if found. -func (i *Info) FindExtension(ext string) *low.ValueReference[*yaml.Node] { - return low.FindItemInOrderedMap(ext, i.Extensions) -} - -// GetRootNode returns the root yaml node of the Info object. -func (i *Info) GetRootNode() *yaml.Node { - return i.RootNode -} - -// GetKeyNode returns the key yaml node of the Info object. -func (i *Info) GetKeyNode() *yaml.Node { - return i.KeyNode -} - -// Build will extract all properties of the Info object. -func (i *Info) Build(ctx context.Context, keyNode, root *yaml.Node, idx *index.SpecIndex) error { - root = initBuild(&arazzoBase{ - KeyNode: &i.KeyNode, - RootNode: &i.RootNode, - Reference: &i.Reference, - NodeMap: &i.NodeMap, - Extensions: &i.Extensions, - Index: &i.index, - Context: &i.context, - }, ctx, keyNode, root, idx) - return nil -} - -// GetExtensions returns all Info extensions and satisfies the low.HasExtensions interface. -func (i *Info) GetExtensions() *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] { - return i.Extensions -} - -// Hash will return a consistent hash of the Info object. 
-func (i *Info) Hash() uint64 { - return low.WithHasher(func(h *maphash.Hash) uint64 { - if !i.Title.IsEmpty() { - h.WriteString(i.Title.Value) - h.WriteByte(low.HASH_PIPE) - } - if !i.Summary.IsEmpty() { - h.WriteString(i.Summary.Value) - h.WriteByte(low.HASH_PIPE) - } - if !i.Description.IsEmpty() { - h.WriteString(i.Description.Value) - h.WriteByte(low.HASH_PIPE) - } - if !i.Version.IsEmpty() { - h.WriteString(i.Version.Value) - h.WriteByte(low.HASH_PIPE) - } - hashExtensionsInto(h, i.Extensions) - return h.Sum64() - }) -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/parameter.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/parameter.go deleted file mode 100644 index 2a2d6947858..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/parameter.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2022-2026 Princess Beef Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package arazzo - -import ( - "context" - "hash/maphash" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/index" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// Parameter represents a low-level Arazzo Parameter Object. -// A parameter can be a full parameter definition or a Reusable Object with a $components reference. -// https://spec.openapis.org/arazzo/v1.0.1#parameter-object -type Parameter struct { - Name low.NodeReference[string] - In low.NodeReference[string] - Value low.NodeReference[*yaml.Node] - ComponentRef low.NodeReference[string] - Extensions *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] - KeyNode *yaml.Node - RootNode *yaml.Node - index *index.SpecIndex - context context.Context - *low.Reference - low.NodeMap -} - -// IsReusable returns true if this parameter is a Reusable Object (has a reference field). 
-func (p *Parameter) IsReusable() bool { - return !p.ComponentRef.IsEmpty() -} - -// GetIndex returns the index.SpecIndex instance attached to the Parameter object. -// For Arazzo low models this is typically nil, because Arazzo parsing does not build a SpecIndex. -// The index parameter is still required to satisfy the shared low.Buildable interface and generic extractors. -func (p *Parameter) GetIndex() *index.SpecIndex { - return p.index -} - -// GetContext returns the context.Context instance used when building the Parameter object. -func (p *Parameter) GetContext() context.Context { - return p.context -} - -// FindExtension returns a ValueReference containing the extension value, if found. -func (p *Parameter) FindExtension(ext string) *low.ValueReference[*yaml.Node] { - return low.FindItemInOrderedMap(ext, p.Extensions) -} - -// GetRootNode returns the root yaml node of the Parameter object. -func (p *Parameter) GetRootNode() *yaml.Node { - return p.RootNode -} - -// GetKeyNode returns the key yaml node of the Parameter object. -func (p *Parameter) GetKeyNode() *yaml.Node { - return p.KeyNode -} - -// Build will extract all properties of the Parameter object. -func (p *Parameter) Build(ctx context.Context, keyNode, root *yaml.Node, idx *index.SpecIndex) error { - root = initBuild(&arazzoBase{ - KeyNode: &p.KeyNode, - RootNode: &p.RootNode, - Reference: &p.Reference, - NodeMap: &p.NodeMap, - Extensions: &p.Extensions, - Index: &p.index, - Context: &p.context, - }, ctx, keyNode, root, idx) - - p.Value = extractRawNode(ValueLabel, root) - p.ComponentRef = extractComponentRef(ReferenceLabel, root) - return nil -} - -// GetExtensions returns all Parameter extensions and satisfies the low.HasExtensions interface. -func (p *Parameter) GetExtensions() *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] { - return p.Extensions -} - -// Hash will return a consistent hash of the Parameter object. 
-func (p *Parameter) Hash() uint64 { - return low.WithHasher(func(h *maphash.Hash) uint64 { - if !p.ComponentRef.IsEmpty() { - h.WriteString(p.ComponentRef.Value) - h.WriteByte(low.HASH_PIPE) - } - if !p.Name.IsEmpty() { - h.WriteString(p.Name.Value) - h.WriteByte(low.HASH_PIPE) - } - if !p.In.IsEmpty() { - h.WriteString(p.In.Value) - h.WriteByte(low.HASH_PIPE) - } - if !p.Value.IsEmpty() { - hashYAMLNode(h, p.Value.Value) - } - hashExtensionsInto(h, p.Extensions) - return h.Sum64() - }) -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/payload_replacement.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/payload_replacement.go deleted file mode 100644 index 2ed85c06922..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/payload_replacement.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2022-2026 Princess Beef Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package arazzo - -import ( - "context" - "hash/maphash" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/index" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// PayloadReplacement represents a low-level Arazzo Payload Replacement Object. -// https://spec.openapis.org/arazzo/v1.0.1#payload-replacement-object -type PayloadReplacement struct { - Target low.NodeReference[string] - Value low.NodeReference[*yaml.Node] - Extensions *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] - KeyNode *yaml.Node - RootNode *yaml.Node - index *index.SpecIndex - context context.Context - *low.Reference - low.NodeMap -} - -// GetIndex returns the index.SpecIndex instance attached to the PayloadReplacement object. -// For Arazzo low models this is typically nil, because Arazzo parsing does not build a SpecIndex. -// The index parameter is still required to satisfy the shared low.Buildable interface and generic extractors. 
-func (p *PayloadReplacement) GetIndex() *index.SpecIndex { - return p.index -} - -// GetContext returns the context.Context instance used when building the PayloadReplacement object. -func (p *PayloadReplacement) GetContext() context.Context { - return p.context -} - -// FindExtension returns a ValueReference containing the extension value, if found. -func (p *PayloadReplacement) FindExtension(ext string) *low.ValueReference[*yaml.Node] { - return low.FindItemInOrderedMap(ext, p.Extensions) -} - -// GetRootNode returns the root yaml node of the PayloadReplacement object. -func (p *PayloadReplacement) GetRootNode() *yaml.Node { - return p.RootNode -} - -// GetKeyNode returns the key yaml node of the PayloadReplacement object. -func (p *PayloadReplacement) GetKeyNode() *yaml.Node { - return p.KeyNode -} - -// Build will extract all properties of the PayloadReplacement object. -func (p *PayloadReplacement) Build(ctx context.Context, keyNode, root *yaml.Node, idx *index.SpecIndex) error { - root = initBuild(&arazzoBase{ - KeyNode: &p.KeyNode, - RootNode: &p.RootNode, - Reference: &p.Reference, - NodeMap: &p.NodeMap, - Extensions: &p.Extensions, - Index: &p.index, - Context: &p.context, - }, ctx, keyNode, root, idx) - - p.Value = extractRawNode(ValueLabel, root) - return nil -} - -// GetExtensions returns all PayloadReplacement extensions and satisfies the low.HasExtensions interface. -func (p *PayloadReplacement) GetExtensions() *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] { - return p.Extensions -} - -// Hash will return a consistent hash of the PayloadReplacement object. 
-func (p *PayloadReplacement) Hash() uint64 { - return low.WithHasher(func(h *maphash.Hash) uint64 { - if !p.Target.IsEmpty() { - h.WriteString(p.Target.Value) - h.WriteByte(low.HASH_PIPE) - } - if !p.Value.IsEmpty() { - hashYAMLNode(h, p.Value.Value) - } - hashExtensionsInto(h, p.Extensions) - return h.Sum64() - }) -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/request_body.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/request_body.go deleted file mode 100644 index 40381d2699b..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/request_body.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2022-2026 Princess Beef Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package arazzo - -import ( - "context" - "hash/maphash" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/index" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// RequestBody represents a low-level Arazzo Request Body Object. -// https://spec.openapis.org/arazzo/v1.0.1#request-body-object -type RequestBody struct { - ContentType low.NodeReference[string] - Payload low.NodeReference[*yaml.Node] - Replacements low.NodeReference[[]low.ValueReference[*PayloadReplacement]] - Extensions *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] - KeyNode *yaml.Node - RootNode *yaml.Node - index *index.SpecIndex - context context.Context - *low.Reference - low.NodeMap -} - -var extractRequestBodyReplacements = extractArray[PayloadReplacement] - -// GetIndex returns the index.SpecIndex instance attached to the RequestBody object. -// For Arazzo low models this is typically nil, because Arazzo parsing does not build a SpecIndex. -// The index parameter is still required to satisfy the shared low.Buildable interface and generic extractors. 
-func (r *RequestBody) GetIndex() *index.SpecIndex { - return r.index -} - -// GetContext returns the context.Context instance used when building the RequestBody object. -func (r *RequestBody) GetContext() context.Context { - return r.context -} - -// FindExtension returns a ValueReference containing the extension value, if found. -func (r *RequestBody) FindExtension(ext string) *low.ValueReference[*yaml.Node] { - return low.FindItemInOrderedMap(ext, r.Extensions) -} - -// GetRootNode returns the root yaml node of the RequestBody object. -func (r *RequestBody) GetRootNode() *yaml.Node { - return r.RootNode -} - -// GetKeyNode returns the key yaml node of the RequestBody object. -func (r *RequestBody) GetKeyNode() *yaml.Node { - return r.KeyNode -} - -// Build will extract all properties of the RequestBody object. -func (r *RequestBody) Build(ctx context.Context, keyNode, root *yaml.Node, idx *index.SpecIndex) error { - root = initBuild(&arazzoBase{ - KeyNode: &r.KeyNode, - RootNode: &r.RootNode, - Reference: &r.Reference, - NodeMap: &r.NodeMap, - Extensions: &r.Extensions, - Index: &r.index, - Context: &r.context, - }, ctx, keyNode, root, idx) - - r.Payload = extractRawNode(PayloadLabel, root) - - replacements, err := extractRequestBodyReplacements(ctx, ReplacementsLabel, root, idx) - if err != nil { - return err - } - r.Replacements = replacements - return nil -} - -// GetExtensions returns all RequestBody extensions and satisfies the low.HasExtensions interface. -func (r *RequestBody) GetExtensions() *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] { - return r.Extensions -} - -// Hash will return a consistent hash of the RequestBody object. 
-func (r *RequestBody) Hash() uint64 { - return low.WithHasher(func(h *maphash.Hash) uint64 { - if !r.ContentType.IsEmpty() { - h.WriteString(r.ContentType.Value) - h.WriteByte(low.HASH_PIPE) - } - if !r.Payload.IsEmpty() { - hashYAMLNode(h, r.Payload.Value) - } - if !r.Replacements.IsEmpty() { - for _, rep := range r.Replacements.Value { - low.HashUint64(h, rep.Value.Hash()) - } - } - hashExtensionsInto(h, r.Extensions) - return h.Sum64() - }) -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/source_description.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/source_description.go deleted file mode 100644 index 17da0705662..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/source_description.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2022-2026 Princess Beef Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package arazzo - -import ( - "context" - "hash/maphash" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/index" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// SourceDescription represents a low-level Arazzo Source Description Object. -// https://spec.openapis.org/arazzo/v1.0.1#source-description-object -type SourceDescription struct { - Name low.NodeReference[string] - URL low.NodeReference[string] - Type low.NodeReference[string] - Extensions *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] - KeyNode *yaml.Node - RootNode *yaml.Node - index *index.SpecIndex - context context.Context - *low.Reference - low.NodeMap -} - -// GetIndex returns the index.SpecIndex instance attached to the SourceDescription object. -// For Arazzo low models this is typically nil, because Arazzo parsing does not build a SpecIndex. -// The index parameter is still required to satisfy the shared low.Buildable interface and generic extractors. 
-func (s *SourceDescription) GetIndex() *index.SpecIndex { - return s.index -} - -// GetContext returns the context.Context instance used when building the SourceDescription object. -func (s *SourceDescription) GetContext() context.Context { - return s.context -} - -// FindExtension returns a ValueReference containing the extension value, if found. -func (s *SourceDescription) FindExtension(ext string) *low.ValueReference[*yaml.Node] { - return low.FindItemInOrderedMap(ext, s.Extensions) -} - -// GetRootNode returns the root yaml node of the SourceDescription object. -func (s *SourceDescription) GetRootNode() *yaml.Node { - return s.RootNode -} - -// GetKeyNode returns the key yaml node of the SourceDescription object. -func (s *SourceDescription) GetKeyNode() *yaml.Node { - return s.KeyNode -} - -// Build will extract all properties of the SourceDescription object. -func (s *SourceDescription) Build(ctx context.Context, keyNode, root *yaml.Node, idx *index.SpecIndex) error { - root = initBuild(&arazzoBase{ - KeyNode: &s.KeyNode, - RootNode: &s.RootNode, - Reference: &s.Reference, - NodeMap: &s.NodeMap, - Extensions: &s.Extensions, - Index: &s.index, - Context: &s.context, - }, ctx, keyNode, root, idx) - return nil -} - -// GetExtensions returns all SourceDescription extensions and satisfies the low.HasExtensions interface. -func (s *SourceDescription) GetExtensions() *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] { - return s.Extensions -} - -// Hash will return a consistent hash of the SourceDescription object. 
-func (s *SourceDescription) Hash() uint64 { - return low.WithHasher(func(h *maphash.Hash) uint64 { - if !s.Name.IsEmpty() { - h.WriteString(s.Name.Value) - h.WriteByte(low.HASH_PIPE) - } - if !s.URL.IsEmpty() { - h.WriteString(s.URL.Value) - h.WriteByte(low.HASH_PIPE) - } - if !s.Type.IsEmpty() { - h.WriteString(s.Type.Value) - h.WriteByte(low.HASH_PIPE) - } - hashExtensionsInto(h, s.Extensions) - return h.Sum64() - }) -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/step.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/step.go deleted file mode 100644 index 43ea941251c..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/step.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2022-2026 Princess Beef Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package arazzo - -import ( - "context" - "hash/maphash" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/index" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// Step represents a low-level Arazzo Step Object. 
-// https://spec.openapis.org/arazzo/v1.0.1#step-object -type Step struct { - StepId low.NodeReference[string] - Description low.NodeReference[string] - OperationId low.NodeReference[string] - OperationPath low.NodeReference[string] - WorkflowId low.NodeReference[string] - Parameters low.NodeReference[[]low.ValueReference[*Parameter]] - RequestBody low.NodeReference[*RequestBody] - SuccessCriteria low.NodeReference[[]low.ValueReference[*Criterion]] - OnSuccess low.NodeReference[[]low.ValueReference[*SuccessAction]] - OnFailure low.NodeReference[[]low.ValueReference[*FailureAction]] - Outputs low.NodeReference[*orderedmap.Map[low.KeyReference[string], low.ValueReference[string]]] - Extensions *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] - KeyNode *yaml.Node - RootNode *yaml.Node - index *index.SpecIndex - context context.Context - *low.Reference - low.NodeMap -} - -var extractStepParameters = extractArray[Parameter] -var extractStepSuccessCriteria = extractArray[Criterion] -var extractStepOnSuccess = extractArray[SuccessAction] - -// GetIndex returns the index.SpecIndex instance attached to the Step object. -// For Arazzo low models this is typically nil, because Arazzo parsing does not build a SpecIndex. -// The index parameter is still required to satisfy the shared low.Buildable interface and generic extractors. -func (s *Step) GetIndex() *index.SpecIndex { - return s.index -} - -// GetContext returns the context.Context instance used when building the Step object. -func (s *Step) GetContext() context.Context { - return s.context -} - -// FindExtension returns a ValueReference containing the extension value, if found. -func (s *Step) FindExtension(ext string) *low.ValueReference[*yaml.Node] { - return low.FindItemInOrderedMap(ext, s.Extensions) -} - -// GetRootNode returns the root yaml node of the Step object. 
-func (s *Step) GetRootNode() *yaml.Node { - return s.RootNode -} - -// GetKeyNode returns the key yaml node of the Step object. -func (s *Step) GetKeyNode() *yaml.Node { - return s.KeyNode -} - -// Build will extract all properties of the Step object. -func (s *Step) Build(ctx context.Context, keyNode, root *yaml.Node, idx *index.SpecIndex) error { - root = initBuild(&arazzoBase{ - KeyNode: &s.KeyNode, - RootNode: &s.RootNode, - Reference: &s.Reference, - NodeMap: &s.NodeMap, - Extensions: &s.Extensions, - Index: &s.index, - Context: &s.context, - }, ctx, keyNode, root, idx) - - params, err := extractStepParameters(ctx, ParametersLabel, root, idx) - if err != nil { - return err - } - s.Parameters = params - - reqBody, err := low.ExtractObject[*RequestBody](ctx, RequestBodyLabel, root, idx) - if err != nil { - return err - } - s.RequestBody = reqBody - - criteria, err := extractStepSuccessCriteria(ctx, SuccessCriteriaLabel, root, idx) - if err != nil { - return err - } - s.SuccessCriteria = criteria - - onSuccess, err := extractStepOnSuccess(ctx, OnSuccessLabel, root, idx) - if err != nil { - return err - } - s.OnSuccess = onSuccess - - onFailure, err := extractArray[FailureAction](ctx, OnFailureLabel, root, idx) - if err != nil { - return err - } - s.OnFailure = onFailure - - s.Outputs = extractExpressionsMap(OutputsLabel, root) - - return nil -} - -// GetExtensions returns all Step extensions and satisfies the low.HasExtensions interface. -func (s *Step) GetExtensions() *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] { - return s.Extensions -} - -// Hash will return a consistent hash of the Step object. 
-func (s *Step) Hash() uint64 { - return low.WithHasher(func(h *maphash.Hash) uint64 { - if !s.StepId.IsEmpty() { - h.WriteString(s.StepId.Value) - h.WriteByte(low.HASH_PIPE) - } - if !s.Description.IsEmpty() { - h.WriteString(s.Description.Value) - h.WriteByte(low.HASH_PIPE) - } - if !s.OperationId.IsEmpty() { - h.WriteString(s.OperationId.Value) - h.WriteByte(low.HASH_PIPE) - } - if !s.OperationPath.IsEmpty() { - h.WriteString(s.OperationPath.Value) - h.WriteByte(low.HASH_PIPE) - } - if !s.WorkflowId.IsEmpty() { - h.WriteString(s.WorkflowId.Value) - h.WriteByte(low.HASH_PIPE) - } - if !s.Parameters.IsEmpty() { - for _, p := range s.Parameters.Value { - low.HashUint64(h, p.Value.Hash()) - } - } - if !s.RequestBody.IsEmpty() { - low.HashUint64(h, s.RequestBody.Value.Hash()) - } - if !s.SuccessCriteria.IsEmpty() { - for _, c := range s.SuccessCriteria.Value { - low.HashUint64(h, c.Value.Hash()) - } - } - if !s.OnSuccess.IsEmpty() { - for _, a := range s.OnSuccess.Value { - low.HashUint64(h, a.Value.Hash()) - } - } - if !s.OnFailure.IsEmpty() { - for _, a := range s.OnFailure.Value { - low.HashUint64(h, a.Value.Hash()) - } - } - if !s.Outputs.IsEmpty() { - for pair := s.Outputs.Value.First(); pair != nil; pair = pair.Next() { - h.WriteString(pair.Key().Value) - h.WriteByte(low.HASH_PIPE) - h.WriteString(pair.Value().Value) - h.WriteByte(low.HASH_PIPE) - } - } - hashExtensionsInto(h, s.Extensions) - return h.Sum64() - }) -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/success_action.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/success_action.go deleted file mode 100644 index 093bfce8e94..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/success_action.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2022-2026 Princess Beef Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package arazzo - -import ( - "context" - "hash/maphash" - - "github.com/pb33f/libopenapi/datamodel/low" - 
"github.com/pb33f/libopenapi/index" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// SuccessAction represents a low-level Arazzo Success Action Object. -// A success action can be a full definition or a Reusable Object with a $components reference. -// https://spec.openapis.org/arazzo/v1.0.1#success-action-object -type SuccessAction struct { - Name low.NodeReference[string] - Type low.NodeReference[string] - WorkflowId low.NodeReference[string] - StepId low.NodeReference[string] - Criteria low.NodeReference[[]low.ValueReference[*Criterion]] - ComponentRef low.NodeReference[string] - Extensions *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] - KeyNode *yaml.Node - RootNode *yaml.Node - index *index.SpecIndex - context context.Context - *low.Reference - low.NodeMap -} - -var extractSuccessActionCriteria = extractArray[Criterion] - -// IsReusable returns true if this success action is a Reusable Object (has a reference field). -func (s *SuccessAction) IsReusable() bool { - return !s.ComponentRef.IsEmpty() -} - -// GetIndex returns the index.SpecIndex instance attached to the SuccessAction object. -// For Arazzo low models this is typically nil, because Arazzo parsing does not build a SpecIndex. -// The index parameter is still required to satisfy the shared low.Buildable interface and generic extractors. -func (s *SuccessAction) GetIndex() *index.SpecIndex { - return s.index -} - -// GetContext returns the context.Context instance used when building the SuccessAction object. -func (s *SuccessAction) GetContext() context.Context { - return s.context -} - -// FindExtension returns a ValueReference containing the extension value, if found. -func (s *SuccessAction) FindExtension(ext string) *low.ValueReference[*yaml.Node] { - return low.FindItemInOrderedMap(ext, s.Extensions) -} - -// GetRootNode returns the root yaml node of the SuccessAction object. 
-func (s *SuccessAction) GetRootNode() *yaml.Node { - return s.RootNode -} - -// GetKeyNode returns the key yaml node of the SuccessAction object. -func (s *SuccessAction) GetKeyNode() *yaml.Node { - return s.KeyNode -} - -// Build will extract all properties of the SuccessAction object. -func (s *SuccessAction) Build(ctx context.Context, keyNode, root *yaml.Node, idx *index.SpecIndex) error { - root = initBuild(&arazzoBase{ - KeyNode: &s.KeyNode, - RootNode: &s.RootNode, - Reference: &s.Reference, - NodeMap: &s.NodeMap, - Extensions: &s.Extensions, - Index: &s.index, - Context: &s.context, - }, ctx, keyNode, root, idx) - - s.ComponentRef = extractComponentRef(ReferenceLabel, root) - - // Extract criteria array - criteria, err := extractSuccessActionCriteria(ctx, CriteriaLabel, root, idx) - if err != nil { - return err - } - s.Criteria = criteria - return nil -} - -// GetExtensions returns all SuccessAction extensions and satisfies the low.HasExtensions interface. -func (s *SuccessAction) GetExtensions() *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] { - return s.Extensions -} - -// Hash will return a consistent hash of the SuccessAction object. 
-func (s *SuccessAction) Hash() uint64 { - return low.WithHasher(func(h *maphash.Hash) uint64 { - if !s.ComponentRef.IsEmpty() { - h.WriteString(s.ComponentRef.Value) - h.WriteByte(low.HASH_PIPE) - } - if !s.Name.IsEmpty() { - h.WriteString(s.Name.Value) - h.WriteByte(low.HASH_PIPE) - } - if !s.Type.IsEmpty() { - h.WriteString(s.Type.Value) - h.WriteByte(low.HASH_PIPE) - } - if !s.WorkflowId.IsEmpty() { - h.WriteString(s.WorkflowId.Value) - h.WriteByte(low.HASH_PIPE) - } - if !s.StepId.IsEmpty() { - h.WriteString(s.StepId.Value) - h.WriteByte(low.HASH_PIPE) - } - if !s.Criteria.IsEmpty() { - for _, c := range s.Criteria.Value { - low.HashUint64(h, c.Value.Hash()) - } - } - hashExtensionsInto(h, s.Extensions) - return h.Sum64() - }) -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/workflow.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/workflow.go deleted file mode 100644 index c2efff22952..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/arazzo/workflow.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2022-2026 Princess Beef Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package arazzo - -import ( - "context" - "hash/maphash" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/index" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// Workflow represents a low-level Arazzo Workflow Object. 
-// https://spec.openapis.org/arazzo/v1.0.1#workflow-object -type Workflow struct { - WorkflowId low.NodeReference[string] - Summary low.NodeReference[string] - Description low.NodeReference[string] - Inputs low.NodeReference[*yaml.Node] - DependsOn low.NodeReference[[]low.ValueReference[string]] - Steps low.NodeReference[[]low.ValueReference[*Step]] - SuccessActions low.NodeReference[[]low.ValueReference[*SuccessAction]] - FailureActions low.NodeReference[[]low.ValueReference[*FailureAction]] - Outputs low.NodeReference[*orderedmap.Map[low.KeyReference[string], low.ValueReference[string]]] - Parameters low.NodeReference[[]low.ValueReference[*Parameter]] - Extensions *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] - KeyNode *yaml.Node - RootNode *yaml.Node - index *index.SpecIndex - context context.Context - *low.Reference - low.NodeMap -} - -var extractWorkflowSuccessActions = extractArray[SuccessAction] -var extractWorkflowParameters = extractArray[Parameter] - -// GetIndex returns the index.SpecIndex instance attached to the Workflow object. -// For Arazzo low models this is typically nil, because Arazzo parsing does not build a SpecIndex. -// The index parameter is still required to satisfy the shared low.Buildable interface and generic extractors. -func (w *Workflow) GetIndex() *index.SpecIndex { - return w.index -} - -// GetContext returns the context.Context instance used when building the Workflow object. -func (w *Workflow) GetContext() context.Context { - return w.context -} - -// FindExtension returns a ValueReference containing the extension value, if found. -func (w *Workflow) FindExtension(ext string) *low.ValueReference[*yaml.Node] { - return low.FindItemInOrderedMap(ext, w.Extensions) -} - -// GetRootNode returns the root yaml node of the Workflow object. -func (w *Workflow) GetRootNode() *yaml.Node { - return w.RootNode -} - -// GetKeyNode returns the key yaml node of the Workflow object. 
-func (w *Workflow) GetKeyNode() *yaml.Node { - return w.KeyNode -} - -// Build will extract all properties of the Workflow object. -func (w *Workflow) Build(ctx context.Context, keyNode, root *yaml.Node, idx *index.SpecIndex) error { - root = initBuild(&arazzoBase{ - KeyNode: &w.KeyNode, - RootNode: &w.RootNode, - Reference: &w.Reference, - NodeMap: &w.NodeMap, - Extensions: &w.Extensions, - Index: &w.index, - Context: &w.context, - }, ctx, keyNode, root, idx) - - w.Inputs = extractRawNode(InputsLabel, root) // raw node: JSON Schema - w.DependsOn = extractStringArray(DependsOnLabel, root) - - steps, err := extractArray[Step](ctx, StepsLabel, root, idx) - if err != nil { - return err - } - w.Steps = steps - - successActions, err := extractWorkflowSuccessActions(ctx, SuccessActionsLabel, root, idx) - if err != nil { - return err - } - w.SuccessActions = successActions - - failureActions, err := extractArray[FailureAction](ctx, FailureActionsLabel, root, idx) - if err != nil { - return err - } - w.FailureActions = failureActions - - w.Outputs = extractExpressionsMap(OutputsLabel, root) - - params, err := extractWorkflowParameters(ctx, ParametersLabel, root, idx) - if err != nil { - return err - } - w.Parameters = params - - return nil -} - -// GetExtensions returns all Workflow extensions and satisfies the low.HasExtensions interface. -func (w *Workflow) GetExtensions() *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] { - return w.Extensions -} - -// Hash will return a consistent hash of the Workflow object. 
-func (w *Workflow) Hash() uint64 { - return low.WithHasher(func(h *maphash.Hash) uint64 { - if !w.WorkflowId.IsEmpty() { - h.WriteString(w.WorkflowId.Value) - h.WriteByte(low.HASH_PIPE) - } - if !w.Summary.IsEmpty() { - h.WriteString(w.Summary.Value) - h.WriteByte(low.HASH_PIPE) - } - if !w.Description.IsEmpty() { - h.WriteString(w.Description.Value) - h.WriteByte(low.HASH_PIPE) - } - if !w.Inputs.IsEmpty() { - hashYAMLNode(h, w.Inputs.Value) - } - if !w.DependsOn.IsEmpty() { - for _, d := range w.DependsOn.Value { - h.WriteString(d.Value) - h.WriteByte(low.HASH_PIPE) - } - } - if !w.Steps.IsEmpty() { - for _, s := range w.Steps.Value { - low.HashUint64(h, s.Value.Hash()) - } - } - if !w.SuccessActions.IsEmpty() { - for _, a := range w.SuccessActions.Value { - low.HashUint64(h, a.Value.Hash()) - } - } - if !w.FailureActions.IsEmpty() { - for _, a := range w.FailureActions.Value { - low.HashUint64(h, a.Value.Hash()) - } - } - if !w.Outputs.IsEmpty() { - for pair := w.Outputs.Value.First(); pair != nil; pair = pair.Next() { - h.WriteString(pair.Key().Value) - h.WriteByte(low.HASH_PIPE) - h.WriteString(pair.Value().Value) - h.WriteByte(low.HASH_PIPE) - } - } - if !w.Parameters.IsEmpty() { - for _, p := range w.Parameters.Value { - low.HashUint64(h, p.Value.Hash()) - } - } - hashExtensionsInto(h, w.Extensions) - return h.Sum64() - }) -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/overlay/action.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/overlay/action.go deleted file mode 100644 index 7b9df036489..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/overlay/action.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2022-2025 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package overlay - -import ( - "context" - "hash/maphash" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/index" - "github.com/pb33f/libopenapi/orderedmap" - "github.com/pb33f/libopenapi/utils" - 
"go.yaml.in/yaml/v4" -) - -// Action represents a low-level Overlay Action Object. -// https://spec.openapis.org/overlay/v1.1.0#action-object -type Action struct { - Target low.NodeReference[string] - Description low.NodeReference[string] - Update low.NodeReference[*yaml.Node] - Remove low.NodeReference[bool] - Copy low.NodeReference[string] - Extensions *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] - KeyNode *yaml.Node - RootNode *yaml.Node - index *index.SpecIndex - context context.Context - *low.Reference - low.NodeMap -} - -// GetIndex returns the index.SpecIndex instance attached to the Action object -func (a *Action) GetIndex() *index.SpecIndex { - return a.index -} - -// GetContext returns the context.Context instance used when building the Action object -func (a *Action) GetContext() context.Context { - return a.context -} - -// FindExtension returns a ValueReference containing the extension value, if found. -func (a *Action) FindExtension(ext string) *low.ValueReference[*yaml.Node] { - return low.FindItemInOrderedMap(ext, a.Extensions) -} - -// GetRootNode returns the root yaml node of the Action object -func (a *Action) GetRootNode() *yaml.Node { - return a.RootNode -} - -// GetKeyNode returns the key yaml node of the Action object -func (a *Action) GetKeyNode() *yaml.Node { - return a.KeyNode -} - -// Build will extract extensions for the Action object. 
-func (a *Action) Build(ctx context.Context, keyNode, root *yaml.Node, idx *index.SpecIndex) error { - a.KeyNode = keyNode - root = utils.NodeAlias(root) - a.RootNode = root - utils.CheckForMergeNodes(root) - a.Reference = new(low.Reference) - a.Nodes = low.ExtractNodes(ctx, root) - a.Extensions = low.ExtractExtensions(root) - a.index = idx - a.context = ctx - low.ExtractExtensionNodes(ctx, a.Extensions, a.Nodes) - - // Extract the update node directly if present - for i := 0; i < len(root.Content); i += 2 { - if i+1 < len(root.Content) && root.Content[i].Value == UpdateLabel { - a.Update = low.NodeReference[*yaml.Node]{ - Value: root.Content[i+1], - KeyNode: root.Content[i], - ValueNode: root.Content[i+1], - } - break - } - } - return nil -} - -// GetExtensions returns all Action extensions and satisfies the low.HasExtensions interface. -func (a *Action) GetExtensions() *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] { - return a.Extensions -} - -// Hash will return a consistent Hash of the Action object -func (a *Action) Hash() uint64 { - return low.WithHasher(func(h *maphash.Hash) uint64 { - if !a.Target.IsEmpty() { - h.WriteString(a.Target.Value) - h.WriteByte(low.HASH_PIPE) - } - if !a.Description.IsEmpty() { - h.WriteString(a.Description.Value) - h.WriteByte(low.HASH_PIPE) - } - if !a.Update.IsEmpty() { - h.WriteString(low.GenerateHashString(a.Update.Value)) - h.WriteByte(low.HASH_PIPE) - } - if !a.Remove.IsEmpty() { - low.HashBool(h, a.Remove.Value) - h.WriteByte(low.HASH_PIPE) - } - if !a.Copy.IsEmpty() { - h.WriteString(a.Copy.Value) - h.WriteByte(low.HASH_PIPE) - } - for _, ext := range low.HashExtensions(a.Extensions) { - h.WriteString(ext) - h.WriteByte(low.HASH_PIPE) - } - return h.Sum64() - }) -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/overlay/constants.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/overlay/constants.go deleted file mode 100644 index 0bbcaf019ae..00000000000 --- 
a/vendor/github.com/pb33f/libopenapi/datamodel/low/overlay/constants.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2022-2025 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package overlay - -// Constants for labels used to look up values within OpenAPI Overlay specifications. -// https://spec.openapis.org/overlay/v1.1.0 -const ( - OverlayLabel = "overlay" - InfoLabel = "info" - ExtendsLabel = "extends" - ActionsLabel = "actions" - TitleLabel = "title" - VersionLabel = "version" - TargetLabel = "target" - DescriptionLabel = "description" - UpdateLabel = "update" - RemoveLabel = "remove" - CopyLabel = "copy" -) diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/overlay/info.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/overlay/info.go deleted file mode 100644 index a2781029146..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/overlay/info.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2022-2025 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package overlay - -import ( - "context" - "hash/maphash" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/index" - "github.com/pb33f/libopenapi/orderedmap" - "github.com/pb33f/libopenapi/utils" - "go.yaml.in/yaml/v4" -) - -// Info represents a low-level Overlay Info Object. 
-// https://spec.openapis.org/overlay/v1.1.0#info-object -type Info struct { - Title low.NodeReference[string] - Version low.NodeReference[string] - Description low.NodeReference[string] - Extensions *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] - KeyNode *yaml.Node - RootNode *yaml.Node - index *index.SpecIndex - context context.Context - *low.Reference - low.NodeMap -} - -// GetIndex returns the index.SpecIndex instance attached to the Info object -func (i *Info) GetIndex() *index.SpecIndex { - return i.index -} - -// GetContext returns the context.Context instance used when building the Info object -func (i *Info) GetContext() context.Context { - return i.context -} - -// FindExtension returns a ValueReference containing the extension value, if found. -func (i *Info) FindExtension(ext string) *low.ValueReference[*yaml.Node] { - return low.FindItemInOrderedMap(ext, i.Extensions) -} - -// GetRootNode returns the root yaml node of the Info object -func (i *Info) GetRootNode() *yaml.Node { - return i.RootNode -} - -// GetKeyNode returns the key yaml node of the Info object -func (i *Info) GetKeyNode() *yaml.Node { - return i.KeyNode -} - -// Build will extract extensions for the Info object. -func (i *Info) Build(ctx context.Context, keyNode, root *yaml.Node, idx *index.SpecIndex) error { - i.KeyNode = keyNode - root = utils.NodeAlias(root) - i.RootNode = root - utils.CheckForMergeNodes(root) - i.Reference = new(low.Reference) - i.Nodes = low.ExtractNodes(ctx, root) - i.Extensions = low.ExtractExtensions(root) - i.index = idx - i.context = ctx - low.ExtractExtensionNodes(ctx, i.Extensions, i.Nodes) - return nil -} - -// GetExtensions returns all Info extensions and satisfies the low.HasExtensions interface. 
-func (i *Info) GetExtensions() *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] { - return i.Extensions -} - -// Hash will return a consistent Hash of the Info object -func (inf *Info) Hash() uint64 { - return low.WithHasher(func(h *maphash.Hash) uint64 { - if !inf.Title.IsEmpty() { - h.WriteString(inf.Title.Value) - h.WriteByte(low.HASH_PIPE) - } - if !inf.Version.IsEmpty() { - h.WriteString(inf.Version.Value) - h.WriteByte(low.HASH_PIPE) - } - if !inf.Description.IsEmpty() { - h.WriteString(inf.Description.Value) - h.WriteByte(low.HASH_PIPE) - } - for _, ext := range low.HashExtensions(inf.Extensions) { - h.WriteString(ext) - h.WriteByte(low.HASH_PIPE) - } - return h.Sum64() - }) -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/overlay/overlay.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/overlay/overlay.go deleted file mode 100644 index d0e9e3f0e2c..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/overlay/overlay.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2022-2025 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package overlay - -import ( - "context" - "hash/maphash" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/index" - "github.com/pb33f/libopenapi/orderedmap" - "github.com/pb33f/libopenapi/utils" - "go.yaml.in/yaml/v4" -) - -// Overlay represents a low-level OpenAPI Overlay document. 
-// https://spec.openapis.org/overlay/v1.0.0 -type Overlay struct { - Overlay low.NodeReference[string] - Info low.NodeReference[*Info] - Extends low.NodeReference[string] - Actions low.NodeReference[[]low.ValueReference[*Action]] - Extensions *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] - KeyNode *yaml.Node - RootNode *yaml.Node - index *index.SpecIndex - context context.Context - *low.Reference - low.NodeMap -} - -// GetIndex returns the index.SpecIndex instance attached to the Overlay object -func (o *Overlay) GetIndex() *index.SpecIndex { - return o.index -} - -// GetContext returns the context.Context instance used when building the Overlay object -func (o *Overlay) GetContext() context.Context { - return o.context -} - -// FindExtension returns a ValueReference containing the extension value, if found. -func (o *Overlay) FindExtension(ext string) *low.ValueReference[*yaml.Node] { - return low.FindItemInOrderedMap(ext, o.Extensions) -} - -// GetRootNode returns the root yaml node of the Overlay object -func (o *Overlay) GetRootNode() *yaml.Node { - return o.RootNode -} - -// GetKeyNode returns the key yaml node of the Overlay object -func (o *Overlay) GetKeyNode() *yaml.Node { - return o.KeyNode -} - -// Build will extract all properties of the Overlay document. 
-func (o *Overlay) Build(ctx context.Context, keyNode, root *yaml.Node, idx *index.SpecIndex) error { - o.KeyNode = keyNode - root = utils.NodeAlias(root) - o.RootNode = root - utils.CheckForMergeNodes(root) - o.Reference = new(low.Reference) - o.Nodes = low.ExtractNodes(ctx, root) - o.Extensions = low.ExtractExtensions(root) - o.index = idx - o.context = ctx - low.ExtractExtensionNodes(ctx, o.Extensions, o.Nodes) - - // Extract info object - info, err := low.ExtractObject[*Info](ctx, InfoLabel, root, idx) - if err != nil { - return err - } - o.Info = info - - // Extract actions array - o.Actions = o.extractActions(ctx, root, idx) - - return nil -} - -func (o *Overlay) extractActions(ctx context.Context, root *yaml.Node, idx *index.SpecIndex) low.NodeReference[[]low.ValueReference[*Action]] { - var result low.NodeReference[[]low.ValueReference[*Action]] - - for i := 0; i < len(root.Content); i += 2 { - if i+1 >= len(root.Content) { - break - } - key := root.Content[i] - value := root.Content[i+1] - - if key.Value == ActionsLabel { - result.KeyNode = key - result.ValueNode = value - - if value.Kind != yaml.SequenceNode { - continue - } - - actions := make([]low.ValueReference[*Action], 0, len(value.Content)) - for _, actionNode := range value.Content { - action := &Action{} - _ = low.BuildModel(actionNode, action) - _ = action.Build(ctx, nil, actionNode, idx) - actions = append(actions, low.ValueReference[*Action]{ - Value: action, - ValueNode: actionNode, - }) - } - result.Value = actions - break - } - } - return result -} - -// GetExtensions returns all Overlay extensions and satisfies the low.HasExtensions interface. 
-func (o *Overlay) GetExtensions() *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] { - return o.Extensions -} - -// Hash will return a consistent Hash of the Overlay object -func (o *Overlay) Hash() uint64 { - return low.WithHasher(func(h *maphash.Hash) uint64 { - if !o.Overlay.IsEmpty() { - h.WriteString(o.Overlay.Value) - h.WriteByte(low.HASH_PIPE) - } - if !o.Info.IsEmpty() { - h.WriteString(low.GenerateHashString(o.Info.Value)) - h.WriteByte(low.HASH_PIPE) - } - if !o.Extends.IsEmpty() { - h.WriteString(o.Extends.Value) - h.WriteByte(low.HASH_PIPE) - } - if !o.Actions.IsEmpty() { - for _, action := range o.Actions.Value { - h.WriteString(low.GenerateHashString(action.Value)) - h.WriteByte(low.HASH_PIPE) - } - } - for _, ext := range low.HashExtensions(o.Extensions) { - h.WriteString(ext) - h.WriteByte(low.HASH_PIPE) - } - return h.Sum64() - }) -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/constants.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/constants.go deleted file mode 100644 index 45e373775f7..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/constants.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package v2 - -const ( - DefinitionsLabel = "definitions" - SecurityDefinitionsLabel = "securityDefinitions" - ExamplesLabel = "examples" - HeadersLabel = "headers" - DefaultLabel = "default" - ItemsLabel = "items" - ParametersLabel = "parameters" - PathsLabel = "paths" - GetLabel = "get" - PostLabel = "post" - PatchLabel = "patch" - PutLabel = "put" - DeleteLabel = "delete" - OptionsLabel = "options" - HeadLabel = "head" - SecurityLabel = "security" - ScopesLabel = "scopes" - ResponsesLabel = "responses" -) diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/definitions.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/definitions.go deleted file mode 100644 index 
cbc6156f816..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/definitions.go +++ /dev/null @@ -1,323 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package v2 - -import ( - "context" - "hash/maphash" - "sync" - - "github.com/pb33f/libopenapi/datamodel" - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/datamodel/low/base" - "github.com/pb33f/libopenapi/index" - "github.com/pb33f/libopenapi/orderedmap" - "github.com/pb33f/libopenapi/utils" - "go.yaml.in/yaml/v4" -) - -// ParameterDefinitions is a low-level representation of a Swagger / OpenAPI 2 Parameters Definitions object. -// -// ParameterDefinitions holds parameters to be reused across operations. Parameter definitions can be -// referenced to the ones defined here. It does not define global operation parameters -// - https://swagger.io/specification/v2/#parametersDefinitionsObject -type ParameterDefinitions struct { - Definitions *orderedmap.Map[low.KeyReference[string], low.ValueReference[*Parameter]] -} - -// ResponsesDefinitions is a low-level representation of a Swagger / OpenAPI 2 Responses Definitions object. -// -// ResponsesDefinitions is an object to hold responses to be reused across operations. Response definitions can be -// referenced to the ones defined here. It does not define global operation responses -// - https://swagger.io/specification/v2/#responsesDefinitionsObject -type ResponsesDefinitions struct { - Definitions *orderedmap.Map[low.KeyReference[string], low.ValueReference[*Response]] -} - -// SecurityDefinitions is a low-level representation of a Swagger / OpenAPI 2 Security Definitions object. -// -// A declaration of the security schemes available to be used in the specification. 
This does not enforce the security -// schemes on the operations and only serves to provide the relevant details for each scheme -// - https://swagger.io/specification/v2/#securityDefinitionsObject -type SecurityDefinitions struct { - Definitions *orderedmap.Map[low.KeyReference[string], low.ValueReference[*SecurityScheme]] -} - -// Definitions is a low-level representation of a Swagger / OpenAPI 2 Definitions object -// -// An object to hold data types that can be consumed and produced by operations. These data types can be primitives, -// arrays or models. -// - https://swagger.io/specification/v2/#definitionsObject -type Definitions struct { - Schemas *orderedmap.Map[low.KeyReference[string], low.ValueReference[*base.SchemaProxy]] -} - -// FindSchema will attempt to locate a base.SchemaProxy instance using a name. -func (d *Definitions) FindSchema(schema string) *low.ValueReference[*base.SchemaProxy] { - return low.FindItemInOrderedMap[*base.SchemaProxy](schema, d.Schemas) -} - -// FindParameter will attempt to locate a Parameter instance using a name. -func (pd *ParameterDefinitions) FindParameter(parameter string) *low.ValueReference[*Parameter] { - return low.FindItemInOrderedMap[*Parameter](parameter, pd.Definitions) -} - -// FindResponse will attempt to locate a Response instance using a name. -func (r *ResponsesDefinitions) FindResponse(response string) *low.ValueReference[*Response] { - return low.FindItemInOrderedMap[*Response](response, r.Definitions) -} - -// FindSecurityDefinition will attempt to locate a SecurityScheme using a name. -func (s *SecurityDefinitions) FindSecurityDefinition(securityDef string) *low.ValueReference[*SecurityScheme] { - return low.FindItemInOrderedMap[*SecurityScheme](securityDef, s.Definitions) -} - -// Build will extract all definitions into SchemaProxy instances. 
-func (d *Definitions) Build(ctx context.Context, _, root *yaml.Node, idx *index.SpecIndex) error { - root = utils.NodeAlias(root) - utils.CheckForMergeNodes(root) - type buildInput struct { - label *yaml.Node - value *yaml.Node - } - results := orderedmap.New[low.KeyReference[string], low.ValueReference[*base.SchemaProxy]]() - in := make(chan buildInput) - out := make(chan definitionResult[*base.SchemaProxy]) - done := make(chan struct{}) - var wg sync.WaitGroup - wg.Add(2) // input and output goroutines. - - // TranslatePipeline input. - go func() { - defer func() { - close(in) - wg.Done() - }() - var label *yaml.Node - for i, value := range root.Content { - if i%2 == 0 { - label = value - continue - } - - select { - case in <- buildInput{ - label: label, - value: value, - }: - case <-done: - return - } - } - }() - - // TranslatePipeline output. - go func() { - for { - result, ok := <-out - if !ok { - break - } - - key := low.KeyReference[string]{ - Value: result.k.Value, - KeyNode: result.k, - } - results.Set(key, result.v) - } - close(done) - wg.Done() - }() - - translateFunc := func(value buildInput) (definitionResult[*base.SchemaProxy], error) { - obj, err, _, rv := low.ExtractObjectRaw[*base.SchemaProxy](ctx, value.label, value.value, idx) - if err != nil { - return definitionResult[*base.SchemaProxy]{}, err - } - - v := low.ValueReference[*base.SchemaProxy]{ - Value: obj, ValueNode: value.value, - } - v.SetReference(rv, value.value) - - return definitionResult[*base.SchemaProxy]{k: value.label, v: v}, nil - } - - err := datamodel.TranslatePipeline[buildInput, definitionResult[*base.SchemaProxy]](in, out, translateFunc) - wg.Wait() - if err != nil { - return err - } - - d.Schemas = results - return nil -} - -// Hash will return a consistent Hash of the Definitions object -func (d *Definitions) Hash() uint64 { - return low.WithHasher(func(h *maphash.Hash) uint64 { - for k := range orderedmap.SortAlpha(d.Schemas).KeysFromOldest() { - 
h.WriteString(low.GenerateHashString(d.FindSchema(k.Value).Value)) - h.WriteByte(low.HASH_PIPE) - } - return h.Sum64() - }) -} - -// Build will extract all ParameterDefinitions into Parameter instances. -func (pd *ParameterDefinitions) Build(ctx context.Context, _, root *yaml.Node, idx *index.SpecIndex) error { - errorChan := make(chan error) - resultChan := make(chan definitionResult[*Parameter]) - var defLabel *yaml.Node - totalDefinitions := 0 - buildFunc := func(label *yaml.Node, value *yaml.Node, idx *index.SpecIndex, - r chan definitionResult[*Parameter], e chan error, - ) { - obj, err, _, rv := low.ExtractObjectRaw[*Parameter](ctx, label, value, idx) - if err != nil { - e <- err - } - - v := low.ValueReference[*Parameter]{ - Value: obj, - ValueNode: value, - } - v.SetReference(rv, value) - - r <- definitionResult[*Parameter]{k: label, v: v} - } - for i := range root.Content { - if i%2 == 0 { - defLabel = root.Content[i] - continue - } - totalDefinitions++ - go buildFunc(defLabel, root.Content[i], idx, resultChan, errorChan) - } - - completedDefs := 0 - results := orderedmap.New[low.KeyReference[string], low.ValueReference[*Parameter]]() - for completedDefs < totalDefinitions { - select { - case err := <-errorChan: - return err - case sch := <-resultChan: - completedDefs++ - key := low.KeyReference[string]{ - Value: sch.k.Value, - KeyNode: sch.k, - } - results.Set(key, sch.v) - } - } - pd.Definitions = results - return nil -} - -// re-usable struct for holding results as k/v pairs. -type definitionResult[T any] struct { - k *yaml.Node - v low.ValueReference[T] -} - -// Build will extract all ResponsesDefinitions into Response instances. 
-func (r *ResponsesDefinitions) Build(ctx context.Context, _, root *yaml.Node, idx *index.SpecIndex) error { - errorChan := make(chan error) - resultChan := make(chan definitionResult[*Response]) - var defLabel *yaml.Node - totalDefinitions := 0 - buildFunc := func(label *yaml.Node, value *yaml.Node, idx *index.SpecIndex, - r chan definitionResult[*Response], e chan error, - ) { - obj, err, _, rv := low.ExtractObjectRaw[*Response](ctx, label, value, idx) - if err != nil { - e <- err - } - - v := low.ValueReference[*Response]{ - Value: obj, - ValueNode: value, - } - v.SetReference(rv, value) - - r <- definitionResult[*Response]{k: label, v: v} - } - for i := range root.Content { - if i%2 == 0 { - defLabel = root.Content[i] - continue - } - totalDefinitions++ - go buildFunc(defLabel, root.Content[i], idx, resultChan, errorChan) - } - - completedDefs := 0 - results := orderedmap.New[low.KeyReference[string], low.ValueReference[*Response]]() - for completedDefs < totalDefinitions { - select { - case err := <-errorChan: - return err - case sch := <-resultChan: - completedDefs++ - key := low.KeyReference[string]{ - Value: sch.k.Value, - KeyNode: sch.k, - } - results.Set(key, sch.v) - } - } - r.Definitions = results - return nil -} - -// Build will extract all SecurityDefinitions into SecurityScheme instances. 
-func (s *SecurityDefinitions) Build(ctx context.Context, _, root *yaml.Node, idx *index.SpecIndex) error { - errorChan := make(chan error) - resultChan := make(chan definitionResult[*SecurityScheme]) - var defLabel *yaml.Node - totalDefinitions := 0 - - buildFunc := func(label *yaml.Node, value *yaml.Node, idx *index.SpecIndex, - r chan definitionResult[*SecurityScheme], e chan error, - ) { - obj, err, _, rv := low.ExtractObjectRaw[*SecurityScheme](ctx, label, value, idx) - if err != nil { - e <- err - } - - v := low.ValueReference[*SecurityScheme]{ - Value: obj, ValueNode: value, - } - v.SetReference(rv, value) - - r <- definitionResult[*SecurityScheme]{k: label, v: v} - } - - for i := range root.Content { - if i%2 == 0 { - defLabel = root.Content[i] - continue - } - totalDefinitions++ - go buildFunc(defLabel, root.Content[i], idx, resultChan, errorChan) - } - - completedDefs := 0 - results := orderedmap.New[low.KeyReference[string], low.ValueReference[*SecurityScheme]]() - for completedDefs < totalDefinitions { - select { - case err := <-errorChan: - return err - case sch := <-resultChan: - completedDefs++ - key := low.KeyReference[string]{ - Value: sch.k.Value, - KeyNode: sch.k, - } - results.Set(key, sch.v) - } - } - s.Definitions = results - return nil -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/examples.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/examples.go deleted file mode 100644 index 6e18c58ad99..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/examples.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package v2 - -import ( - "context" - "hash/maphash" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/index" - "github.com/pb33f/libopenapi/orderedmap" - "github.com/pb33f/libopenapi/utils" - "go.yaml.in/yaml/v4" -) - -// Examples represents a low-level Swagger / OpenAPI 2 Example 
object. -// Allows sharing examples for operation responses -// - https://swagger.io/specification/v2/#exampleObject -type Examples struct { - Values *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] -} - -// FindExample attempts to locate an example value, using a key label. -func (e *Examples) FindExample(name string) *low.ValueReference[*yaml.Node] { - return low.FindItemInOrderedMap(name, e.Values) -} - -// Build will extract all examples and will attempt to unmarshal content into a map or slice based on type. -func (e *Examples) Build(_ context.Context, _, root *yaml.Node, _ *index.SpecIndex) error { - root = utils.NodeAlias(root) - utils.CheckForMergeNodes(root) - var keyNode, currNode *yaml.Node - e.Values = orderedmap.New[low.KeyReference[string], low.ValueReference[*yaml.Node]]() - for i := range root.Content { - if i%2 == 0 { - keyNode = root.Content[i] - continue - } - currNode = root.Content[i] - - e.Values.Set( - low.KeyReference[string]{ - Value: keyNode.Value, - KeyNode: keyNode, - }, - low.ValueReference[*yaml.Node]{ - Value: currNode, - ValueNode: currNode, - }, - ) - } - return nil -} - -// Hash will return a consistent Hash of the Examples object -func (e *Examples) Hash() uint64 { - return low.WithHasher(func(h *maphash.Hash) uint64 { - for v := range orderedmap.SortAlpha(e.Values).ValuesFromOldest() { - h.WriteString(low.GenerateHashString(v.Value)) - h.WriteByte(low.HASH_PIPE) - } - return h.Sum64() - }) -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/header.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/header.go deleted file mode 100644 index 890d6dd6d95..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/header.go +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package v2 - -import ( - "context" - "hash/maphash" - "sort" - - "github.com/pb33f/libopenapi/datamodel/low" - 
"github.com/pb33f/libopenapi/index" - "github.com/pb33f/libopenapi/orderedmap" - "github.com/pb33f/libopenapi/utils" - "go.yaml.in/yaml/v4" -) - -// Header Represents a low-level Swagger / OpenAPI 2 Header object. -// -// A Header is essentially identical to a Parameter, except it does not contain 'name' or 'in' properties. -// - https://swagger.io/specification/v2/#headerObject -type Header struct { - Type low.NodeReference[string] - Format low.NodeReference[string] - Description low.NodeReference[string] - Items low.NodeReference[*Items] - CollectionFormat low.NodeReference[string] - Default low.NodeReference[*yaml.Node] - Maximum low.NodeReference[int] - ExclusiveMaximum low.NodeReference[bool] - Minimum low.NodeReference[int] - ExclusiveMinimum low.NodeReference[bool] - MaxLength low.NodeReference[int] - MinLength low.NodeReference[int] - Pattern low.NodeReference[string] - MaxItems low.NodeReference[int] - MinItems low.NodeReference[int] - UniqueItems low.NodeReference[bool] - Enum low.NodeReference[[]low.ValueReference[*yaml.Node]] - MultipleOf low.NodeReference[int] - Extensions *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] -} - -// FindExtension will attempt to locate an extension value using a name lookup. -func (h *Header) FindExtension(ext string) *low.ValueReference[*yaml.Node] { - return low.FindItemInOrderedMap(ext, h.Extensions) -} - -// GetExtensions returns all Header extensions and satisfies the low.HasExtensions interface. -func (h *Header) GetExtensions() *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] { - return h.Extensions -} - -// Build will build out items, extensions and default value from the supplied node. 
-func (h *Header) Build(ctx context.Context, _, root *yaml.Node, idx *index.SpecIndex) error { - root = utils.NodeAlias(root) - utils.CheckForMergeNodes(root) - h.Extensions = low.ExtractExtensions(root) - items, err := low.ExtractObject[*Items](ctx, ItemsLabel, root, idx) - if err != nil { - return err - } - h.Items = items - - _, ln, vn := utils.FindKeyNodeFull(DefaultLabel, root.Content) - if vn != nil { - h.Default = low.NodeReference[*yaml.Node]{ - Value: vn, - KeyNode: ln, - ValueNode: vn, - } - return nil - } - - return nil -} - -// Hash will return a consistent Hash of the Header object -func (hdr *Header) Hash() uint64 { - return low.WithHasher(func(h *maphash.Hash) uint64 { - if hdr.Description.Value != "" { - h.WriteString(hdr.Description.Value) - h.WriteByte(low.HASH_PIPE) - } - if hdr.Type.Value != "" { - h.WriteString(hdr.Type.Value) - h.WriteByte(low.HASH_PIPE) - } - if hdr.Format.Value != "" { - h.WriteString(hdr.Format.Value) - h.WriteByte(low.HASH_PIPE) - } - if hdr.CollectionFormat.Value != "" { - h.WriteString(hdr.CollectionFormat.Value) - h.WriteByte(low.HASH_PIPE) - } - if hdr.Default.Value != nil && !hdr.Default.Value.IsZero() { - h.WriteString(low.GenerateHashString(hdr.Default.Value)) - h.WriteByte(low.HASH_PIPE) - } - low.HashInt64(h, int64(hdr.Maximum.Value)) - h.WriteByte(low.HASH_PIPE) - low.HashInt64(h, int64(hdr.Minimum.Value)) - h.WriteByte(low.HASH_PIPE) - low.HashBool(h, hdr.ExclusiveMinimum.Value) - h.WriteByte(low.HASH_PIPE) - low.HashBool(h, hdr.ExclusiveMaximum.Value) - h.WriteByte(low.HASH_PIPE) - low.HashInt64(h, int64(hdr.MinLength.Value)) - h.WriteByte(low.HASH_PIPE) - low.HashInt64(h, int64(hdr.MaxLength.Value)) - h.WriteByte(low.HASH_PIPE) - low.HashInt64(h, int64(hdr.MinItems.Value)) - h.WriteByte(low.HASH_PIPE) - low.HashInt64(h, int64(hdr.MaxItems.Value)) - h.WriteByte(low.HASH_PIPE) - low.HashInt64(h, int64(hdr.MultipleOf.Value)) - h.WriteByte(low.HASH_PIPE) - low.HashBool(h, hdr.UniqueItems.Value) - 
h.WriteByte(low.HASH_PIPE) - if hdr.Pattern.Value != "" { - h.WriteString(hdr.Pattern.Value) - h.WriteByte(low.HASH_PIPE) - } - for _, ext := range low.HashExtensions(hdr.Extensions) { - h.WriteString(ext) - h.WriteByte(low.HASH_PIPE) - } - - keys := make([]string, len(hdr.Enum.Value)) - for k := range hdr.Enum.Value { - keys[k] = low.ValueToString(hdr.Enum.Value[k].Value) - } - sort.Strings(keys) - for _, key := range keys { - h.WriteString(key) - h.WriteByte(low.HASH_PIPE) - } - - if hdr.Items.Value != nil { - h.WriteString(low.GenerateHashString(hdr.Items.Value)) - h.WriteByte(low.HASH_PIPE) - } - return h.Sum64() - }) -} - -// Getter methods to satisfy SwaggerHeader interface. - -func (h *Header) GetType() *low.NodeReference[string] { - return &h.Type -} - -func (h *Header) GetDescription() *low.NodeReference[string] { - return &h.Description -} - -func (h *Header) GetFormat() *low.NodeReference[string] { - return &h.Format -} - -func (h *Header) GetItems() *low.NodeReference[any] { - i := low.NodeReference[any]{ - KeyNode: h.Items.KeyNode, - ValueNode: h.Items.ValueNode, - Value: h.Items.Value, - } - return &i -} - -func (h *Header) GetCollectionFormat() *low.NodeReference[string] { - return &h.CollectionFormat -} - -func (h *Header) GetDefault() *low.NodeReference[*yaml.Node] { - return &h.Default -} - -func (h *Header) GetMaximum() *low.NodeReference[int] { - return &h.Maximum -} - -func (h *Header) GetExclusiveMaximum() *low.NodeReference[bool] { - return &h.ExclusiveMaximum -} - -func (h *Header) GetMinimum() *low.NodeReference[int] { - return &h.Minimum -} - -func (h *Header) GetExclusiveMinimum() *low.NodeReference[bool] { - return &h.ExclusiveMinimum -} - -func (h *Header) GetMaxLength() *low.NodeReference[int] { - return &h.MaxLength -} - -func (h *Header) GetMinLength() *low.NodeReference[int] { - return &h.MinLength -} - -func (h *Header) GetPattern() *low.NodeReference[string] { - return &h.Pattern -} - -func (h *Header) GetMaxItems() 
*low.NodeReference[int] { - return &h.MaxItems -} - -func (h *Header) GetMinItems() *low.NodeReference[int] { - return &h.MinItems -} - -func (h *Header) GetUniqueItems() *low.NodeReference[bool] { - return &h.UniqueItems -} - -func (h *Header) GetEnum() *low.NodeReference[[]low.ValueReference[*yaml.Node]] { - return &h.Enum -} - -func (h *Header) GetMultipleOf() *low.NodeReference[int] { - return &h.MultipleOf -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/items.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/items.go deleted file mode 100644 index 279fc856768..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/items.go +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package v2 - -import ( - "context" - "hash/maphash" - "sort" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/index" - "github.com/pb33f/libopenapi/orderedmap" - "github.com/pb33f/libopenapi/utils" - "go.yaml.in/yaml/v4" -) - -// Items is a low-level representation of a Swagger / OpenAPI 2 Items object. -// -// Items is a limited subset of JSON-Schema's items object. It is used by parameter definitions that are not -// located in "body". Items, is actually identical to a Header, except it does not have description. 
-// - https://swagger.io/specification/v2/#itemsObject -type Items struct { - Type low.NodeReference[string] - Format low.NodeReference[string] - CollectionFormat low.NodeReference[string] - Items low.NodeReference[*Items] - Default low.NodeReference[*yaml.Node] - Maximum low.NodeReference[int] - ExclusiveMaximum low.NodeReference[bool] - Minimum low.NodeReference[int] - ExclusiveMinimum low.NodeReference[bool] - MaxLength low.NodeReference[int] - MinLength low.NodeReference[int] - Pattern low.NodeReference[string] - MaxItems low.NodeReference[int] - MinItems low.NodeReference[int] - UniqueItems low.NodeReference[bool] - Enum low.NodeReference[[]low.ValueReference[*yaml.Node]] - MultipleOf low.NodeReference[int] - Extensions *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] -} - -// FindExtension will attempt to locate an extension value using a name lookup. -func (i *Items) FindExtension(ext string) *low.ValueReference[*yaml.Node] { - return low.FindItemInOrderedMap(ext, i.Extensions) -} - -// GetExtensions returns all Items extensions and satisfies the low.HasExtensions interface. 
-func (i *Items) GetExtensions() *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] { - return i.Extensions -} - -// Hash will return a consistent Hash of the Items object -func (itm *Items) Hash() uint64 { - return low.WithHasher(func(h *maphash.Hash) uint64 { - if itm.Type.Value != "" { - h.WriteString(itm.Type.Value) - h.WriteByte(low.HASH_PIPE) - } - if itm.Format.Value != "" { - h.WriteString(itm.Format.Value) - h.WriteByte(low.HASH_PIPE) - } - if itm.CollectionFormat.Value != "" { - h.WriteString(itm.CollectionFormat.Value) - h.WriteByte(low.HASH_PIPE) - } - if itm.Default.Value != nil && !itm.Default.Value.IsZero() { - h.WriteString(low.GenerateHashString(itm.Default.Value)) - h.WriteByte(low.HASH_PIPE) - } - low.HashInt64(h, int64(itm.Maximum.Value)) - h.WriteByte(low.HASH_PIPE) - low.HashInt64(h, int64(itm.Minimum.Value)) - h.WriteByte(low.HASH_PIPE) - low.HashBool(h, itm.ExclusiveMinimum.Value) - h.WriteByte(low.HASH_PIPE) - low.HashBool(h, itm.ExclusiveMaximum.Value) - h.WriteByte(low.HASH_PIPE) - low.HashInt64(h, int64(itm.MinLength.Value)) - h.WriteByte(low.HASH_PIPE) - low.HashInt64(h, int64(itm.MaxLength.Value)) - h.WriteByte(low.HASH_PIPE) - low.HashInt64(h, int64(itm.MinItems.Value)) - h.WriteByte(low.HASH_PIPE) - low.HashInt64(h, int64(itm.MaxItems.Value)) - h.WriteByte(low.HASH_PIPE) - low.HashInt64(h, int64(itm.MultipleOf.Value)) - h.WriteByte(low.HASH_PIPE) - low.HashBool(h, itm.UniqueItems.Value) - h.WriteByte(low.HASH_PIPE) - if itm.Pattern.Value != "" { - h.WriteString(itm.Pattern.Value) - h.WriteByte(low.HASH_PIPE) - } - keys := make([]string, len(itm.Enum.Value)) - for k := range itm.Enum.Value { - keys[k] = low.ValueToString(itm.Enum.Value[k].Value) - } - sort.Strings(keys) - for _, key := range keys { - h.WriteString(key) - h.WriteByte(low.HASH_PIPE) - } - - if itm.Items.Value != nil { - h.WriteString(low.GenerateHashString(itm.Items.Value)) - h.WriteByte(low.HASH_PIPE) - } - for _, ext := range 
low.HashExtensions(itm.Extensions) { - h.WriteString(ext) - h.WriteByte(low.HASH_PIPE) - } - return h.Sum64() - }) -} - -// Build will build out items and default value. -func (i *Items) Build(ctx context.Context, _, root *yaml.Node, idx *index.SpecIndex) error { - root = utils.NodeAlias(root) - utils.CheckForMergeNodes(root) - i.Extensions = low.ExtractExtensions(root) - items, iErr := low.ExtractObject[*Items](ctx, ItemsLabel, root, idx) - if iErr != nil { - return iErr - } - i.Items = items - - _, ln, vn := utils.FindKeyNodeFull(DefaultLabel, root.Content) - if vn != nil { - i.Default = low.NodeReference[*yaml.Node]{ - Value: vn, - KeyNode: ln, - ValueNode: vn, - } - return nil - } - return nil -} - -// IsHeader compliance methods - -func (i *Items) GetType() *low.NodeReference[string] { - return &i.Type -} - -func (i *Items) GetFormat() *low.NodeReference[string] { - return &i.Format -} - -func (i *Items) GetItems() *low.NodeReference[any] { - k := low.NodeReference[any]{ - KeyNode: i.Items.KeyNode, - ValueNode: i.Items.ValueNode, - Value: i.Items.Value, - } - return &k -} - -func (i *Items) GetCollectionFormat() *low.NodeReference[string] { - return &i.CollectionFormat -} - -func (i *Items) GetDescription() *low.NodeReference[string] { - return nil // not implemented, but required to align with header contract -} - -func (i *Items) GetDefault() *low.NodeReference[*yaml.Node] { - return &i.Default -} - -func (i *Items) GetMaximum() *low.NodeReference[int] { - return &i.Maximum -} - -func (i *Items) GetExclusiveMaximum() *low.NodeReference[bool] { - return &i.ExclusiveMaximum -} - -func (i *Items) GetMinimum() *low.NodeReference[int] { - return &i.Minimum -} - -func (i *Items) GetExclusiveMinimum() *low.NodeReference[bool] { - return &i.ExclusiveMinimum -} - -func (i *Items) GetMaxLength() *low.NodeReference[int] { - return &i.MaxLength -} - -func (i *Items) GetMinLength() *low.NodeReference[int] { - return &i.MinLength -} - -func (i *Items) GetPattern() 
*low.NodeReference[string] { - return &i.Pattern -} - -func (i *Items) GetMaxItems() *low.NodeReference[int] { - return &i.MaxItems -} - -func (i *Items) GetMinItems() *low.NodeReference[int] { - return &i.MinItems -} - -func (i *Items) GetUniqueItems() *low.NodeReference[bool] { - return &i.UniqueItems -} - -func (i *Items) GetEnum() *low.NodeReference[[]low.ValueReference[*yaml.Node]] { - return &i.Enum -} - -func (i *Items) GetMultipleOf() *low.NodeReference[int] { - return &i.MultipleOf -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/operation.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/operation.go deleted file mode 100644 index 1dcfa481753..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/operation.go +++ /dev/null @@ -1,250 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package v2 - -import ( - "context" - "hash/maphash" - "sort" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/datamodel/low/base" - "github.com/pb33f/libopenapi/index" - "github.com/pb33f/libopenapi/orderedmap" - "github.com/pb33f/libopenapi/utils" - "go.yaml.in/yaml/v4" -) - -// Operation represents a low-level Swagger / OpenAPI 2 Operation object. -// -// It describes a single API operation on a path. 
-// - https://swagger.io/specification/v2/#operationObject -type Operation struct { - Tags low.NodeReference[[]low.ValueReference[string]] - Summary low.NodeReference[string] - Description low.NodeReference[string] - ExternalDocs low.NodeReference[*base.ExternalDoc] - OperationId low.NodeReference[string] - Consumes low.NodeReference[[]low.ValueReference[string]] - Produces low.NodeReference[[]low.ValueReference[string]] - Parameters low.NodeReference[[]low.ValueReference[*Parameter]] - Responses low.NodeReference[*Responses] - Schemes low.NodeReference[[]low.ValueReference[string]] - Deprecated low.NodeReference[bool] - Security low.NodeReference[[]low.ValueReference[*base.SecurityRequirement]] - Extensions *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] -} - -// Build will extract external docs, extensions, parameters, responses and security requirements. -func (o *Operation) Build(ctx context.Context, _, root *yaml.Node, idx *index.SpecIndex) error { - root = utils.NodeAlias(root) - utils.CheckForMergeNodes(root) - o.Extensions = low.ExtractExtensions(root) - - // extract externalDocs - extDocs, dErr := low.ExtractObject[*base.ExternalDoc](ctx, base.ExternalDocsLabel, root, idx) - if dErr != nil { - return dErr - } - o.ExternalDocs = extDocs - - // extract parameters - params, ln, vn, pErr := low.ExtractArray[*Parameter](ctx, ParametersLabel, root, idx) - if pErr != nil { - return pErr - } - if params != nil { - o.Parameters = low.NodeReference[[]low.ValueReference[*Parameter]]{ - Value: params, - KeyNode: ln, - ValueNode: vn, - } - } - - // extract responses - respBody, respErr := low.ExtractObject[*Responses](ctx, ResponsesLabel, root, idx) - if respErr != nil { - return respErr - } - o.Responses = respBody - - // extract security - sec, sln, svn, sErr := low.ExtractArray[*base.SecurityRequirement](ctx, SecurityLabel, root, idx) - if sErr != nil { - return sErr - } - if sec != nil { - o.Security = 
low.NodeReference[[]low.ValueReference[*base.SecurityRequirement]]{ - Value: sec, - KeyNode: sln, - ValueNode: svn, - } - } - return nil -} - -// Hash will return a consistent Hash of the Operation object -func (o *Operation) Hash() uint64 { - return low.WithHasher(func(h *maphash.Hash) uint64 { - if !o.Summary.IsEmpty() { - h.WriteString(o.Summary.Value) - h.WriteByte(low.HASH_PIPE) - } - if !o.Description.IsEmpty() { - h.WriteString(o.Description.Value) - h.WriteByte(low.HASH_PIPE) - } - if !o.OperationId.IsEmpty() { - h.WriteString(o.OperationId.Value) - h.WriteByte(low.HASH_PIPE) - } - if !o.ExternalDocs.IsEmpty() { - h.WriteString(low.GenerateHashString(o.ExternalDocs.Value)) - h.WriteByte(low.HASH_PIPE) - } - if !o.Responses.IsEmpty() { - h.WriteString(low.GenerateHashString(o.Responses.Value)) - h.WriteByte(low.HASH_PIPE) - } - if !o.Deprecated.IsEmpty() { - low.HashBool(h, o.Deprecated.Value) - h.WriteByte(low.HASH_PIPE) - } - var keys []string - keys = make([]string, len(o.Tags.Value)) - for k := range o.Tags.Value { - keys[k] = o.Tags.Value[k].Value - } - sort.Strings(keys) - for _, key := range keys { - h.WriteString(key) - h.WriteByte(low.HASH_PIPE) - } - - keys = make([]string, len(o.Consumes.Value)) - for k := range o.Consumes.Value { - keys[k] = o.Consumes.Value[k].Value - } - sort.Strings(keys) - for _, key := range keys { - h.WriteString(key) - h.WriteByte(low.HASH_PIPE) - } - - keys = make([]string, len(o.Produces.Value)) - for k := range o.Produces.Value { - keys[k] = o.Produces.Value[k].Value - } - sort.Strings(keys) - for _, key := range keys { - h.WriteString(key) - h.WriteByte(low.HASH_PIPE) - } - - keys = make([]string, len(o.Schemes.Value)) - for k := range o.Schemes.Value { - keys[k] = o.Schemes.Value[k].Value - } - sort.Strings(keys) - for _, key := range keys { - h.WriteString(key) - h.WriteByte(low.HASH_PIPE) - } - - keys = make([]string, len(o.Parameters.Value)) - for k := range o.Parameters.Value { - keys[k] = 
low.GenerateHashString(o.Parameters.Value[k].Value) - } - sort.Strings(keys) - for _, key := range keys { - h.WriteString(key) - h.WriteByte(low.HASH_PIPE) - } - - keys = make([]string, len(o.Security.Value)) - for k := range o.Security.Value { - keys[k] = low.GenerateHashString(o.Security.Value[k].Value) - } - sort.Strings(keys) - for _, key := range keys { - h.WriteString(key) - h.WriteByte(low.HASH_PIPE) - } - for _, ext := range low.HashExtensions(o.Extensions) { - h.WriteString(ext) - h.WriteByte(low.HASH_PIPE) - } - return h.Sum64() - }) -} - -// methods to satisfy swagger operations interface - -func (o *Operation) GetTags() low.NodeReference[[]low.ValueReference[string]] { - return o.Tags -} - -func (o *Operation) GetSummary() low.NodeReference[string] { - return o.Summary -} - -func (o *Operation) GetDescription() low.NodeReference[string] { - return o.Description -} - -func (o *Operation) GetExternalDocs() low.NodeReference[any] { - return low.NodeReference[any]{ - ValueNode: o.ExternalDocs.ValueNode, - KeyNode: o.ExternalDocs.KeyNode, - Value: o.ExternalDocs.Value, - } -} - -func (o *Operation) GetOperationId() low.NodeReference[string] { - return o.OperationId -} - -func (o *Operation) GetDeprecated() low.NodeReference[bool] { - return o.Deprecated -} - -func (o *Operation) GetExtensions() *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] { - return o.Extensions -} - -func (o *Operation) GetResponses() low.NodeReference[any] { - return low.NodeReference[any]{ - ValueNode: o.Responses.ValueNode, - KeyNode: o.Responses.KeyNode, - Value: o.Responses.Value, - } -} - -func (o *Operation) GetParameters() low.NodeReference[any] { - return low.NodeReference[any]{ - ValueNode: o.Parameters.ValueNode, - KeyNode: o.Parameters.KeyNode, - Value: o.Parameters.Value, - } -} - -func (o *Operation) GetSecurity() low.NodeReference[any] { - return low.NodeReference[any]{ - ValueNode: o.Security.ValueNode, - KeyNode: o.Security.KeyNode, - Value: 
o.Security.Value, - } -} - -func (o *Operation) GetSchemes() low.NodeReference[[]low.ValueReference[string]] { - return o.Schemes -} - -func (o *Operation) GetProduces() low.NodeReference[[]low.ValueReference[string]] { - return o.Produces -} - -func (o *Operation) GetConsumes() low.NodeReference[[]low.ValueReference[string]] { - return o.Consumes -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/parameter.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/parameter.go deleted file mode 100644 index e38aaf6c449..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/parameter.go +++ /dev/null @@ -1,315 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package v2 - -import ( - "context" - "hash/maphash" - "sort" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/datamodel/low/base" - "github.com/pb33f/libopenapi/index" - "github.com/pb33f/libopenapi/orderedmap" - "github.com/pb33f/libopenapi/utils" - "go.yaml.in/yaml/v4" -) - -// Parameter represents a low-level Swagger / OpenAPI 2 Parameter object. -// -// A unique parameter is defined by a combination of a name and location. -// -// There are five possible parameter types. -// -// Path -// -// Used together with Path Templating, where the parameter value is actually part of the operation's URL. -// This does not include the host or base path of the API. For example, in /items/{itemId}, the path parameter is itemId. -// -// Query -// -// Parameters that are appended to the URL. For example, in /items?id=###, the query parameter is id. -// -// Header -// -// Custom headers that are expected as part of the request. -// -// Body -// -// The payload that's appended to the HTTP request. Since there can only be one payload, there can only be one body parameter. -// The name of the body parameter has no effect on the parameter itself and is used for documentation purposes only. 
-// Since Form parameters are also in the payload, body and form parameters cannot exist together for the same operation. -// -// Form -// -// Used to describe the payload of an HTTP request when either application/x-www-form-urlencoded, multipart/form-data -// or both are used as the content type of the request (in Swagger's definition, the consumes property of an operation). -// This is the only parameter type that can be used to send files, thus supporting the file type. Since form parameters -// are sent in the payload, they cannot be declared together with a body parameter for the same operation. Form -// parameters have a different format based on the content-type used (for further details, -// consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4): -// application/x-www-form-urlencoded - Similar to the format of Query parameters but as a payload. For example, -// foo=1&bar=swagger - both foo and bar are form parameters. This is normally used for simple parameters that are -// being transferred. -// multipart/form-data - each parameter takes a section in the payload with an internal header. For example, for -// the header Content-Disposition: form-data; name="submit-name" the name of the parameter is -// submit-name. 
This type of form parameters is more commonly used for file transfers -// -// https://swagger.io/specification/v2/#parameterObject -type Parameter struct { - Name low.NodeReference[string] - In low.NodeReference[string] - Type low.NodeReference[string] - Format low.NodeReference[string] - Description low.NodeReference[string] - Required low.NodeReference[bool] - AllowEmptyValue low.NodeReference[bool] - Schema low.NodeReference[*base.SchemaProxy] - Items low.NodeReference[*Items] - CollectionFormat low.NodeReference[string] - Default low.NodeReference[*yaml.Node] - Maximum low.NodeReference[int] - ExclusiveMaximum low.NodeReference[bool] - Minimum low.NodeReference[int] - ExclusiveMinimum low.NodeReference[bool] - MaxLength low.NodeReference[int] - MinLength low.NodeReference[int] - Pattern low.NodeReference[string] - MaxItems low.NodeReference[int] - MinItems low.NodeReference[int] - UniqueItems low.NodeReference[bool] - Enum low.NodeReference[[]low.ValueReference[*yaml.Node]] - MultipleOf low.NodeReference[int] - Extensions *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] -} - -// FindExtension attempts to locate a extension value given a name. -func (p *Parameter) FindExtension(ext string) *low.ValueReference[*yaml.Node] { - return low.FindItemInOrderedMap(ext, p.Extensions) -} - -// GetExtensions returns all Parameter extensions and satisfies the low.HasExtensions interface. 
-func (p *Parameter) GetExtensions() *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] { - return p.Extensions -} - -// Build will extract out extensions, schema, items and default value -func (p *Parameter) Build(ctx context.Context, _, root *yaml.Node, idx *index.SpecIndex) error { - root = utils.NodeAlias(root) - utils.CheckForMergeNodes(root) - p.Extensions = low.ExtractExtensions(root) - sch, sErr := base.ExtractSchema(ctx, root, idx) - if sErr != nil { - return sErr - } - if sch != nil { - p.Schema = *sch - } - items, iErr := low.ExtractObject[*Items](ctx, ItemsLabel, root, idx) - if iErr != nil { - return iErr - } - p.Items = items - - _, ln, vn := utils.FindKeyNodeFull(DefaultLabel, root.Content) - if vn != nil { - p.Default = low.NodeReference[*yaml.Node]{ - Value: vn, - KeyNode: ln, - ValueNode: vn, - } - return nil - } - return nil -} - -// Hash will return a consistent Hash of the Parameter object -func (p *Parameter) Hash() uint64 { - return low.WithHasher(func(h *maphash.Hash) uint64 { - if p.Name.Value != "" { - h.WriteString(p.Name.Value) - h.WriteByte(low.HASH_PIPE) - } - if p.In.Value != "" { - h.WriteString(p.In.Value) - h.WriteByte(low.HASH_PIPE) - } - if p.Type.Value != "" { - h.WriteString(p.Type.Value) - h.WriteByte(low.HASH_PIPE) - } - if p.Format.Value != "" { - h.WriteString(p.Format.Value) - h.WriteByte(low.HASH_PIPE) - } - if p.Description.Value != "" { - h.WriteString(p.Description.Value) - h.WriteByte(low.HASH_PIPE) - } - low.HashBool(h, p.Required.Value) - h.WriteByte(low.HASH_PIPE) - low.HashBool(h, p.AllowEmptyValue.Value) - h.WriteByte(low.HASH_PIPE) - if p.Schema.Value != nil { - h.WriteString(low.GenerateHashString(p.Schema.Value.Schema())) - h.WriteByte(low.HASH_PIPE) - } - if p.CollectionFormat.Value != "" { - h.WriteString(p.CollectionFormat.Value) - h.WriteByte(low.HASH_PIPE) - } - if p.Default.Value != nil && !p.Default.Value.IsZero() { - h.WriteString(low.GenerateHashString(p.Default.Value)) - 
h.WriteByte(low.HASH_PIPE) - } - low.HashInt64(h, int64(p.Maximum.Value)) - h.WriteByte(low.HASH_PIPE) - low.HashInt64(h, int64(p.Minimum.Value)) - h.WriteByte(low.HASH_PIPE) - low.HashBool(h, p.ExclusiveMinimum.Value) - h.WriteByte(low.HASH_PIPE) - low.HashBool(h, p.ExclusiveMaximum.Value) - h.WriteByte(low.HASH_PIPE) - low.HashInt64(h, int64(p.MinLength.Value)) - h.WriteByte(low.HASH_PIPE) - low.HashInt64(h, int64(p.MaxLength.Value)) - h.WriteByte(low.HASH_PIPE) - low.HashInt64(h, int64(p.MinItems.Value)) - h.WriteByte(low.HASH_PIPE) - low.HashInt64(h, int64(p.MaxItems.Value)) - h.WriteByte(low.HASH_PIPE) - low.HashInt64(h, int64(p.MultipleOf.Value)) - h.WriteByte(low.HASH_PIPE) - low.HashBool(h, p.UniqueItems.Value) - h.WriteByte(low.HASH_PIPE) - if p.Pattern.Value != "" { - h.WriteString(p.Pattern.Value) - h.WriteByte(low.HASH_PIPE) - } - - keys := make([]string, len(p.Enum.Value)) - for k := range p.Enum.Value { - keys[k] = low.ValueToString(p.Enum.Value[k].Value) - } - sort.Strings(keys) - for _, key := range keys { - h.WriteString(key) - h.WriteByte(low.HASH_PIPE) - } - - for _, ext := range low.HashExtensions(p.Extensions) { - h.WriteString(ext) - h.WriteByte(low.HASH_PIPE) - } - if p.Items.Value != nil { - low.HashUint64(h, p.Items.Value.Hash()) - h.WriteByte(low.HASH_PIPE) - } - return h.Sum64() - }) -} - -// Getters used by what-changed feature to satisfy the SwaggerParameter interface. 
- -func (p *Parameter) GetName() *low.NodeReference[string] { - return &p.Name -} - -func (p *Parameter) GetIn() *low.NodeReference[string] { - return &p.In -} - -func (p *Parameter) GetType() *low.NodeReference[string] { - return &p.Type -} - -func (p *Parameter) GetDescription() *low.NodeReference[string] { - return &p.Description -} - -func (p *Parameter) GetRequired() *low.NodeReference[bool] { - return &p.Required -} - -func (p *Parameter) GetAllowEmptyValue() *low.NodeReference[bool] { - return &p.AllowEmptyValue -} - -func (p *Parameter) GetSchema() *low.NodeReference[any] { - i := low.NodeReference[any]{ - KeyNode: p.Schema.KeyNode, - ValueNode: p.Schema.ValueNode, - Value: p.Schema.Value, - } - return &i -} - -func (p *Parameter) GetFormat() *low.NodeReference[string] { - return &p.Format -} - -func (p *Parameter) GetItems() *low.NodeReference[any] { - i := low.NodeReference[any]{ - KeyNode: p.Items.KeyNode, - ValueNode: p.Items.ValueNode, - Value: p.Items.Value, - } - return &i -} - -func (p *Parameter) GetCollectionFormat() *low.NodeReference[string] { - return &p.CollectionFormat -} - -func (p *Parameter) GetDefault() *low.NodeReference[*yaml.Node] { - return &p.Default -} - -func (p *Parameter) GetMaximum() *low.NodeReference[int] { - return &p.Maximum -} - -func (p *Parameter) GetExclusiveMaximum() *low.NodeReference[bool] { - return &p.ExclusiveMaximum -} - -func (p *Parameter) GetMinimum() *low.NodeReference[int] { - return &p.Minimum -} - -func (p *Parameter) GetExclusiveMinimum() *low.NodeReference[bool] { - return &p.ExclusiveMinimum -} - -func (p *Parameter) GetMaxLength() *low.NodeReference[int] { - return &p.MaxLength -} - -func (p *Parameter) GetMinLength() *low.NodeReference[int] { - return &p.MinLength -} - -func (p *Parameter) GetPattern() *low.NodeReference[string] { - return &p.Pattern -} - -func (p *Parameter) GetMaxItems() *low.NodeReference[int] { - return &p.MaxItems -} - -func (p *Parameter) GetMinItems() *low.NodeReference[int] { - 
return &p.MinItems -} - -func (p *Parameter) GetUniqueItems() *low.NodeReference[bool] { - return &p.UniqueItems -} - -func (p *Parameter) GetEnum() *low.NodeReference[[]low.ValueReference[*yaml.Node]] { - return &p.Enum -} - -func (p *Parameter) GetMultipleOf() *low.NodeReference[int] { - return &p.MultipleOf -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/path_item.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/path_item.go deleted file mode 100644 index 57e29ac0b4b..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/path_item.go +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package v2 - -import ( - "context" - "hash/maphash" - "sort" - "strings" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/index" - "github.com/pb33f/libopenapi/orderedmap" - "github.com/pb33f/libopenapi/utils" - "go.yaml.in/yaml/v4" -) - -var buildPathItemOperationModel = low.BuildModel - -// PathItem represents a low-level Swagger / OpenAPI 2 PathItem object. -// -// Describes the operations available on a single path. A Path Item may be empty, due to ACL constraints. -// The path itself is still exposed to the tooling, but will not know which operations and parameters -// are available. -// -// - https://swagger.io/specification/v2/#pathItemObject -type PathItem struct { - Ref low.NodeReference[string] - Get low.NodeReference[*Operation] - Put low.NodeReference[*Operation] - Post low.NodeReference[*Operation] - Delete low.NodeReference[*Operation] - Options low.NodeReference[*Operation] - Head low.NodeReference[*Operation] - Patch low.NodeReference[*Operation] - Parameters low.NodeReference[[]low.ValueReference[*Parameter]] - Extensions *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] -} - -// FindExtension will attempt to locate an extension given a name. 
-func (p *PathItem) FindExtension(ext string) *low.ValueReference[*yaml.Node] { - return low.FindItemInOrderedMap(ext, p.Extensions) -} - -// GetExtensions returns all PathItem extensions and satisfies the low.HasExtensions interface. -func (p *PathItem) GetExtensions() *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] { - return p.Extensions -} - -// Build will extract extensions, parameters and operations for all methods. Every method is handled -// asynchronously, in order to keep things moving quickly for complex operations. -func (p *PathItem) Build(ctx context.Context, _, root *yaml.Node, idx *index.SpecIndex) error { - root = utils.NodeAlias(root) - utils.CheckForMergeNodes(root) - p.Extensions = low.ExtractExtensions(root) - skip := false - var currentNode *yaml.Node - - var ops []low.NodeReference[*Operation] - - // extract parameters - params, ln, vn, pErr := low.ExtractArray[*Parameter](ctx, ParametersLabel, root, idx) - if pErr != nil { - return pErr - } - if params != nil { - p.Parameters = low.NodeReference[[]low.ValueReference[*Parameter]]{ - Value: params, - KeyNode: ln, - ValueNode: vn, - } - } - - for i, pathNode := range root.Content { - if len(pathNode.Value) >= 2 && (pathNode.Value[0] == 'x' || pathNode.Value[0] == 'X') && pathNode.Value[1] == '-' { - skip = true - continue - } - // because (for some reason) the spec for swagger docs allows for a '$ref' property for path items. - // this is kinda nuts, because '$ref' is a reserved keyword for JSON references, which is ALSO used - // in swagger. Why this choice was made, I do not know. 
- if strings.Contains(strings.ToLower(pathNode.Value), "$ref") { - rn := root.Content[i+1] - p.Ref = low.NodeReference[string]{ - Value: rn.Value, - ValueNode: rn, - KeyNode: pathNode, - } - skip = true - continue - } - if skip { - skip = false - continue - } - if i%2 == 0 { - currentNode = pathNode - continue - } - - // the only thing we now care about is handling operations, filter out anything that's not a verb. - switch currentNode.Value { - case GetLabel: - case PostLabel: - case PutLabel: - case PatchLabel: - case DeleteLabel: - case HeadLabel: - case OptionsLabel: - default: - continue // ignore everything else. - } - - var op Operation - if err := buildPathItemOperationModel(pathNode, &op); err != nil { - return err - } - - opRef := low.NodeReference[*Operation]{ - Value: &op, - KeyNode: currentNode, - ValueNode: pathNode, - } - - ops = append(ops, opRef) - - switch currentNode.Value { - case GetLabel: - p.Get = opRef - case PostLabel: - p.Post = opRef - case PutLabel: - p.Put = opRef - case PatchLabel: - p.Patch = opRef - case DeleteLabel: - p.Delete = opRef - case HeadLabel: - p.Head = opRef - case OptionsLabel: - p.Options = opRef - } - } - - // all operations have been superficially built, - // now we need to build out the operation, we will do this asynchronously for speed. - opBuildChan := make(chan struct{}) - opErrorChan := make(chan error) - - buildOpFunc := func(op low.NodeReference[*Operation], ch chan<- struct{}, errCh chan<- error) { - er := op.Value.Build(ctx, op.KeyNode, op.ValueNode, idx) - if er != nil { - errCh <- er - } - ch <- struct{}{} - } - - if len(ops) <= 0 { - return nil // nothing to do. 
- } - - for _, op := range ops { - go buildOpFunc(op, opBuildChan, opErrorChan) - } - - n := 0 - total := len(ops) - for n < total { - select { - case buildError := <-opErrorChan: - return buildError - case <-opBuildChan: - n++ - } - } - - return nil -} - -// Hash will return a consistent Hash of the PathItem object -func (p *PathItem) Hash() uint64 { - return low.WithHasher(func(h *maphash.Hash) uint64 { - if !p.Get.IsEmpty() { - h.WriteString(GetLabel) - h.WriteByte('-') - h.WriteString(low.GenerateHashString(p.Get.Value)) - h.WriteByte(low.HASH_PIPE) - } - if !p.Put.IsEmpty() { - h.WriteString(PutLabel) - h.WriteByte('-') - h.WriteString(low.GenerateHashString(p.Put.Value)) - h.WriteByte(low.HASH_PIPE) - } - if !p.Post.IsEmpty() { - h.WriteString(PostLabel) - h.WriteByte('-') - h.WriteString(low.GenerateHashString(p.Post.Value)) - h.WriteByte(low.HASH_PIPE) - } - if !p.Delete.IsEmpty() { - h.WriteString(DeleteLabel) - h.WriteByte('-') - h.WriteString(low.GenerateHashString(p.Delete.Value)) - h.WriteByte(low.HASH_PIPE) - } - if !p.Options.IsEmpty() { - h.WriteString(OptionsLabel) - h.WriteByte('-') - h.WriteString(low.GenerateHashString(p.Options.Value)) - h.WriteByte(low.HASH_PIPE) - } - if !p.Head.IsEmpty() { - h.WriteString(HeadLabel) - h.WriteByte('-') - h.WriteString(low.GenerateHashString(p.Head.Value)) - h.WriteByte(low.HASH_PIPE) - } - if !p.Patch.IsEmpty() { - h.WriteString(PatchLabel) - h.WriteByte('-') - h.WriteString(low.GenerateHashString(p.Patch.Value)) - h.WriteByte(low.HASH_PIPE) - } - keys := make([]string, len(p.Parameters.Value)) - for k := range p.Parameters.Value { - keys[k] = low.GenerateHashString(p.Parameters.Value[k].Value) - } - sort.Strings(keys) - for _, key := range keys { - h.WriteString(key) - h.WriteByte(low.HASH_PIPE) - } - for _, ext := range low.HashExtensions(p.Extensions) { - h.WriteString(ext) - h.WriteByte(low.HASH_PIPE) - } - return h.Sum64() - }) -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/paths.go 
b/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/paths.go deleted file mode 100644 index b484635a89f..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/paths.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package v2 - -import ( - "context" - "hash/maphash" - "sync" - - "github.com/pb33f/libopenapi/datamodel" - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/index" - "github.com/pb33f/libopenapi/orderedmap" - "github.com/pb33f/libopenapi/utils" - "go.yaml.in/yaml/v4" -) - -// Paths represents a low-level Swagger / OpenAPI Paths object. -type Paths struct { - PathItems *orderedmap.Map[low.KeyReference[string], low.ValueReference[*PathItem]] - Extensions *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] -} - -// GetExtensions returns all Paths extensions and satisfies the low.HasExtensions interface. -func (p *Paths) GetExtensions() *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] { - return p.Extensions -} - -// FindPath attempts to locate a PathItem instance, given a path key. -func (p *Paths) FindPath(path string) (result *low.ValueReference[*PathItem]) { - for pair := orderedmap.First(p.PathItems); pair != nil; pair = pair.Next() { - if pair.Key().Value == path { - result = pair.ValuePtr() - break - } - } - return result -} - -// FindPathAndKey attempts to locate a PathItem instance, given a path key. -func (p *Paths) FindPathAndKey(path string) (key *low.KeyReference[string], value *low.ValueReference[*PathItem]) { - for pair := orderedmap.First(p.PathItems); pair != nil; pair = pair.Next() { - if pair.Key().Value == path { - key = pair.KeyPtr() - value = pair.ValuePtr() - break - } - } - return key, value -} - -// FindExtension will attempt to locate an extension value given a name. 
-func (p *Paths) FindExtension(ext string) *low.ValueReference[*yaml.Node] { - return low.FindItemInOrderedMap(ext, p.Extensions) -} - -// Build will extract extensions and paths from node. -func (p *Paths) Build(ctx context.Context, _, root *yaml.Node, idx *index.SpecIndex) error { - root = utils.NodeAlias(root) - utils.CheckForMergeNodes(root) - p.Extensions = low.ExtractExtensions(root) - - // Translate YAML nodes to pathsMap using `TranslatePipeline`. - type pathBuildResult struct { - key low.KeyReference[string] - value low.ValueReference[*PathItem] - } - type buildInput struct { - currentNode *yaml.Node - pathNode *yaml.Node - } - pathsMap := orderedmap.New[low.KeyReference[string], low.ValueReference[*PathItem]]() - in := make(chan buildInput) - out := make(chan pathBuildResult) - done := make(chan struct{}) - var wg sync.WaitGroup - wg.Add(2) // input and output goroutines. - - // TranslatePipeline input. - go func() { - defer func() { - close(in) - wg.Done() - }() - skip := false - var currentNode *yaml.Node - for i, pathNode := range root.Content { - if len(pathNode.Value) >= 2 && (pathNode.Value[0] == 'x' || pathNode.Value[0] == 'X') && pathNode.Value[1] == '-' { - skip = true - continue - } - if skip { - skip = false - continue - } - if i%2 == 0 { - currentNode = pathNode - continue - } - - select { - case in <- buildInput{ - currentNode: currentNode, - pathNode: pathNode, - }: - case <-done: - return - } - } - }() - - // TranslatePipeline output. 
- go func() { - for { - result, ok := <-out - if !ok { - break - } - pathsMap.Set(result.key, result.value) - } - close(done) - wg.Done() - }() - - translateFunc := func(value buildInput) (pathBuildResult, error) { - pNode := value.pathNode - cNode := value.currentNode - path := new(PathItem) - _ = low.BuildModel(pNode, path) - err := path.Build(ctx, cNode, pNode, idx) - if err != nil { - return pathBuildResult{}, err - } - return pathBuildResult{ - key: low.KeyReference[string]{ - Value: cNode.Value, - KeyNode: cNode, - }, - value: low.ValueReference[*PathItem]{ - Value: path, - ValueNode: pNode, - }, - }, nil - } - err := datamodel.TranslatePipeline[buildInput, pathBuildResult](in, out, translateFunc) - wg.Wait() - if err != nil { - return err - } - - p.PathItems = pathsMap - return nil -} - -// Hash will return a consistent Hash of the Paths object -func (p *Paths) Hash() uint64 { - return low.WithHasher(func(h *maphash.Hash) uint64 { - for v := range orderedmap.SortAlpha(p.PathItems).ValuesFromOldest() { - h.WriteString(low.GenerateHashString(v.Value)) - h.WriteByte(low.HASH_PIPE) - } - for _, ext := range low.HashExtensions(p.Extensions) { - h.WriteString(ext) - h.WriteByte(low.HASH_PIPE) - } - return h.Sum64() - }) -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/response.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/response.go deleted file mode 100644 index a5bf46303ea..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/response.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package v2 - -import ( - "context" - "hash/maphash" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/datamodel/low/base" - "github.com/pb33f/libopenapi/index" - "github.com/pb33f/libopenapi/orderedmap" - "github.com/pb33f/libopenapi/utils" - "go.yaml.in/yaml/v4" -) - -// Response is a representation of a high-level 
Swagger / OpenAPI 2 Response object, backed by a low-level one. -// -// Response describes a single response from an API Operation -// - https://swagger.io/specification/v2/#responseObject -type Response struct { - Description low.NodeReference[string] - Schema low.NodeReference[*base.SchemaProxy] - Headers low.NodeReference[*orderedmap.Map[low.KeyReference[string], low.ValueReference[*Header]]] - Examples low.NodeReference[*Examples] - Extensions *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] -} - -// FindExtension will attempt to locate an extension value given a key to lookup. -func (r *Response) FindExtension(ext string) *low.ValueReference[*yaml.Node] { - return low.FindItemInOrderedMap(ext, r.Extensions) -} - -// GetExtensions returns all Response extensions and satisfies the low.HasExtensions interface. -func (r *Response) GetExtensions() *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] { - return r.Extensions -} - -// FindHeader will attempt to locate a Header value, given a key -func (r *Response) FindHeader(hType string) *low.ValueReference[*Header] { - return low.FindItemInOrderedMap[*Header](hType, r.Headers.Value) -} - -// Build will extract schema, extensions, examples and headers from node -func (r *Response) Build(ctx context.Context, _, root *yaml.Node, idx *index.SpecIndex) error { - root = utils.NodeAlias(root) - utils.CheckForMergeNodes(root) - r.Extensions = low.ExtractExtensions(root) - s, err := base.ExtractSchema(ctx, root, idx) - if err != nil { - return err - } - if s != nil { - r.Schema = *s - } - - // extract examples - examples, expErr := low.ExtractObject[*Examples](ctx, ExamplesLabel, root, idx) - if expErr != nil { - return expErr - } - r.Examples = examples - - // extract headers - headers, lN, kN, err := low.ExtractMap[*Header](ctx, HeadersLabel, root, idx) - if err != nil { - return err - } - if headers != nil { - r.Headers = 
low.NodeReference[*orderedmap.Map[low.KeyReference[string], low.ValueReference[*Header]]]{ - Value: headers, - KeyNode: lN, - ValueNode: kN, - } - } - return nil -} - -// Hash will return a consistent Hash of the Response object -func (r *Response) Hash() uint64 { - return low.WithHasher(func(h *maphash.Hash) uint64 { - if r.Description.Value != "" { - h.WriteString(r.Description.Value) - h.WriteByte(low.HASH_PIPE) - } - if !r.Schema.IsEmpty() { - h.WriteString(low.GenerateHashString(r.Schema.Value)) - h.WriteByte(low.HASH_PIPE) - } - if !r.Examples.IsEmpty() { - for v := range orderedmap.SortAlpha(r.Examples.Value.Values).ValuesFromOldest() { - h.WriteString(low.GenerateHashString(v.Value)) - h.WriteByte(low.HASH_PIPE) - } - } - for _, ext := range low.HashExtensions(r.Extensions) { - h.WriteString(ext) - h.WriteByte(low.HASH_PIPE) - } - return h.Sum64() - }) -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/responses.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/responses.go deleted file mode 100644 index f389e284a9c..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/responses.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package v2 - -import ( - "context" - "fmt" - "hash/maphash" - "strings" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/index" - "github.com/pb33f/libopenapi/orderedmap" - "github.com/pb33f/libopenapi/utils" - "go.yaml.in/yaml/v4" -) - -// Responses is a low-level representation of a Swagger / OpenAPI 2 Responses object. -type Responses struct { - Codes *orderedmap.Map[low.KeyReference[string], low.ValueReference[*Response]] - Default low.NodeReference[*Response] - Extensions *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] -} - -// GetExtensions returns all Responses extensions and satisfies the low.HasExtensions interface. 
-func (r *Responses) GetExtensions() *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] { - return r.Extensions -} - -// Build will extract default value and extensions from node. -func (r *Responses) Build(ctx context.Context, _, root *yaml.Node, idx *index.SpecIndex) error { - root = utils.NodeAlias(root) - utils.CheckForMergeNodes(root) - r.Extensions = low.ExtractExtensions(root) - - if utils.IsNodeMap(root) { - codes, err := low.ExtractMapNoLookup[*Response](ctx, root, idx) - if err != nil { - return err - } - if codes != nil { - r.Codes = codes - } - def := r.getDefault() - if def != nil { - // default is bundled into codes, pull it out - r.Default = *def - // remove default from codes - r.deleteCode(DefaultLabel) - } - } else { - return fmt.Errorf("responses build failed: vn node is not a map! line %d, col %d", - root.Line, root.Column) - } - return nil -} - -func (r *Responses) getDefault() *low.NodeReference[*Response] { - for code, resp := range r.Codes.FromOldest() { - if strings.ToLower(code.Value) == DefaultLabel { - return &low.NodeReference[*Response]{ - ValueNode: resp.ValueNode, - KeyNode: code.KeyNode, - Value: resp.Value, - } - } - } - return nil -} - -// used to remove default from codes extracted by Build() -func (r *Responses) deleteCode(code string) { - var key *low.KeyReference[string] - if r.Codes != nil { - for pair := orderedmap.First(r.Codes); pair != nil; pair = pair.Next() { - if pair.Key().Value == code { - key = pair.KeyPtr() - break - } - } - } - // should never be nil, but, you never know... science and all that! - if key != nil { - r.Codes.Delete(*key) - } -} - -// FindResponseByCode will attempt to locate a Response instance using an HTTP response code string. 
-func (r *Responses) FindResponseByCode(code string) *low.ValueReference[*Response] { - return low.FindItemInOrderedMap[*Response](code, r.Codes) -} - -// Hash will return a consistent Hash of the Responses object -func (r *Responses) Hash() uint64 { - return low.WithHasher(func(h *maphash.Hash) uint64 { - for _, hash := range low.AppendMapHashes(nil, orderedmap.SortAlpha(r.Codes)) { - h.WriteString(hash) - h.WriteByte(low.HASH_PIPE) - } - if !r.Default.IsEmpty() { - h.WriteString(low.GenerateHashString(r.Default.Value)) - h.WriteByte(low.HASH_PIPE) - } - for _, ext := range low.HashExtensions(r.Extensions) { - h.WriteString(ext) - h.WriteByte(low.HASH_PIPE) - } - return h.Sum64() - }) -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/scopes.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/scopes.go deleted file mode 100644 index 0b22ddea690..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/scopes.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package v2 - -import ( - "context" - "fmt" - "hash/maphash" - "strings" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/index" - "github.com/pb33f/libopenapi/orderedmap" - "github.com/pb33f/libopenapi/utils" - "go.yaml.in/yaml/v4" -) - -// Scopes is a low-level representation of a Swagger / OpenAPI 2 OAuth2 Scopes object. -// -// Scopes lists the available scopes for an OAuth2 security scheme. -// - https://swagger.io/specification/v2/#scopesObject -type Scopes struct { - Values *orderedmap.Map[low.KeyReference[string], low.ValueReference[string]] - Extensions *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] -} - -// GetExtensions returns all Scopes extensions and satisfies the low.HasExtensions interface. 
-func (s *Scopes) GetExtensions() *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] { - return s.Extensions -} - -// FindScope will attempt to locate a scope string using a key. -func (s *Scopes) FindScope(scope string) *low.ValueReference[string] { - return low.FindItemInOrderedMap[string](scope, s.Values) -} - -// Build will extract scope values and extensions from node. -func (s *Scopes) Build(_ context.Context, _, root *yaml.Node, _ *index.SpecIndex) error { - root = utils.NodeAlias(root) - utils.CheckForMergeNodes(root) - s.Extensions = low.ExtractExtensions(root) - valueMap := orderedmap.New[low.KeyReference[string], low.ValueReference[string]]() - if utils.IsNodeMap(root) { - for k := range root.Content { - if k%2 == 0 { - if strings.Contains(root.Content[k].Value, "x-") { - continue - } - valueMap.Set( - low.KeyReference[string]{ - Value: root.Content[k].Value, - KeyNode: root.Content[k], - }, - low.ValueReference[string]{ - Value: root.Content[k+1].Value, - ValueNode: root.Content[k+1], - }, - ) - } - } - s.Values = valueMap - } - return nil -} - -// Hash will return a consistent Hash of the Scopes object -func (s *Scopes) Hash() uint64 { - return low.WithHasher(func(h *maphash.Hash) uint64 { - for k, v := range orderedmap.SortAlpha(s.Values).FromOldest() { - h.WriteString(fmt.Sprintf("%s-%s", k.Value, v.Value)) - h.WriteByte(low.HASH_PIPE) - } - for _, ext := range low.HashExtensions(s.Extensions) { - h.WriteString(ext) - h.WriteByte(low.HASH_PIPE) - } - return h.Sum64() - }) -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/security_scheme.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/security_scheme.go deleted file mode 100644 index 6e93e7d49a1..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/security_scheme.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package v2 - -import ( - "context" - 
"hash/maphash" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/index" - "github.com/pb33f/libopenapi/orderedmap" - "github.com/pb33f/libopenapi/utils" - "go.yaml.in/yaml/v4" -) - -// SecurityScheme is a low-level representation of a Swagger / OpenAPI 2 SecurityScheme object. -// -// SecurityScheme allows the definition of a security scheme that can be used by the operations. Supported schemes are -// basic authentication, an API key (either as a header or as a query parameter) and OAuth2's common flows -// (implicit, password, application and access code) -// - https://swagger.io/specification/v2/#securityDefinitionsObject -type SecurityScheme struct { - Type low.NodeReference[string] - Description low.NodeReference[string] - Name low.NodeReference[string] - In low.NodeReference[string] - Flow low.NodeReference[string] - AuthorizationUrl low.NodeReference[string] - TokenUrl low.NodeReference[string] - Scopes low.NodeReference[*Scopes] - Extensions *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] -} - -// GetExtensions returns all SecurityScheme extensions and satisfies the low.HasExtensions interface. -func (ss *SecurityScheme) GetExtensions() *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] { - return ss.Extensions -} - -// Build will extract extensions and scopes from the node. 
-func (ss *SecurityScheme) Build(ctx context.Context, _, root *yaml.Node, idx *index.SpecIndex) error { - root = utils.NodeAlias(root) - utils.CheckForMergeNodes(root) - ss.Extensions = low.ExtractExtensions(root) - - scopes, sErr := low.ExtractObject[*Scopes](ctx, ScopesLabel, root, idx) - if sErr != nil { - return sErr - } - ss.Scopes = scopes - return nil -} - -// Hash will return a consistent Hash of the SecurityScheme object -func (ss *SecurityScheme) Hash() uint64 { - return low.WithHasher(func(h *maphash.Hash) uint64 { - if !ss.Type.IsEmpty() { - h.WriteString(ss.Type.Value) - h.WriteByte(low.HASH_PIPE) - } - if !ss.Description.IsEmpty() { - h.WriteString(ss.Description.Value) - h.WriteByte(low.HASH_PIPE) - } - if !ss.Name.IsEmpty() { - h.WriteString(ss.Name.Value) - h.WriteByte(low.HASH_PIPE) - } - if !ss.In.IsEmpty() { - h.WriteString(ss.In.Value) - h.WriteByte(low.HASH_PIPE) - } - if !ss.Flow.IsEmpty() { - h.WriteString(ss.Flow.Value) - h.WriteByte(low.HASH_PIPE) - } - if !ss.AuthorizationUrl.IsEmpty() { - h.WriteString(ss.AuthorizationUrl.Value) - h.WriteByte(low.HASH_PIPE) - } - if !ss.TokenUrl.IsEmpty() { - h.WriteString(ss.TokenUrl.Value) - h.WriteByte(low.HASH_PIPE) - } - if !ss.Scopes.IsEmpty() { - h.WriteString(low.GenerateHashString(ss.Scopes.Value)) - h.WriteByte(low.HASH_PIPE) - } - for _, ext := range low.HashExtensions(ss.Extensions) { - h.WriteString(ext) - h.WriteByte(low.HASH_PIPE) - } - return h.Sum64() - }) -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/swagger.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/swagger.go deleted file mode 100644 index 835836ad28e..00000000000 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/v2/swagger.go +++ /dev/null @@ -1,370 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -// Package v2 represents all Swagger / OpenAPI 2 low-level models. 
-// -// Low-level models are more difficult to navigate than higher-level models, however they are packed with all the -// raw AST and node data required to perform any kind of analysis on the underlying data. -// -// Every property is wrapped in a NodeReference or a KeyReference or a ValueReference. -// -// IMPORTANT: As a general rule, Swagger / OpenAPI 2 should be avoided for new projects. -package v2 - -import ( - "context" - "errors" - "path/filepath" - - "github.com/pb33f/libopenapi/datamodel" - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/datamodel/low/base" - "github.com/pb33f/libopenapi/index" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// processes a property of a Swagger document asynchronously using bool and error channels for signals. -type documentFunction func(ctx context.Context, root *yaml.Node, doc *Swagger, idx *index.SpecIndex, c chan<- struct{}, e chan<- error) - -// Swagger represents a high-level Swagger / OpenAPI 2 document. An instance of Swagger is the root of the specification. -type Swagger struct { - // Swagger is the version of Swagger / OpenAPI being used, extracted from the 'swagger: 2.x' definition. - Swagger low.ValueReference[string] - - // Info represents a specification Info definition. - // Provides metadata about the API. The metadata can be used by the clients if needed. - // - https://swagger.io/specification/v2/#infoObject - Info low.NodeReference[*base.Info] - - // Host is The host (name or ip) serving the API. This MUST be the host only and does not include the scheme nor - // sub-paths. It MAY include a port. If the host is not included, the host serving the documentation is to be used - // (including the port). The host does not support path templating. - Host low.NodeReference[string] - - // BasePath is The base path on which the API is served, which is relative to the host. If it is not included, - // the API is served directly under the host. 
The value MUST start with a leading slash (/). - // The basePath does not support path templating. - BasePath low.NodeReference[string] - - // Schemes represents the transfer protocol of the API. Requirements MUST be from the list: "http", "https", "ws", "wss". - // If the schemes is not included, the default scheme to be used is the one used to access - // the Swagger definition itself. - Schemes low.NodeReference[[]low.ValueReference[string]] - - // Consumes is a list of MIME types the APIs can consume. This is global to all APIs but can be overridden on - // specific API calls. Value MUST be as described under Mime Types. - Consumes low.NodeReference[[]low.ValueReference[string]] - - // Produces is a list of MIME types the APIs can produce. This is global to all APIs but can be overridden on - // specific API calls. Value MUST be as described under Mime Types. - Produces low.NodeReference[[]low.ValueReference[string]] - - // Paths are the paths and operations for the API. Perhaps the most important part of the specification. - // - https://swagger.io/specification/v2/#pathsObject - Paths low.NodeReference[*Paths] - - // Definitions is an object to hold data types produced and consumed by operations. It's composed of Schema instances - // - https://swagger.io/specification/v2/#definitionsObject - Definitions low.NodeReference[*Definitions] - - // SecurityDefinitions represents security scheme definitions that can be used across the specification. - // - https://swagger.io/specification/v2/#securityDefinitionsObject - SecurityDefinitions low.NodeReference[*SecurityDefinitions] - - // Parameters is an object to hold parameters that can be used across operations. - // This property does not define global parameters for all operations. - // - https://swagger.io/specification/v2/#parametersDefinitionsObject - Parameters low.NodeReference[*ParameterDefinitions] - - // Responses is an object to hold responses that can be used across operations. 
- // This property does not define global responses for all operations. - // - https://swagger.io/specification/v2/#responsesDefinitionsObject - Responses low.NodeReference[*ResponsesDefinitions] - - // Security is a declaration of which security schemes are applied for the API as a whole. The list of values - // describes alternative security schemes that can be used (that is, there is a logical OR between the security - // requirements). Individual operations can override this definition. - // - https://swagger.io/specification/v2/#securityRequirementObject - Security low.NodeReference[[]low.ValueReference[*base.SecurityRequirement]] - - // Tags are A list of tags used by the specification with additional metadata. - // The order of the tags can be used to reflect on their order by the parsing tools. Not all tags that are used - // by the Operation Object must be declared. The tags that are not declared may be organized randomly or based - // on the tools' logic. Each tag name in the list MUST be unique. - // - https://swagger.io/specification/v2/#tagObject - Tags low.NodeReference[[]low.ValueReference[*base.Tag]] - - // ExternalDocs is an instance of base.ExternalDoc for.. well, obvious really, innit mate? - ExternalDocs low.NodeReference[*base.ExternalDoc] - - // Extensions contains all custom extensions defined for the top-level document. - Extensions *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] - - // Index is a reference to the index.SpecIndex that was created for the document and used - // as a guide when building out the Document. Ideal if further processing is required on the model and - // the original details are required to continue the work. - // - // This property is not a part of the OpenAPI schema, this is custom to libopenapi. - Index *index.SpecIndex - - // SpecInfo is a reference to the datamodel.SpecInfo instance created when the specification was read. 
- // - // This property is not a part of the OpenAPI schema, this is custom to libopenapi. - SpecInfo *datamodel.SpecInfo - - // Rolodex is a reference to the index.Rolodex instance created when the specification was read. - // The rolodex is used to look up references from file systems (local or remote) - Rolodex *index.Rolodex -} - -// FindExtension locates an extension from the root of the Swagger document. -func (s *Swagger) FindExtension(ext string) *low.ValueReference[*yaml.Node] { - return low.FindItemInOrderedMap(ext, s.Extensions) -} - -// GetExtensions returns all Swagger/Top level extensions and satisfies the low.HasExtensions interface. -func (s *Swagger) GetExtensions() *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] { - return s.Extensions -} - -// CreateDocumentFromConfig will create a new Swagger document from the provided SpecInfo and DocumentConfiguration. -func CreateDocumentFromConfig(info *datamodel.SpecInfo, - configuration *datamodel.DocumentConfiguration, -) (*Swagger, error) { - return createDocument(info, configuration) -} - -func createDocument(info *datamodel.SpecInfo, config *datamodel.DocumentConfiguration) (*Swagger, error) { - doc := Swagger{Swagger: low.ValueReference[string]{Value: info.Version, ValueNode: info.RootNode}} - doc.Extensions = low.ExtractExtensions(info.RootNode.Content[0]) - - // create an index config and shadow the document configuration. 
- idxConfig := index.CreateClosedAPIIndexConfig() - idxConfig.SpecInfo = info - idxConfig.IgnoreArrayCircularReferences = config.IgnoreArrayCircularReferences - idxConfig.IgnorePolymorphicCircularReferences = config.IgnorePolymorphicCircularReferences - idxConfig.AllowUnknownExtensionContentDetection = config.AllowUnknownExtensionContentDetection - idxConfig.SkipExternalRefResolution = config.SkipExternalRefResolution - idxConfig.AvoidCircularReferenceCheck = true - idxConfig.BaseURL = config.BaseURL - idxConfig.BasePath = config.BasePath - idxConfig.Logger = config.Logger - idxConfig.ExcludeExtensionRefs = config.ExcludeExtensionRefs - rolodex := index.NewRolodex(idxConfig) - rolodex.SetRootNode(info.RootNode) - doc.Rolodex = rolodex - - // If basePath is provided, add a local filesystem to the rolodex. - if idxConfig.BasePath != "" { - var cwd string - cwd, _ = filepath.Abs(config.BasePath) - // if a supplied local filesystem is provided, add it to the rolodex. - if config.LocalFS != nil { - var localFS index.RolodexFS - if fs, ok := config.LocalFS.(index.RolodexFS); ok { - localFS = fs - } else { - // wrap a plain fs.FS so it can be indexed. - localFSConf := index.LocalFSConfig{ - BaseDirectory: cwd, - IndexConfig: idxConfig, - FileFilters: config.FileFilter, - DirFS: config.LocalFS, - } - - localFS, _ = index.NewLocalFSWithConfig(&localFSConf) - idxConfig.AllowFileLookup = true - } - - rolodex.AddLocalFS(cwd, localFS) - } else { - - // create a local filesystem - localFSConf := index.LocalFSConfig{ - BaseDirectory: cwd, - IndexConfig: idxConfig, - FileFilters: config.FileFilter, - } - fileFS, _ := index.NewLocalFSWithConfig(&localFSConf) - idxConfig.AllowFileLookup = true - - // add the filesystem to the rolodex - rolodex.AddLocalFS(cwd, fileFS) - } - } - - // if base url is provided, add a remote filesystem to the rolodex. 
- if idxConfig.BaseURL != nil { - - // create a remote filesystem - remoteFS, _ := index.NewRemoteFSWithConfig(idxConfig) - if config.RemoteURLHandler != nil { - remoteFS.RemoteHandlerFunc = config.RemoteURLHandler - } - idxConfig.AllowRemoteLookup = true - - // add to the rolodex - rolodex.AddRemoteFS(config.BaseURL.String(), remoteFS) - - } - - doc.Rolodex = rolodex - - var errs []error - - // index all the things! - _ = rolodex.IndexTheRolodex(context.Background()) - - // check for circular references - if !config.SkipCircularReferenceCheck { - rolodex.CheckForCircularReferences() - } - - // extract errors - roloErrs := rolodex.GetCaughtErrors() - if roloErrs != nil { - errs = append(errs, roloErrs...) - } - - // set the index on the document. - doc.Index = rolodex.GetRootIndex() - doc.SpecInfo = info - - // build out swagger scalar variables. - _ = low.BuildModel(info.RootNode.Content[0], &doc) - - ctx := context.Background() - - // extract externalDocs - extDocs, err := low.ExtractObject[*base.ExternalDoc](ctx, base.ExternalDocsLabel, info.RootNode, rolodex.GetRootIndex()) - if err != nil { - errs = append(errs, err) - } - - doc.ExternalDocs = extDocs - - extractionFuncs := []documentFunction{ - extractInfo, - extractPaths, - extractDefinitions, - extractParamDefinitions, - extractResponsesDefinitions, - extractSecurityDefinitions, - extractTags, - extractSecurity, - } - doneChan := make(chan struct{}) - errChan := make(chan error) - for i := range extractionFuncs { - go extractionFuncs[i](ctx, info.RootNode.Content[0], &doc, rolodex.GetRootIndex(), doneChan, errChan) - } - completedExtractions := 0 - for completedExtractions < len(extractionFuncs) { - select { - case <-doneChan: - completedExtractions++ - case e := <-errChan: - completedExtractions++ - errs = append(errs, e) - } - } - - return &doc, errors.Join(errs...) 
-} - -func (s *Swagger) GetExternalDocs() *low.NodeReference[any] { - return &low.NodeReference[any]{ - KeyNode: s.ExternalDocs.KeyNode, - ValueNode: s.ExternalDocs.ValueNode, - Value: s.ExternalDocs.Value, - } -} - -func extractInfo(ctx context.Context, root *yaml.Node, doc *Swagger, idx *index.SpecIndex, c chan<- struct{}, e chan<- error) { - info, err := low.ExtractObject[*base.Info](ctx, base.InfoLabel, root, idx) - if err != nil { - e <- err - return - } - doc.Info = info - c <- struct{}{} -} - -func extractPaths(ctx context.Context, root *yaml.Node, doc *Swagger, idx *index.SpecIndex, c chan<- struct{}, e chan<- error) { - paths, err := low.ExtractObject[*Paths](ctx, PathsLabel, root, idx) - if err != nil { - e <- err - return - } - doc.Paths = paths - c <- struct{}{} -} - -func extractDefinitions(ctx context.Context, root *yaml.Node, doc *Swagger, idx *index.SpecIndex, c chan<- struct{}, e chan<- error) { - def, err := low.ExtractObject[*Definitions](ctx, DefinitionsLabel, root, idx) - if err != nil { - e <- err - return - } - doc.Definitions = def - c <- struct{}{} -} - -func extractParamDefinitions(ctx context.Context, root *yaml.Node, doc *Swagger, idx *index.SpecIndex, c chan<- struct{}, e chan<- error) { - param, err := low.ExtractObject[*ParameterDefinitions](ctx, ParametersLabel, root, idx) - if err != nil { - e <- err - return - } - doc.Parameters = param - c <- struct{}{} -} - -func extractResponsesDefinitions(ctx context.Context, root *yaml.Node, doc *Swagger, idx *index.SpecIndex, c chan<- struct{}, e chan<- error) { - resp, err := low.ExtractObject[*ResponsesDefinitions](ctx, ResponsesLabel, root, idx) - if err != nil { - e <- err - return - } - doc.Responses = resp - c <- struct{}{} -} - -func extractSecurityDefinitions(ctx context.Context, root *yaml.Node, doc *Swagger, idx *index.SpecIndex, c chan<- struct{}, e chan<- error) { - sec, err := low.ExtractObject[*SecurityDefinitions](ctx, SecurityDefinitionsLabel, root, idx) - if err != nil { - e 
<- err - return - } - doc.SecurityDefinitions = sec - c <- struct{}{} -} - -func extractTags(ctx context.Context, root *yaml.Node, doc *Swagger, idx *index.SpecIndex, c chan<- struct{}, e chan<- error) { - tags, ln, vn, err := low.ExtractArray[*base.Tag](ctx, base.TagsLabel, root, idx) - if err != nil { - e <- err - return - } - doc.Tags = low.NodeReference[[]low.ValueReference[*base.Tag]]{ - Value: tags, - KeyNode: ln, - ValueNode: vn, - } - c <- struct{}{} -} - -func extractSecurity(ctx context.Context, root *yaml.Node, doc *Swagger, idx *index.SpecIndex, c chan<- struct{}, e chan<- error) { - sec, ln, vn, err := low.ExtractArray[*base.SecurityRequirement](ctx, SecurityLabel, root, idx) - if err != nil { - e <- err - return - } - doc.Security = low.NodeReference[[]low.ValueReference[*base.SecurityRequirement]]{ - Value: sec, - KeyNode: ln, - ValueNode: vn, - } - c <- struct{}{} -} diff --git a/vendor/github.com/pb33f/libopenapi/datamodel/low/v3/create_document.go b/vendor/github.com/pb33f/libopenapi/datamodel/low/v3/create_document.go index 953252c9f60..2a51fe72f2c 100644 --- a/vendor/github.com/pb33f/libopenapi/datamodel/low/v3/create_document.go +++ b/vendor/github.com/pb33f/libopenapi/datamodel/low/v3/create_document.go @@ -50,6 +50,7 @@ func createDocument(info *datamodel.SpecInfo, config *datamodel.DocumentConfigur idxConfig.AllowUnknownExtensionContentDetection = config.AllowUnknownExtensionContentDetection idxConfig.TransformSiblingRefs = config.TransformSiblingRefs idxConfig.SkipExternalRefResolution = config.SkipExternalRefResolution + idxConfig.ResolveNestedRefsWithDocumentContext = config.ResolveNestedRefsWithDocumentContext idxConfig.AvoidCircularReferenceCheck = true // handle $self field for OpenAPI 3.2+ documents diff --git a/vendor/github.com/pb33f/libopenapi/document.go b/vendor/github.com/pb33f/libopenapi/document.go deleted file mode 100644 index 65b571a777c..00000000000 --- a/vendor/github.com/pb33f/libopenapi/document.go +++ /dev/null @@ 
-1,412 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -// Package libopenapi is a library containing tools for reading and in and manipulating Swagger (OpenAPI 2) and OpenAPI 3+ -// specifications into strongly typed documents. These documents have two APIs, a high level (porcelain) and a -// low level (plumbing). -// -// Every single type has a 'GoLow()' method that drops down from the high API to the low API. Once in the low API, -// the entire original document data is available, including all comments, line and column numbers for keys and values. -// -// There are two steps to creating a using Document. First, create a new Document using the NewDocument() method -// and pass in a specification []byte array that contains the OpenAPI Specification. It doesn't matter if YAML or JSON -// are used. -package libopenapi - -import ( - "errors" - "fmt" - - lowbase "github.com/pb33f/libopenapi/datamodel/low/base" - - "github.com/pb33f/libopenapi/index" - - "github.com/pb33f/libopenapi/datamodel" - v2high "github.com/pb33f/libopenapi/datamodel/high/v2" - v3high "github.com/pb33f/libopenapi/datamodel/high/v3" - v2low "github.com/pb33f/libopenapi/datamodel/low/v2" - v3low "github.com/pb33f/libopenapi/datamodel/low/v3" - "github.com/pb33f/libopenapi/utils" - what_changed "github.com/pb33f/libopenapi/what-changed" - "github.com/pb33f/libopenapi/what-changed/model" - "go.yaml.in/yaml/v4" -) - -// Document Represents an OpenAPI specification that can then be rendered into a model or serialized back into -// a string document after being manipulated. -type Document interface { - // GetVersion will return the exact version of the OpenAPI specification set for the document. - GetVersion() string - - // GetRolodex will return the Rolodex instance that was used to load the document. - GetRolodex() *index.Rolodex - - // GetSpecInfo will return the *datamodel.SpecInfo instance that contains all specification information. 
- GetSpecInfo() *datamodel.SpecInfo - - // SetConfiguration will set the configuration for the document. This allows for finer grained control over - // allowing remote or local references, as well as a BaseURL to allow for relative file references. - SetConfiguration(configuration *datamodel.DocumentConfiguration) - - // GetConfiguration will return the configuration for the document. This allows for finer grained control over - // allowing remote or local references, as well as a BaseURL to allow for relative file references. - GetConfiguration() *datamodel.DocumentConfiguration - - // BuildV2Model will build out a Swagger (version 2) model from the specification used to create the document - // If there are any issues, then no model will be returned, instead a slice of errors will explain all the - // problems that occurred. This method will only support version 2 specifications and will throw an error for - // any other types. - BuildV2Model() (*DocumentModel[v2high.Swagger], error) - - // BuildV3Model will build out an OpenAPI (version 3+) model from the specification used to create the document - // If there are any issues, then no model will be returned, instead a slice of errors will explain all the - // problems that occurred. This method will only support version 3 specifications and will throw an error for - // any other types. - BuildV3Model() (*DocumentModel[v3high.Document], error) - - // RenderAndReload will render the high level model as it currently exists (including any mutations, additions - // and removals to and from any object in the tree). It will then reload the low level model with the new bytes - // extracted from the model that was re-rendered. This is useful if you want to make changes to the high level model - // and then 'reload' the model into memory, so that line numbers and column numbers are correct and all update - // according to the changes made. 
- // - // The method returns the raw YAML bytes that were rendered, and any errors that occurred during rebuilding of the model. - // This is a destructive operation, and will re-build the entire model from scratch using the new bytes, so any - // references to the old model will be lost. The second return is the new Document that was created, and the third - // return is any errors hit trying to re-render. - // - // **IMPORTANT** This method only supports OpenAPI Documents. The Swagger model will not support mutations correctly - // and will not update when called. This choice has been made because we don't want to continue supporting Swagger, - // it's too old, so it should be motivation to upgrade to OpenAPI 3. - RenderAndReload() ([]byte, Document, *DocumentModel[v3high.Document], error) - - // Render will render the high level model as it currently exists (including any mutations, additions - // and removals to and from any object in the tree). Unlike RenderAndReload, Render will simply print the state - // of the model as it currently exists, and will not re-load the model into memory. It means that the low-level and - // the high-level models will be out of sync, and the index will only be useful for the original document. - // - // Why use this instead of RenderAndReload? - // - // The simple answer is that RenderAndReload is a destructive operation, and will re-build the entire model from - // scratch using the new bytes, which is desirable if you want to make changes to the high level model and then - // 'reload' the model into memory, so that line numbers and column numbers are correct and the index is accurate. - // However, if you don't care about the low-level model, and you're not using the index, and you just want to - // print the state of the model as it currently exists, then Render() is the method to use. - // **IMPORTANT** This method only supports OpenAPI Documents. 
- Render() ([]byte, error) - - // Serialize will re-render a Document back into a []byte slice. If any modifications have been made to the - // underlying data model using low level APIs, then those changes will be reflected in the serialized output. - // - // It's important to know that this should not be used if the resolver has been used on a specification to - // for anything other than checking for circular references. If the resolver is used to resolve the spec, then this - // method may spin out forever if the specification backing the model has circular references. - // Deprecated: This method is deprecated and will be removed in a future release. Use RenderAndReload() instead. - // This method does not support mutations correctly. - Serialize() ([]byte, error) - - // Release nils all internal state so that the YAML tree, SpecIndex, Rolodex, - // and model objects can be garbage-collected even if something still holds - // a reference to the Document interface value. - Release() -} - -type document struct { - rolodex *index.Rolodex - version string - info *datamodel.SpecInfo - config *datamodel.DocumentConfiguration - highOpenAPI3Model *DocumentModel[v3high.Document] - highSwaggerModel *DocumentModel[v2high.Swagger] -} - -// DocumentModel represents either a Swagger document (version 2) or an OpenAPI document (version 3) that is -// built from a parent Document. -type DocumentModel[T v2high.Swagger | v3high.Document] struct { - Model T - Index *index.SpecIndex // index created from the document. -} - -// NewDocument will create a new OpenAPI instance from an OpenAPI specification []byte array. If anything goes -// wrong when parsing, reading or processing the OpenAPI specification, there will be no document returned, instead -// a slice of errors will be returned that explain everything that failed. -// -// After creating a Document, the option to build a model becomes available, in either V2 or V3 flavors. 
The models -// are about 70% different between Swagger and OpenAPI 3, which is why two different models are available. -// -// This function will NOT automatically follow (meaning load) any file or remote references that are found. -// -// If this isn't the behavior you want, then you can use the NewDocumentWithConfiguration() function instead, which allows you to set a configuration that -// will allow you to control if file or remote references are allowed. In particular the `AllowFileReferences` and `FollowRemoteReferences` -// properties. -func NewDocument(specByteArray []byte) (Document, error) { - return NewDocumentWithTypeCheck(specByteArray, false) -} - -func NewDocumentWithTypeCheck(specByteArray []byte, bypassCheck bool) (Document, error) { - info, err := datamodel.ExtractSpecInfoWithDocumentCheck(specByteArray, bypassCheck) - if err != nil { - return nil, err - } - d := new(document) - d.version = info.Version - d.info = info - return d, nil -} - -// NewDocumentWithConfiguration is the same as NewDocument, except it's a convenience function that calls NewDocument -// under the hood and then calls SetConfiguration() on the returned Document. -func NewDocumentWithConfiguration(specByteArray []byte, configuration *datamodel.DocumentConfiguration) (Document, error) { - var info *datamodel.SpecInfo - var err error - - if configuration != nil { - info, err = datamodel.ExtractSpecInfoWithConfig(specByteArray, configuration) - } else { - info, err = datamodel.ExtractSpecInfoWithDocumentCheck(specByteArray, false) - } - if err != nil { - return nil, err - } - - d := new(document) - d.version = info.Version - d.info = info - d.config = configuration - return d, nil -} - -func (d *document) Release() { - if d == nil { - return - } - if d.info != nil { - d.info.Release() - d.info = nil - } - // This method intentionally does not call SpecIndex.Release(). Low-level - // model objects (Schema, PathItem, etc.) 
retain their own references to the - // SpecIndex and require its config and root node for hashing and comparison - // operations that may run after a Document is released. Callers that own the - // full lifecycle should call SpecIndex.Release() separately once all model - // consumers are finished. - d.rolodex = nil - d.config = nil - d.highOpenAPI3Model = nil - d.highSwaggerModel = nil -} - -func (d *document) GetRolodex() *index.Rolodex { - return d.rolodex -} - -func (d *document) GetVersion() string { - return d.version -} - -func (d *document) GetSpecInfo() *datamodel.SpecInfo { - return d.info -} - -func (d *document) GetConfiguration() *datamodel.DocumentConfiguration { - return d.config -} - -func (d *document) SetConfiguration(configuration *datamodel.DocumentConfiguration) { - d.config = configuration -} - -func (d *document) Serialize() ([]byte, error) { - if d.info == nil { - return nil, fmt.Errorf("unable to serialize, document has not yet been initialized") - } - if d.info.SpecFileType == datamodel.YAMLFileType { - return yaml.Marshal(d.info.RootNode) - } else { - yamlData, _ := yaml.Marshal(d.info.RootNode) - return utils.ConvertYAMLtoJSON(yamlData) - } -} - -func (d *document) RenderAndReload() ([]byte, Document, *DocumentModel[v3high.Document], error) { - newBytes, rerr := d.Render() - if rerr != nil { - return nil, nil, nil, rerr - } - - newDoc, err := NewDocumentWithConfiguration(newBytes, d.config) - if err != nil { - return nil, nil, nil, err - } - - // build the model. - m, buildErrs := newDoc.BuildV3Model() - if buildErrs != nil { - return newBytes, newDoc, m, buildErrs - } - // this document is now dead, long live the new document! - return newBytes, newDoc, m, nil -} - -func (d *document) Render() ([]byte, error) { - if d.highOpenAPI3Model == nil { - // check for Swagger model first, to give a more helpful error message. 
- if d.highSwaggerModel != nil { - return nil, errors.New("this method only supports OpenAPI 3 documents, not Swagger") - } - return nil, errors.New("unable to render, no openapi model has been built for the document") - } - if d.info == nil { - return nil, errors.New("unable to render, no specification has been loaded") - } - - var newBytes []byte - var jsonErr error - if d.info.SpecFileType == datamodel.JSONFileType { - jsonIndent := " " - i := d.info.OriginalIndentation - if i > 2 { - for l := 0; l < i-2; l++ { - jsonIndent += " " - } - } - newBytes, jsonErr = d.highOpenAPI3Model.Model.RenderJSON(jsonIndent) - } - if d.info.SpecFileType == datamodel.YAMLFileType { - newBytes = d.highOpenAPI3Model.Model.RenderWithIndention(d.info.OriginalIndentation) - } - return newBytes, jsonErr -} - -func (d *document) BuildV2Model() (*DocumentModel[v2high.Swagger], error) { - if d.highSwaggerModel != nil { - return d.highSwaggerModel, nil - } - var errs []error - if d.info == nil { - return nil, fmt.Errorf("unable to build swagger document, no specification has been loaded") - } - if d.info.SpecFormat != datamodel.OAS2 { - return nil, fmt.Errorf("unable to build swagger document, "+ - "supplied spec is a different version (%v). Try 'BuildV3Model()'", d.info.SpecFormat) - } - - var lowDoc *v2low.Swagger - if d.config == nil { - d.config = datamodel.NewDocumentConfiguration() - } - - var docErr error - lowDoc, docErr = v2low.CreateDocumentFromConfig(d.info, d.config) - d.rolodex = lowDoc.Rolodex - - if docErr != nil { - errs = append(errs, utils.UnwrapErrors(docErr)...) - } - - // Do not short-circuit on circular reference errors, so the client - // has the option of ignoring them. - for _, err := range errs { - var refErr *index.ResolvingError - if errors.As(err, &refErr) { - if refErr.CircularReference == nil { - return nil, errors.Join(errs...) 
- } - } - } - highDoc := v2high.NewSwaggerDocument(lowDoc) - - d.highSwaggerModel = &DocumentModel[v2high.Swagger]{ - Model: *highDoc, - Index: lowDoc.Index, - } - lowbase.SchemaQuickHashMap.Clear() - return d.highSwaggerModel, errors.Join(errs...) -} - -func (d *document) BuildV3Model() (*DocumentModel[v3high.Document], error) { - if d.highOpenAPI3Model != nil { - return d.highOpenAPI3Model, nil - } - var errs []error - if d.info == nil { - return nil, fmt.Errorf("unable to build document, no specification has been loaded") - } - if d.info.SpecFormat != datamodel.OAS3 && d.info.SpecFormat != datamodel.OAS31 && d.info.SpecFormat != datamodel.OAS32 { - return nil, fmt.Errorf("unable to build openapi document, "+ - "supplied spec is a different version (%v). Try 'BuildV2Model()'", d.info.SpecFormat) - } - - var lowDoc *v3low.Document - if d.config == nil { - d.config = datamodel.NewDocumentConfiguration() - } - - var docErr error - lowDoc, docErr = v3low.CreateDocumentFromConfig(d.info, d.config) - d.rolodex = lowDoc.Rolodex - - if docErr != nil { - errs = append(errs, utils.UnwrapErrors(docErr)...) - } - - // Do not short-circuit on circular reference errors, so the client - // has the option of ignoring them. - for _, err := range utils.UnwrapErrors(docErr) { - var refErr *index.ResolvingError - if errors.As(err, &refErr) { - if refErr.CircularReference == nil { - return nil, errors.Join(errs...) - } - } - } - - highDoc := v3high.NewDocument(lowDoc) - highDoc.Rolodex = lowDoc.Index.GetRolodex() - - d.highOpenAPI3Model = &DocumentModel[v3high.Document]{ - Model: *highDoc, - Index: lowDoc.Index, - } - lowbase.SchemaQuickHashMap.Clear() - return d.highOpenAPI3Model, errors.Join(errs...) -} - -// CompareDocuments will accept a left and right Document implementing struct, build a model for the correct -// version and then compare model documents for changes. 
-// -// If there are any errors when building the models, those errors are returned with a nil pointer for the -// model.DocumentChanges. If there are any changes found however between either Document, then a pointer to -// model.DocumentChanges is returned containing every single change, broken down, model by model. -func CompareDocuments(original, updated Document) (*model.DocumentChanges, error) { - var errs []error - if original.GetSpecInfo().SpecType == utils.OpenApi3 && updated.GetSpecInfo().SpecType == utils.OpenApi3 { - v3ModelLeft, oErrs := original.BuildV3Model() - if oErrs != nil { - errs = append(errs, oErrs) - } - v3ModelRight, uErrs := updated.BuildV3Model() - if uErrs != nil { - errs = append(errs, uErrs) - } - if v3ModelLeft != nil && v3ModelRight != nil { - return what_changed.CompareOpenAPIDocuments(v3ModelLeft.Model.GoLow(), v3ModelRight.Model.GoLow()), - errors.Join(errs...) - } else { - return nil, errors.Join(errs...) - } - } - if original.GetSpecInfo().SpecType == utils.OpenApi2 && updated.GetSpecInfo().SpecType == utils.OpenApi2 { - v2ModelLeft, oErrs := original.BuildV2Model() - if oErrs != nil { - errs = append(errs, oErrs) - } - v2ModelRight, uErrs := updated.BuildV2Model() - if uErrs != nil { - errs = append(errs, uErrs) - } - return what_changed.CompareSwaggerDocuments(v2ModelLeft.Model.GoLow(), v2ModelRight.Model.GoLow()), - errors.Join(errs...) 
- } - return nil, fmt.Errorf("unable to compare documents, one or both documents are not of the same version") -} diff --git a/vendor/github.com/pb33f/libopenapi/index/find_component.go b/vendor/github.com/pb33f/libopenapi/index/find_component.go index aa2a0a207d6..cd499414c05 100644 --- a/vendor/github.com/pb33f/libopenapi/index/find_component.go +++ b/vendor/github.com/pb33f/libopenapi/index/find_component.go @@ -26,6 +26,10 @@ func (index *SpecIndex) FindComponent(ctx context.Context, componentId string) * return nil } + if resolved := index.ResolveRefViaSchemaId(componentId); resolved != nil { + return resolved + } + if strings.HasPrefix(componentId, "/") { baseUri, fragment := SplitRefFragment(componentId) if resolved := index.resolveRefViaSchemaIdPath(baseUri); resolved != nil { diff --git a/vendor/github.com/pb33f/libopenapi/index/index_model.go b/vendor/github.com/pb33f/libopenapi/index/index_model.go index ca6e0137151..c346f5adaa4 100644 --- a/vendor/github.com/pb33f/libopenapi/index/index_model.go +++ b/vendor/github.com/pb33f/libopenapi/index/index_model.go @@ -273,6 +273,7 @@ func (s *SpecIndexConfig) ToDocumentConfiguration() *datamodel.DocumentConfigura AllowUnknownExtensionContentDetection: s.AllowUnknownExtensionContentDetection, TransformSiblingRefs: s.TransformSiblingRefs, MergeReferencedProperties: s.MergeReferencedProperties, + ResolveNestedRefsWithDocumentContext: s.ResolveNestedRefsWithDocumentContext, PropertyMergeStrategy: strategy, SkipExternalRefResolution: s.SkipExternalRefResolution, Logger: s.Logger, diff --git a/vendor/github.com/pb33f/libopenapi/index/resolve_reference_value.go b/vendor/github.com/pb33f/libopenapi/index/resolve_reference_value.go new file mode 100644 index 00000000000..b913560fb00 --- /dev/null +++ b/vendor/github.com/pb33f/libopenapi/index/resolve_reference_value.go @@ -0,0 +1,95 @@ +package index + +import ( + "net/url" + "strconv" + "strings" +) + +// ResolveReferenceValue resolves a reference string to a decoded 
value. +// +// Resolution order: +// 1. Resolve using SpecIndex when available. +// 2. Fallback to local JSON pointer resolution (e.g. "#/components/schemas/Foo") +// using getDocData when provided. +// +// Returns nil when the reference cannot be resolved. +func ResolveReferenceValue(ref string, specIndex *SpecIndex, getDocData func() map[string]interface{}) interface{} { + if ref == "" { + return nil + } + + if specIndex != nil { + if resolvedRef, _ := specIndex.SearchIndexForReference(ref); resolvedRef != nil && resolvedRef.Node != nil { + var decoded interface{} + if err := resolvedRef.Node.Decode(&decoded); err == nil { + return decoded + } + } + } + + // Fallback parser only supports local JSON pointers ("#" root or "#/..."). + if ref != "#" && !strings.HasPrefix(ref, "#/") { + return nil + } + + if getDocData == nil { + return nil + } + docData := getDocData() + if docData == nil { + return nil + } + + return resolveLocalJSONPointer(docData, ref) +} + +func resolveLocalJSONPointer(docData map[string]interface{}, ref string) interface{} { + if ref == "" { + return nil + } + if ref == "#" { + return docData + } + if !strings.HasPrefix(ref, "#/") { + return nil + } + + segments := strings.Split(ref[2:], "/") + var current interface{} = docData + + for _, rawSegment := range segments { + segment := decodeJSONPointerToken(rawSegment) + switch node := current.(type) { + case map[string]interface{}: + next, ok := node[segment] + if !ok { + return nil + } + current = next + case []interface{}: + idx, err := strconv.Atoi(segment) + if err != nil || idx < 0 || idx >= len(node) { + return nil + } + current = node[idx] + default: + return nil + } + } + + return current +} + +func decodeJSONPointerToken(token string) string { + if strings.Contains(token, "%") { + decoded, err := url.PathUnescape(token) + if err == nil { + token = decoded + } + } + if !strings.Contains(token, "~") { + return token + } + return strings.ReplaceAll(strings.ReplaceAll(token, "~1", "/"), "~0", 
"~") +} diff --git a/vendor/github.com/pb33f/libopenapi/index/resolve_refs_node.go b/vendor/github.com/pb33f/libopenapi/index/resolve_refs_node.go new file mode 100644 index 00000000000..eb0724160f2 --- /dev/null +++ b/vendor/github.com/pb33f/libopenapi/index/resolve_refs_node.go @@ -0,0 +1,158 @@ +package index + +import ( + "strings" + + "go.yaml.in/yaml/v4" +) + +// ResolveRefsInNode resolves local $ref values in a YAML node using the provided +// index. If a mapping contains sibling keys alongside $ref, sibling keys are +// preserved and merged into the resolved mapping (sibling values take precedence). +func ResolveRefsInNode(node *yaml.Node, idx *SpecIndex) *yaml.Node { + if node == nil || idx == nil { + return node + } + return resolveRefsInNode(node, idx, map[string]struct{}{}) +} + +func resolveRefsInNode(node *yaml.Node, idx *SpecIndex, seen map[string]struct{}) *yaml.Node { + if node == nil || idx == nil { + return node + } + + switch node.Kind { + case yaml.MappingNode: + return resolveRefsInMappingNode(node, idx, seen) + case yaml.SequenceNode: + clone := *node + clone.Content = make([]*yaml.Node, 0, len(node.Content)) + for _, item := range node.Content { + clone.Content = append(clone.Content, resolveRefsInNode(item, idx, seen)) + } + return &clone + default: + return node + } +} + +func resolveRefsInMappingNode(node *yaml.Node, idx *SpecIndex, seen map[string]struct{}) *yaml.Node { + ref, hasRef := findRefInMappingNode(node) + if !hasRef { + return cloneMappingNodeWithResolvedChildren(node, idx, seen) + } + + // This helper is intentionally local-only; keep external refs intact. 
+ if !strings.HasPrefix(ref, "#/") { + return cloneMappingNodeWithResolvedChildren(node, idx, seen) + } + + if _, exists := seen[ref]; exists { + return cloneMappingNodeWithResolvedChildren(node, idx, seen) + } + + seen[ref] = struct{}{} + var resolved *yaml.Node + if resolvedRef, _ := idx.SearchIndexForReference(ref); resolvedRef != nil && resolvedRef.Node != nil { + resolved = resolveRefsInNode(resolvedRef.Node, idx, seen) + } + delete(seen, ref) + + if resolved == nil { + return cloneMappingNodeWithResolvedChildren(node, idx, seen) + } + + if !hasNonRefSiblings(node) { + return resolved + } + + siblings := extractResolvedSiblingPairs(node, idx, seen) + if resolved.Kind == yaml.MappingNode { + return mergeResolvedMappingWithSiblings(resolved, siblings) + } + if resolved.Kind == yaml.DocumentNode && len(resolved.Content) > 0 && resolved.Content[0] != nil && resolved.Content[0].Kind == yaml.MappingNode { + docClone := *resolved + docClone.Content = append([]*yaml.Node(nil), resolved.Content...) + docClone.Content[0] = mergeResolvedMappingWithSiblings(resolved.Content[0], siblings) + return &docClone + } + + // Fallback: keep original mapping (with $ref) but still resolve sibling values. 
+ return cloneMappingNodeWithResolvedChildren(node, idx, seen) +} + +func hasNonRefSiblings(node *yaml.Node) bool { + for i := 0; i+1 < len(node.Content); i += 2 { + key := node.Content[i] + if key != nil && key.Value != "$ref" { + return true + } + } + return false +} + +func findRefInMappingNode(node *yaml.Node) (string, bool) { + for i := 0; i+1 < len(node.Content); i += 2 { + key := node.Content[i] + val := node.Content[i+1] + if key != nil && key.Value == "$ref" && val != nil && val.Kind == yaml.ScalarNode { + return val.Value, true + } + } + return "", false +} + +func extractResolvedSiblingPairs(node *yaml.Node, idx *SpecIndex, seen map[string]struct{}) []*yaml.Node { + out := make([]*yaml.Node, 0, len(node.Content)) + for i := 0; i+1 < len(node.Content); i += 2 { + key := node.Content[i] + val := node.Content[i+1] + if key != nil && key.Value == "$ref" { + continue + } + out = append(out, key, resolveRefsInNode(val, idx, seen)) + } + return out +} + +func cloneMappingNodeWithResolvedChildren(node *yaml.Node, idx *SpecIndex, seen map[string]struct{}) *yaml.Node { + clone := *node + clone.Content = make([]*yaml.Node, 0, len(node.Content)) + for i := 0; i+1 < len(node.Content); i += 2 { + key := node.Content[i] + val := node.Content[i+1] + clone.Content = append(clone.Content, key, resolveRefsInNode(val, idx, seen)) + } + return &clone +} + +func mergeResolvedMappingWithSiblings(resolved *yaml.Node, siblings []*yaml.Node) *yaml.Node { + merged := *resolved + merged.Content = make([]*yaml.Node, 0, len(resolved.Content)+len(siblings)) + + keyPos := make(map[string]int, len(resolved.Content)/2+len(siblings)/2) + for i := 0; i+1 < len(resolved.Content); i += 2 { + key := resolved.Content[i] + val := resolved.Content[i+1] + merged.Content = append(merged.Content, key, val) + if key != nil { + keyPos[key.Value] = len(merged.Content) - 2 + } + } + + for i := 0; i+1 < len(siblings); i += 2 { + key := siblings[i] + val := siblings[i+1] + if key == nil { + continue + } 
+ if pos, ok := keyPos[key.Value]; ok { + merged.Content[pos+1] = val + continue + } + merged.Content = append(merged.Content, key, val) + keyPos[key.Value] = len(merged.Content) - 2 + } + + return &merged +} diff --git a/vendor/github.com/pb33f/libopenapi/index/schema_id_resolve.go b/vendor/github.com/pb33f/libopenapi/index/schema_id_resolve.go index 1dd948c304b..1b1328f2ccb 100644 --- a/vendor/github.com/pb33f/libopenapi/index/schema_id_resolve.go +++ b/vendor/github.com/pb33f/libopenapi/index/schema_id_resolve.go @@ -128,6 +128,62 @@ func SplitRefFragment(ref string) (baseUri string, fragment string) { return ref[:idx], ref[idx:] } +func joinSchemaIdDefinitionPath(definitionPath, fragment string) string { + if definitionPath == "" { + return "" + } + normalizedFragment := strings.TrimPrefix(fragment, "#") + if normalizedFragment == "" || normalizedFragment == "/" { + return definitionPath + } + if definitionPath == "#" { + return "#" + normalizedFragment + } + return strings.TrimRight(definitionPath, "/") + normalizedFragment +} + +func buildSchemaIdResolvedReference(index *SpecIndex, entry *SchemaIdEntry, originalRef, baseUri, fragment string) *Reference { + if entry == nil { + return nil + } + + node := entry.SchemaNode + if fragment != "" && entry.SchemaNode != nil { + if fragmentNode := navigateToFragment(entry.SchemaNode, fragment); fragmentNode != nil { + node = fragmentNode + } + } + + definition := originalRef + fullDefinition := originalRef + if entry.DefinitionPath != "" { + definition = joinSchemaIdDefinitionPath(entry.DefinitionPath, fragment) + fullDefinition = definition + if entry.Index != nil { + if specPath := entry.Index.GetSpecAbsolutePath(); specPath != "" { + fullDefinition = specPath + definition + } + } + } + + remoteLocation := "" + if entry.Index != nil { + remoteLocation = entry.Index.GetSpecAbsolutePath() + } + + return &Reference{ + FullDefinition: fullDefinition, + Definition: definition, + Name: baseUri, + RawRef: originalRef, + 
SchemaIdBase: baseUri, + Node: node, + IsRemote: entry.Index != index, + RemoteLocation: remoteLocation, + Index: entry.Index, + } +} + // ResolveRefViaSchemaId attempts to resolve a $ref via the $id registry. // Implements JSON Schema 2020-12 $id-based resolution: // 1. Split ref into base URI and fragment @@ -156,26 +212,7 @@ func (index *SpecIndex) ResolveRefViaSchemaId(ref string) *Reference { return nil } - r := &Reference{ - FullDefinition: ref, - Definition: ref, - Name: baseUri, - RawRef: ref, - SchemaIdBase: baseUri, - Node: entry.SchemaNode, - IsRemote: entry.Index != index, - RemoteLocation: entry.Index.GetSpecAbsolutePath(), - Index: entry.Index, - } - - // Navigate to fragment if present - if fragment != "" && entry.SchemaNode != nil { - if fragmentNode := navigateToFragment(entry.SchemaNode, fragment); fragmentNode != nil { - r.Node = fragmentNode - } - } - - return r + return buildSchemaIdResolvedReference(index, entry, ref, baseUri, fragment) } func (index *SpecIndex) resolveRefViaSchemaIdPath(path string) *Reference { @@ -212,17 +249,7 @@ func (index *SpecIndex) resolveRefViaSchemaIdPath(path string) *Reference { } baseUri := match.GetKey() - return &Reference{ - FullDefinition: baseUri, - Definition: baseUri, - Name: baseUri, - RawRef: path, - SchemaIdBase: baseUri, - Node: match.SchemaNode, - IsRemote: match.Index != index, - RemoteLocation: match.Index.GetSpecAbsolutePath(), - Index: match.Index, - } + return buildSchemaIdResolvedReference(index, match, path, baseUri, "") } // navigateToFragment navigates to a JSON pointer fragment within a YAML node. 
diff --git a/vendor/github.com/pb33f/libopenapi/index/search_index.go b/vendor/github.com/pb33f/libopenapi/index/search_index.go index 6362177fc60..5d70dd3f11d 100644 --- a/vendor/github.com/pb33f/libopenapi/index/search_index.go +++ b/vendor/github.com/pb33f/libopenapi/index/search_index.go @@ -356,17 +356,56 @@ func (index *SpecIndex) SearchIndexForReferenceByReferenceWithContext(ctx contex } } - if index.logger != nil { - // this is a last ditch effort. if this fails, all hope is lost. - if index.GetRolodex() != nil { - for _, i := range index.GetRolodex().GetIndexes() { - v := i.FindComponent(ctx, ref) + // last ditch effort: search all rolodex indexes and root index. + // this is decoupled from the logger guard so search works even without a logger. + if rolo := index.GetRolodex(); rolo != nil { + for _, i := range rolo.GetIndexes() { + v := i.FindComponent(ctx, ref) + if v != nil { + return v, v.Index, ctx + } + } + + // also try the root index, which is not included in GetIndexes(). + // this handles the case where an external file contains a local #/ ref + // (e.g., #/components/schemas/Workspace) that the resolver expanded into + // an absolute path form (e.g., /path/to/file.yaml#/components/schemas/Workspace). + // the component actually lives in the root document, not in the external file. + if rootIdx := rolo.GetRootIndex(); rootIdx != nil && rootIdx != index { + v := rootIdx.FindComponent(ctx, ref) + if v != nil { + return v, v.Index, ctx + } + // if the ref contains a file path + fragment, extract the fragment + // and try it against the root index directly. This resolves cases where + // #/components/schemas/Name was expanded to /abs/path/file.yaml#/components/schemas/Name + // but the schema actually lives in the root document. 
+ if parts := strings.SplitN(ref, "#/", 2); len(parts) == 2 && parts[0] != "" { + fragmentRef := fmt.Sprintf("#/%s", parts[1]) + v = rootIdx.FindComponent(ctx, fragmentRef) if v != nil { return v, v.Index, ctx } } } - index.logger.Error("unable to locate reference anywhere in the rolodex", "reference", ref) + } + + if index.logger != nil { + rolodexIndexCount := -1 + rootIndexPath := "" + if rolo := index.GetRolodex(); rolo != nil { + rolodexIndexCount = len(rolo.GetIndexes()) + if ri := rolo.GetRootIndex(); ri != nil { + rootIndexPath = ri.GetSpecAbsolutePath() + } + } + index.logger.Error("unable to locate reference anywhere in the rolodex", + "reference", ref, + "indexPath", index.specAbsolutePath, + "hasRolodex", index.GetRolodex() != nil, + "rolodexIndexCount", rolodexIndexCount, + "rootIndexPath", rootIndexPath, + ) } return nil, index, ctx } diff --git a/vendor/github.com/pb33f/libopenapi/libopenapi-logo.png b/vendor/github.com/pb33f/libopenapi/libopenapi-logo.png deleted file mode 100644 index 78bf7e5fc62..00000000000 Binary files a/vendor/github.com/pb33f/libopenapi/libopenapi-logo.png and /dev/null differ diff --git a/vendor/github.com/pb33f/libopenapi/overlay.go b/vendor/github.com/pb33f/libopenapi/overlay.go deleted file mode 100644 index 2f475f0bd7a..00000000000 --- a/vendor/github.com/pb33f/libopenapi/overlay.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2022-2025 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package libopenapi - -import ( - gocontext "context" - - "github.com/pb33f/libopenapi/datamodel" - highoverlay "github.com/pb33f/libopenapi/datamodel/high/overlay" - "github.com/pb33f/libopenapi/datamodel/low" - lowoverlay "github.com/pb33f/libopenapi/datamodel/low/overlay" - "github.com/pb33f/libopenapi/overlay" - "go.yaml.in/yaml/v4" -) - -// OverlayResult contains the result of applying an overlay to a target document. 
-type OverlayResult struct { - // Bytes raw YAML bytes of the modified document after the overlay has been applied. - Bytes []byte - - // OverlayDocument is the modified document, ready to have a model built from it. - // The document is created using the same configuration as the input document - // (for ApplyOverlay and ApplyOverlayFromBytes), or with a default configuration - // (for ApplyOverlayToSpecBytes and ApplyOverlayFromBytesToSpecBytes). - OverlayDocument Document - - // Warnings that occurred during overlay application. - Warnings []*overlay.Warning -} - -// NewOverlayDocument creates a new overlay document from the provided bytes. -// The overlay document can then be applied to a target OpenAPI document using ApplyOverlay. -func NewOverlayDocument(overlayBytes []byte) (*highoverlay.Overlay, error) { - var node yaml.Node - if err := yaml.Unmarshal(overlayBytes, &node); err != nil { - return nil, err - } - - if len(node.Content) == 0 { - return nil, overlay.ErrInvalidOverlay - } - - var lowOv lowoverlay.Overlay - if err := low.BuildModel(node.Content[0], &lowOv); err != nil { - return nil, err - } - - if err := lowOv.Build(gocontext.Background(), nil, node.Content[0], nil); err != nil { - return nil, err - } - - return highoverlay.NewOverlay(&lowOv), nil -} - -// ApplyOverlay applies the overlay to the target document and returns the modified document. -// This is the primary entry point for an overlay application when working with Document objects. -// -// The returned OverlayDocument uses the same configuration as the input document. 
-func ApplyOverlay(document Document, ov *highoverlay.Overlay) (*OverlayResult, error) { - specBytes := document.GetSpecInfo().SpecBytes - if specBytes == nil { - return nil, overlay.ErrNoTargetDocument - } - - result, err := overlay.Apply(*specBytes, ov) - if err != nil { - return nil, err - } - - newDoc, err := NewDocumentWithConfiguration(result.Bytes, document.GetConfiguration()) - if err != nil { - return nil, err - } - - return &OverlayResult{ - Bytes: result.Bytes, - OverlayDocument: newDoc, - Warnings: result.Warnings, - }, nil -} - -// ApplyOverlayFromBytes applies an overlay (provided as bytes) to the target document. -// This is a convenience function when you have a Document but the overlay as raw bytes. -// -// The returned OverlayDocument uses the same configuration as the input document. -func ApplyOverlayFromBytes(document Document, overlayBytes []byte) (*OverlayResult, error) { - ov, err := NewOverlayDocument(overlayBytes) - if err != nil { - return nil, err - } - return ApplyOverlay(document, ov) -} - -// ApplyOverlayToSpecBytes applies the overlay to the target document bytes. -// Use this when you have raw spec bytes and a parsed Overlay object. -// -// The returned OverlayDocument uses a default document configuration. -func ApplyOverlayToSpecBytes(docBytes []byte, ov *highoverlay.Overlay) (*OverlayResult, error) { - return applyOverlayToBytesWithConfig(docBytes, ov, nil) -} - -// ApplyOverlayFromBytesToSpecBytes applies an overlay to target document bytes, -// where both the overlay and target document are provided as raw bytes. -// This is the most convenient function when you don't need to configure either document. -// -// The returned OverlayDocument uses a default document configuration. 
-func ApplyOverlayFromBytesToSpecBytes(docBytes, overlayBytes []byte) (*OverlayResult, error) { - ov, err := NewOverlayDocument(overlayBytes) - if err != nil { - return nil, err - } - return applyOverlayToBytesWithConfig(docBytes, ov, nil) -} - -// applyOverlayToBytesWithConfig is the internal function that applies the overlay to bytes -// and creates a Document with the specified configuration (nil for default). -func applyOverlayToBytesWithConfig(targetBytes []byte, ov *highoverlay.Overlay, config *datamodel.DocumentConfiguration) (*OverlayResult, error) { - result, err := overlay.Apply(targetBytes, ov) - if err != nil { - return nil, err - } - - newDoc, err := NewDocumentWithConfiguration(result.Bytes, config) - if err != nil { - return nil, err - } - - return &OverlayResult{ - Bytes: result.Bytes, - OverlayDocument: newDoc, - Warnings: result.Warnings, - }, nil -} diff --git a/vendor/github.com/pb33f/libopenapi/overlay/engine.go b/vendor/github.com/pb33f/libopenapi/overlay/engine.go deleted file mode 100644 index 091c7f607b9..00000000000 --- a/vendor/github.com/pb33f/libopenapi/overlay/engine.go +++ /dev/null @@ -1,275 +0,0 @@ -// Copyright 2022-2025 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package overlay - -import ( - "github.com/pb33f/jsonpath/pkg/jsonpath" - "github.com/pb33f/jsonpath/pkg/jsonpath/config" - highoverlay "github.com/pb33f/libopenapi/datamodel/high/overlay" - "go.yaml.in/yaml/v4" -) - -// Apply applies the given overlay to the target document bytes. -// It returns the modified document bytes and any warnings encountered. 
-func Apply(targetBytes []byte, overlay *highoverlay.Overlay) (*Result, error) { - if overlay == nil { - return nil, ErrInvalidOverlay - } - - if err := validateOverlay(overlay); err != nil { - return nil, err - } - - var rootNode yaml.Node - if err := yaml.Unmarshal(targetBytes, &rootNode); err != nil { - return nil, err - } - - // Parent index is built lazily and rebuilt after updates/copies to ensure - // remove actions can target nodes created by earlier update/copy actions. - var parentIdx parentIndex - parentIdxStale := true - - var warnings []*Warning - for _, action := range overlay.Actions { - if action.Remove && parentIdxStale { - parentIdx = newParentIndex(&rootNode) - parentIdxStale = false - } - - actionWarnings, err := applyAction(&rootNode, action, parentIdx) - if err != nil { - return nil, &OverlayError{Action: action, Cause: err} - } - warnings = append(warnings, actionWarnings...) - - // Mark parent index as stale after update or copy operations - // (both can add new nodes that subsequent remove actions may target) - if action.Update != nil || action.Copy != "" { - parentIdxStale = true - } - } - - resultBytes, err := yaml.Marshal(&rootNode) - if err != nil { - return nil, err - } - - return &Result{ - Bytes: resultBytes, - Warnings: warnings, - }, nil -} - -func applyAction(root *yaml.Node, action *highoverlay.Action, parentIdx parentIndex) ([]*Warning, error) { - var warnings []*Warning - - if action.Target == "" { - return warnings, nil - } - - path, err := jsonpath.NewPath(action.Target, config.WithPropertyNameExtension(), config.WithLazyContextTracking()) - if err != nil { - return nil, ErrInvalidJSONPath - } - - nodes := path.Query(root) - - if len(nodes) == 0 { - warnings = append(warnings, &Warning{ - Action: action, - Target: action.Target, - Message: "target matched zero nodes", - }) - return warnings, nil - } - - // Operation order per spec: copy → update → remove - // This allows: - // - Copy to populate the target first - // - Update 
to override copied values - // - Remove to clean up afterwards (move pattern) - - // 1. Copy (if present) - if action.Copy != "" { - copyWarnings, err := applyCopyAction(root, nodes, action.Copy) - if err != nil { - return nil, err - } - warnings = append(warnings, copyWarnings...) - } - - // 2. Update (if present) - // Validate targets for UPDATE actions (must be objects or arrays, not primitives). - // Validation happens AFTER copy because copy may change the target node type. - // REMOVE actions can target any node type. - if action.Update != nil { - for _, node := range nodes { - if err := validateTarget(node); err != nil { - return nil, err - } - } - applyUpdateAction(nodes, action.Update) - } - - // 3. Remove (if present) - if action.Remove { - applyRemoveAction(parentIdx, nodes) - } - - return warnings, nil -} - -func applyCopyAction(root *yaml.Node, targetNodes []*yaml.Node, copyPath string) ([]*Warning, error) { - var warnings []*Warning - - path, err := jsonpath.NewPath(copyPath, config.WithPropertyNameExtension(), config.WithLazyContextTracking()) - if err != nil { - return nil, ErrInvalidJSONPath - } - - sourceNodes := path.Query(root) - - // Single-node constraint per spec: copy source must select exactly one node - if len(sourceNodes) == 0 { - return nil, ErrCopySourceNotFound - } - if len(sourceNodes) > 1 { - return nil, ErrCopySourceMultiple - } - - sourceNode := sourceNodes[0] - - // Type compatibility check per spec: "If the target expression and - // copy expression do not return the same type, an error MUST be reported" - for _, targetNode := range targetNodes { - if sourceNode.Kind != targetNode.Kind { - return nil, ErrCopyTypeMismatch - } - mergeNode(targetNode, sourceNode) - } - - return warnings, nil -} - -func applyRemoveAction(idx parentIndex, nodes []*yaml.Node) { - for _, node := range nodes { - removeNode(idx, node) - } -} - -func applyUpdateAction(nodes []*yaml.Node, update *yaml.Node) { - if update.IsZero() { - return - } - for _, 
node := range nodes { - mergeNode(node, update) - } -} - -type parentIndex map[*yaml.Node]*yaml.Node - -func newParentIndex(root *yaml.Node) parentIndex { - index := parentIndex{} - index.indexNodeRecursively(root) - return index -} - -func (index parentIndex) indexNodeRecursively(parent *yaml.Node) { - for _, child := range parent.Content { - index[child] = parent - index.indexNodeRecursively(child) - } -} - -func (index parentIndex) getParent(child *yaml.Node) *yaml.Node { - return index[child] -} - -func removeNode(idx parentIndex, node *yaml.Node) { - parent := idx.getParent(node) - if parent == nil { - return - } - - for i, child := range parent.Content { - if child == node { - switch parent.Kind { - case yaml.MappingNode: - // JSONPath returns value nodes (odd indices), so remove both key and value - parent.Content = append(parent.Content[:i-1], parent.Content[i+1:]...) - return - case yaml.SequenceNode: - parent.Content = append(parent.Content[:i], parent.Content[i+1:]...) - return - } - } - } -} - -func mergeNode(node *yaml.Node, merge *yaml.Node) { - if node.Kind != merge.Kind { - *node = *cloneNode(merge) - return - } - switch node.Kind { - default: - node.Value = merge.Value - case yaml.MappingNode: - mergeMappingNode(node, merge) - case yaml.SequenceNode: - mergeSequenceNode(node, merge) - } -} - -func mergeMappingNode(node *yaml.Node, merge *yaml.Node) { -NextKey: - for i := 0; i < len(merge.Content); i += 2 { - mergeKey := merge.Content[i].Value - mergeValue := merge.Content[i+1] - - for j := 0; j < len(node.Content); j += 2 { - nodeKey := node.Content[j].Value - if nodeKey == mergeKey { - mergeNode(node.Content[j+1], mergeValue) - continue NextKey - } - } - - node.Content = append(node.Content, merge.Content[i], cloneNode(mergeValue)) - } -} - -func mergeSequenceNode(node *yaml.Node, merge *yaml.Node) { - // clone each child individually to avoid wasteful intermediate allocation - for _, child := range merge.Content { - node.Content = 
append(node.Content, cloneNode(child)) - } -} - -func cloneNode(node *yaml.Node) *yaml.Node { - if node == nil { - return nil - } - newNode := &yaml.Node{ - Kind: node.Kind, - Style: node.Style, - Tag: node.Tag, - Value: node.Value, - Anchor: node.Anchor, - HeadComment: node.HeadComment, - LineComment: node.LineComment, - FootComment: node.FootComment, - } - if node.Alias != nil { - newNode.Alias = cloneNode(node.Alias) - } - if node.Content != nil { - newNode.Content = make([]*yaml.Node, len(node.Content)) - for i, child := range node.Content { - newNode.Content[i] = cloneNode(child) - } - } - return newNode -} diff --git a/vendor/github.com/pb33f/libopenapi/overlay/errors.go b/vendor/github.com/pb33f/libopenapi/overlay/errors.go deleted file mode 100644 index 68f38509751..00000000000 --- a/vendor/github.com/pb33f/libopenapi/overlay/errors.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2022-2025 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package overlay - -import ( - "errors" - "fmt" - - highoverlay "github.com/pb33f/libopenapi/datamodel/high/overlay" -) - -// Warning represents a non-fatal issue encountered during overlay application. -type Warning struct { - Action *highoverlay.Action - Target string - Message string -} - -func (w *Warning) String() string { - return fmt.Sprintf("overlay warning: target '%s': %s", w.Target, w.Message) -} - -// OverlayError represents an error that occurred during an overlay application. -type OverlayError struct { - Action *highoverlay.Action - Cause error -} - -func (e *OverlayError) Error() string { - if e.Action != nil { - return fmt.Sprintf("overlay error at target '%s': %v", e.Action.Target, e.Cause) - } - return fmt.Sprintf("overlay error: %v", e.Cause) -} - -func (e *OverlayError) Unwrap() error { - return e.Cause -} - -// Sentinel errors for overlay operations. 
-var ( - // Parsing errors - ErrInvalidOverlay = errors.New("invalid overlay document") - ErrMissingOverlayField = errors.New("missing required 'overlay' field") - ErrMissingInfo = errors.New("missing required 'info' field") - ErrMissingActions = errors.New("missing required 'actions' field") - ErrEmptyActions = errors.New("actions array must contain at least one action") - - // JSONPath errors - ErrInvalidJSONPath = errors.New("invalid JSONPath expression") - ErrPrimitiveTarget = errors.New("JSONPath target resolved to primitive/null; must be object or array") - - // Application errors - ErrNoTargetDocument = errors.New("no target document provided") - - // Copy action errors - ErrCopySourceNotFound = errors.New("copy source JSONPath matched zero nodes") - ErrCopySourceMultiple = errors.New("copy source JSONPath must match exactly one node") - ErrCopyTypeMismatch = errors.New("copy source and target must be the same type") -) diff --git a/vendor/github.com/pb33f/libopenapi/overlay/result.go b/vendor/github.com/pb33f/libopenapi/overlay/result.go deleted file mode 100644 index 47894337a99..00000000000 --- a/vendor/github.com/pb33f/libopenapi/overlay/result.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2022-2025 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package overlay - -// Result represents the result of applying an overlay to a target document. -type Result struct { - // Bytes is the raw YAML/JSON bytes of the modified document. - Bytes []byte - - // Warnings contains non-fatal issues encountered during application. 
- Warnings []*Warning -} diff --git a/vendor/github.com/pb33f/libopenapi/overlay/validation.go b/vendor/github.com/pb33f/libopenapi/overlay/validation.go deleted file mode 100644 index 155c2ac0db9..00000000000 --- a/vendor/github.com/pb33f/libopenapi/overlay/validation.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2022-2025 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package overlay - -import ( - highoverlay "github.com/pb33f/libopenapi/datamodel/high/overlay" - "go.yaml.in/yaml/v4" -) - -// validateOverlay checks that the overlay has all required fields. -func validateOverlay(overlay *highoverlay.Overlay) error { - if overlay.Overlay == "" { - return ErrMissingOverlayField - } - if overlay.Info == nil { - return ErrMissingInfo - } - if len(overlay.Actions) == 0 { - return ErrEmptyActions - } - return nil -} - -// validateTarget checks that a target node is a valid target (object or array). -// Per the Overlay Spec, primitive/null targets are invalid. -func validateTarget(node *yaml.Node) error { - if node.Kind == yaml.ScalarNode { - return ErrPrimitiveTarget - } - return nil -} diff --git a/vendor/github.com/pb33f/libopenapi/utils/utils.go b/vendor/github.com/pb33f/libopenapi/utils/utils.go index 09d5eb43113..6fefbb8dd13 100644 --- a/vendor/github.com/pb33f/libopenapi/utils/utils.go +++ b/vendor/github.com/pb33f/libopenapi/utils/utils.go @@ -321,14 +321,26 @@ func ExtractValueFromInterfaceMap(name string, raw interface{}) interface{} { return nil } +// leadingMergeContent unwraps a leading YAML merge key when it has a corresponding value node. +// Malformed YAML can produce a bare `<<` node with no value; in that case we leave the original +// node slice intact and let higher-level validation return an error instead of panicking. 
+func leadingMergeContent(nodes []*yaml.Node) []*yaml.Node { + if len(nodes) < 2 || nodes[0] == nil || nodes[0].Tag != "!!merge" { + return nodes + } + merged := NodeAlias(nodes[1]) + if merged == nil { + return nodes + } + return merged.Content +} + // FindFirstKeyNode will locate the first key and value yaml.Node based on a key. func FindFirstKeyNode(key string, nodes []*yaml.Node, depth int) (keyNode *yaml.Node, valueNode *yaml.Node) { if depth > 40 { return nil, nil } - if nodes != nil && len(nodes) > 0 && nodes[0].Tag == "!!merge" { - nodes = NodeAlias(nodes[1]).Content - } + nodes = leadingMergeContent(nodes) for i, v := range nodes { if key != "" && key == v.Value { if i+1 >= len(nodes) { @@ -366,9 +378,7 @@ type KeyNodeSearch struct { // FindKeyNodeTop is a non-recursive search of top level nodes for a key, will not look at content. // Returns the key and value func FindKeyNodeTop(key string, nodes []*yaml.Node) (keyNode *yaml.Node, valueNode *yaml.Node) { - if nodes != nil && len(nodes) > 0 && nodes[0].Tag == "!!merge" { - nodes = NodeAlias(nodes[1]).Content - } + nodes = leadingMergeContent(nodes) for i := 0; i < len(nodes); i++ { v := nodes[i] if i%2 != 0 { @@ -387,9 +397,7 @@ func FindKeyNodeTop(key string, nodes []*yaml.Node) (keyNode *yaml.Node, valueNo // FindKeyNode is a non-recursive search of a *yaml.Node Content for a child node with a key. // Returns the key and value func FindKeyNode(key string, nodes []*yaml.Node) (keyNode *yaml.Node, valueNode *yaml.Node) { - if nodes != nil && len(nodes) > 0 && nodes[0].Tag == "!!merge" { - nodes = NodeAlias(nodes[1]).Content - } + nodes = leadingMergeContent(nodes) for i, v := range nodes { if i%2 == 0 && key == v.Value { if len(nodes) <= i+1 { @@ -419,9 +427,7 @@ func FindKeyNode(key string, nodes []*yaml.Node) (keyNode *yaml.Node, valueNode // generally different things are required from different node trees, so depending on what this function is looking at // it will return different things. 
func FindKeyNodeFull(key string, nodes []*yaml.Node) (keyNode *yaml.Node, labelNode *yaml.Node, valueNode *yaml.Node) { - if nodes != nil && len(nodes) > 0 && nodes[0].Tag == "!!merge" { - nodes = NodeAlias(nodes[1]).Content - } + nodes = leadingMergeContent(nodes) for i := 0; i < len(nodes); i++ { if i%2 == 0 && key == nodes[i].Value { if i+1 >= len(nodes) { @@ -460,9 +466,7 @@ func FindKeyNodeFull(key string, nodes []*yaml.Node) (keyNode *yaml.Node, labelN // FindKeyNodeFullTop is an overloaded version of FindKeyNodeFull. This version only looks at the top // level of the node and not the children. func FindKeyNodeFullTop(key string, nodes []*yaml.Node) (keyNode *yaml.Node, labelNode *yaml.Node, valueNode *yaml.Node) { - if nodes != nil && len(nodes) >= 0 && nodes[0].Tag == "!!merge" { - nodes = NodeAlias(nodes[1]).Content - } + nodes = leadingMergeContent(nodes) for i := 0; i < len(nodes); i++ { v := nodes[i] if i%2 == 0 { @@ -479,6 +483,9 @@ func FindKeyNodeFullTop(key string, nodes []*yaml.Node) (keyNode *yaml.Node, lab continue } if i%2 == 0 && key == nodes[i].Value { + if i+1 >= len(nodes) { + return NodeAlias(nodes[i]), NodeAlias(nodes[i]), NodeAlias(nodes[i]) + } return NodeAlias(nodes[i]), NodeAlias(nodes[i]), NodeAlias(nodes[i+1]) // next node is what we need. } } @@ -1236,7 +1243,7 @@ func CheckForMergeNodes(node *yaml.Node) { for i := 0; i < total; i++ { mn := node.Content[i] if i%2 == 0 { - if mn.Tag == "!!merge" { + if mn.Tag == "!!merge" && i+1 < len(node.Content) { an := node.Content[i+1].Alias if an != nil { node.Content = append(node.Content, an.Content...) 
// append the merged nodes diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/changed.go b/vendor/github.com/pb33f/libopenapi/what-changed/changed.go deleted file mode 100644 index b9186797bae..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/changed.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2023-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// https://pb33f.io - -package what_changed - -import "github.com/pb33f/libopenapi/what-changed/model" - -// Changed represents an object that was changed -type Changed interface { - // GetAllChanges returns all top level changes made to properties in this object - GetAllChanges() []*model.Change - - // TotalChanges returns a count of all changes made on the object, including all children - TotalChanges() int - - // TotalBreakingChanges returns a count of all breaking changes on this object - TotalBreakingChanges() int - - // GetPropertyChanges - GetPropertyChanges() []*model.Change - - // PropertiesOnly will set a change object to only render properties and not the whole timeline. - PropertiesOnly() -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/breaking_rules.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/breaking_rules.go deleted file mode 100644 index 642a07d2273..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/breaking_rules.go +++ /dev/null @@ -1,451 +0,0 @@ -// Copyright 2022-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - "reflect" - "strings" - "sync" -) - -// ResetDefaultBreakingRules resets the cached default rules. This is primarily -// intended for testing scenarios where the cache needs to be cleared. -func ResetDefaultBreakingRules() { - defaultRulesOnce = sync.Once{} - defaultRulesCache = nil -} - -// SetActiveBreakingRulesConfig sets the active breaking rules configuration used -// by comparison functions. Pass nil to reset to defaults. 
-func SetActiveBreakingRulesConfig(config *BreakingRulesConfig) { - activeConfigMu.Lock() - defer activeConfigMu.Unlock() - activeConfig = config -} - -// GetActiveBreakingRulesConfig returns the currently active breaking rules config. -// If no custom config has been set, returns the default rules. -func GetActiveBreakingRulesConfig() *BreakingRulesConfig { - activeConfigMu.RLock() - defer activeConfigMu.RUnlock() - if activeConfig != nil { - return activeConfig - } - return GenerateDefaultBreakingRules() -} - -// ResetActiveBreakingRulesConfig clears any custom config and reverts to defaults. -func ResetActiveBreakingRulesConfig() { - activeConfigMu.Lock() - defer activeConfigMu.Unlock() - activeConfig = nil -} - -// GenerateDefaultBreakingRules returns the default breaking change rules for OpenAPI 3.x. -// These rules match the currently hardcoded behavior in the comparison functions. -// The returned config is cached and reused for performance. -func GenerateDefaultBreakingRules() *BreakingRulesConfig { - defaultRulesOnce.Do(func() { - defaultRulesCache = buildDefaultRules() - }) - return defaultRulesCache -} - -// IsBreakingChange is a package-level helper that looks up whether a change is breaking -// using the currently active configuration. -func IsBreakingChange(component, property, changeType string) bool { - return GetActiveBreakingRulesConfig().IsBreaking(component, property, changeType) -} - -// BreakingAdded returns whether adding the specified property is a breaking change. -func BreakingAdded(component, property string) bool { - return IsBreakingChange(component, property, ChangeTypeAdded) -} - -// BreakingModified returns whether modifying the specified property is a breaking change. -func BreakingModified(component, property string) bool { - return IsBreakingChange(component, property, ChangeTypeModified) -} - -// BreakingRemoved returns whether removing the specified property is a breaking change. 
-func BreakingRemoved(component, property string) bool { - return IsBreakingChange(component, property, ChangeTypeRemoved) -} - -func boolPtr(b bool) *bool { - return &b -} - -func rule(added, modified, removed bool) *BreakingChangeRule { - return &BreakingChangeRule{ - Added: boolPtr(added), - Modified: boolPtr(modified), - Removed: boolPtr(removed), - } -} - -// jsonTagName extracts the field name from a JSON struct tag. -func jsonTagName(field reflect.StructField) string { - tag := field.Tag.Get("json") - if tag == "" || tag == "-" { - return "" - } - if idx := strings.Index(tag, ","); idx != -1 { - return tag[:idx] - } - return tag -} - -// mergeRulesStruct merges all *BreakingChangeRule fields from override into base. -func mergeRulesStruct(base, override reflect.Value) { - rulesType := base.Type() - for i := 0; i < rulesType.NumField(); i++ { - field := rulesType.Field(i) - if field.Type != ruleType { - continue - } - bField := base.Field(i) - oField := override.Field(i) - if bField.CanSet() { - bField.Set(reflect.ValueOf(mergeRule( - bField.Interface().(*BreakingChangeRule), - oField.Interface().(*BreakingChangeRule), - ))) - } - } -} - -// addRulesToCache adds all *BreakingChangeRule fields from a rule struct to the cache. -func addRulesToCache(cache map[string]*BreakingChangeRule, compName string, rulesVal reflect.Value) { - rulesType := rulesVal.Type() - for i := 0; i < rulesType.NumField(); i++ { - field := rulesType.Field(i) - fVal := rulesVal.Field(i) - - propName := jsonTagName(field) - if propName == "" || field.Type != ruleType { - continue - } - cache[compName+"."+propName] = fVal.Interface().(*BreakingChangeRule) - } -} - -// mergeRule merges an override rule into a base rule. -// nil values in override are ignored, non-nil values replace the base. 
-func mergeRule(base, override *BreakingChangeRule) *BreakingChangeRule { - if override == nil { - return base - } - if base == nil { - return override - } - result := &BreakingChangeRule{ - Added: base.Added, - Modified: base.Modified, - Removed: base.Removed, - } - if override.Added != nil { - result.Added = override.Added - } - if override.Modified != nil { - result.Modified = override.Modified - } - if override.Removed != nil { - result.Removed = override.Removed - } - return result -} - -// buildDefaultRules creates the actual default rules configuration. -func buildDefaultRules() *BreakingRulesConfig { - return &BreakingRulesConfig{ - OpenAPI: rule(true, true, true), - JSONSchemaDialect: rule(true, true, true), - Self: rule(true, true, true), - Components: rule(false, false, true), - - Info: &InfoRules{ - Title: rule(false, false, false), - Summary: rule(false, false, false), - Description: rule(false, false, false), - TermsOfService: rule(false, false, false), - Version: rule(false, false, false), - Contact: rule(false, false, false), - License: rule(false, false, false), - }, - - Contact: &ContactRules{ - URL: rule(false, false, false), - Name: rule(false, false, false), - Email: rule(false, false, false), - }, - - License: &LicenseRules{ - URL: rule(false, false, false), - Name: rule(false, false, false), - Identifier: rule(false, false, false), - }, - - Paths: &PathsRules{ - Path: rule(false, false, true), - }, - - PathItem: &PathItemRules{ - Description: rule(false, false, false), - Summary: rule(false, false, false), - Get: rule(false, false, true), - Put: rule(false, false, true), - Post: rule(false, false, true), - Delete: rule(false, false, true), - Options: rule(false, false, true), - Head: rule(false, false, true), - Patch: rule(false, false, true), - Trace: rule(false, false, true), - Query: rule(false, false, true), - AdditionalOperations: rule(false, false, true), - Servers: rule(false, false, true), - Parameters: rule(false, false, true), - }, 
- - Operation: &OperationRules{ - Tags: rule(false, false, true), - Summary: rule(false, false, false), - Description: rule(false, false, false), - Deprecated: rule(false, false, false), - OperationID: rule(true, true, true), - ExternalDocs: rule(false, false, false), - Responses: rule(false, false, true), - Parameters: rule(false, false, true), - Security: rule(false, false, true), - RequestBody: rule(true, false, true), - Callbacks: rule(false, false, true), - Servers: rule(false, false, true), - }, - - Parameter: &ParameterRules{ - Name: rule(true, true, true), - In: rule(true, true, true), - Description: rule(false, false, false), - Required: rule(false, true, false), - AllowEmptyValue: rule(true, true, true), - Style: rule(false, false, false), - AllowReserved: rule(true, true, true), - Explode: rule(false, false, false), - Deprecated: rule(false, false, false), - Example: rule(false, false, false), - Schema: rule(true, false, true), - Items: rule(true, false, true), - }, - - RequestBody: &RequestBodyRules{ - Description: rule(false, false, false), - Required: rule(true, true, true), - }, - - Responses: &ResponsesRules{ - Default: rule(false, false, true), - Codes: rule(false, false, true), - }, - - Response: &ResponseRules{ - Description: rule(false, false, false), - Summary: rule(false, false, false), - Schema: rule(true, false, true), - Examples: rule(false, false, false), - }, - - MediaType: &MediaTypeRules{ - Example: rule(false, false, false), - Schema: rule(true, false, true), - ItemSchema: rule(true, false, true), - ItemEncoding: rule(false, false, true), - }, - - Encoding: &EncodingRules{ - ContentType: rule(true, true, true), - Style: rule(true, true, true), - Explode: rule(true, true, true), - AllowReserved: rule(false, false, false), - }, - - Header: &HeaderRules{ - Description: rule(false, false, false), - Style: rule(false, false, false), - AllowReserved: rule(false, false, false), - AllowEmptyValue: rule(true, true, true), - Explode: rule(false, 
false, false), - Example: rule(false, false, false), - Deprecated: rule(false, false, false), - Required: rule(true, true, true), - Schema: rule(true, false, true), - Items: rule(true, false, true), - }, - - Schemas: rule(true, false, true), - Servers: rule(false, false, true), - - Schema: &SchemaRules{ - Ref: rule(false, false, false), - Type: rule(false, true, false), - Title: rule(false, false, false), - Description: rule(false, false, false), - Format: rule(false, true, false), - Maximum: rule(false, true, false), - Minimum: rule(false, true, false), - ExclusiveMaximum: rule(false, true, false), - ExclusiveMinimum: rule(false, true, false), - MaxLength: rule(false, true, false), - MinLength: rule(false, true, false), - Pattern: rule(false, true, false), - MaxItems: rule(false, true, false), - MinItems: rule(false, true, false), - MaxProperties: rule(false, true, false), - MinProperties: rule(false, true, false), - UniqueItems: rule(false, true, false), - MultipleOf: rule(false, true, false), - ContentEncoding: rule(false, true, false), - ContentMediaType: rule(false, true, false), - Default: rule(false, true, false), - Const: rule(false, true, false), - Nullable: rule(false, true, false), - ReadOnly: rule(false, true, false), - WriteOnly: rule(false, true, false), - Deprecated: rule(false, false, false), - Example: rule(false, false, false), - Examples: rule(false, false, false), - Required: rule(true, false, true), - Enum: rule(false, false, true), - Properties: rule(false, false, true), - AdditionalProperties: rule(true, true, true), - AllOf: rule(false, false, true), - AnyOf: rule(false, false, true), - OneOf: rule(false, false, true), - PrefixItems: rule(false, false, true), - Items: rule(true, true, true), - Discriminator: rule(true, false, true), - ExternalDocs: rule(false, false, false), - Not: rule(true, false, true), - If: rule(true, false, true), - Then: rule(true, false, true), - Else: rule(true, false, true), - PropertyNames: rule(true, false, 
true), - Contains: rule(true, false, true), - UnevaluatedItems: rule(true, false, true), - UnevaluatedProperties: rule(true, true, true), - DynamicAnchor: rule(false, true, true), // $dynamicAnchor: modification/removal is breaking - DynamicRef: rule(false, true, true), // $dynamicRef: modification/removal is breaking - Id: rule(true, true, true), // $id: all changes are breaking (affects reference resolution) - Comment: rule(false, false, false), // $comment: does not affect API contracts - ContentSchema: rule(true, true, true), // contentSchema: affects content validation - Vocabulary: rule(true, true, true), // $vocabulary: affects schema interpretation - DependentRequired: rule(false, true, true), - XML: rule(false, false, true), - SchemaDialect: rule(true, true, true), - }, - - Discriminator: &DiscriminatorRules{ - PropertyName: rule(true, true, true), - DefaultMapping: rule(true, true, true), - Mapping: rule(false, true, true), - }, - - XML: &XMLRules{ - Name: rule(true, true, true), - Namespace: rule(true, true, true), - Prefix: rule(true, true, true), - Attribute: rule(true, true, true), - NodeType: rule(true, true, true), - Wrapped: rule(true, true, true), - }, - - Server: &ServerRules{ - Name: rule(true, true, true), - URL: rule(true, true, true), - Description: rule(false, false, false), - }, - - ServerVariable: &ServerVariableRules{ - Enum: rule(false, false, true), - Default: rule(true, true, true), - Description: rule(false, false, false), - }, - - Tags: rule(false, false, true), - - Security: rule(false, false, true), - - Tag: &TagRules{ - Name: rule(false, true, true), - Summary: rule(false, false, false), - Description: rule(false, false, false), - Parent: rule(true, true, true), - Kind: rule(false, false, false), - ExternalDocs: rule(false, false, false), - }, - - ExternalDocs: &ExternalDocsRules{ - URL: rule(false, false, false), - Description: rule(false, false, false), - }, - - SecurityScheme: &SecuritySchemeRules{ - Type: rule(true, true, 
true), - Description: rule(false, false, false), - Name: rule(true, true, true), - In: rule(true, true, true), - Scheme: rule(true, true, true), - BearerFormat: rule(false, false, false), - OpenIDConnectURL: rule(false, false, false), - OAuth2MetadataUrl: rule(false, false, false), - Flows: rule(false, false, true), - Scopes: rule(false, false, true), - Flow: rule(true, true, true), // Swagger 2.0 - AuthorizationURL: rule(true, true, true), // Swagger 2.0 - TokenURL: rule(true, true, true), // Swagger 2.0 - Deprecated: rule(false, false, false), - }, - - SecurityRequirement: &SecurityRequirementRules{ - Schemes: rule(false, false, true), - Scopes: rule(false, false, true), - }, - - OAuthFlows: &OAuthFlowsRules{ - Implicit: rule(false, false, true), - Password: rule(false, false, true), - ClientCredentials: rule(false, false, true), - AuthorizationCode: rule(false, false, true), - Device: rule(false, false, true), - }, - - OAuthFlow: &OAuthFlowRules{ - AuthorizationURL: rule(true, true, true), - TokenURL: rule(true, true, true), - RefreshURL: rule(true, true, true), - Scopes: rule(false, true, true), - }, - - Callback: &CallbackRules{ - Expressions: rule(false, false, true), - }, - - Link: &LinkRules{ - OperationRef: rule(true, true, true), - OperationID: rule(true, true, true), - RequestBody: rule(true, true, true), - Description: rule(false, false, false), - Server: rule(true, false, true), - Parameters: rule(true, true, true), - }, - - Example: &ExampleRules{ - Summary: rule(false, false, false), - Description: rule(false, false, false), - Value: rule(false, false, false), - ExternalValue: rule(false, false, false), - DataValue: rule(false, false, false), - SerializedValue: rule(false, false, false), - }, - } -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/breaking_rules_config.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/breaking_rules_config.go deleted file mode 100644 index b18834718ac..00000000000 --- 
a/vendor/github.com/pb33f/libopenapi/what-changed/model/breaking_rules_config.go +++ /dev/null @@ -1,449 +0,0 @@ -// Copyright 2022-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - "fmt" - "reflect" - "strings" - "sync" - - "go.yaml.in/yaml/v4" -) - -// BreakingRulesConfig holds all breaking change rules organized by OpenAPI component. -// Structure mirrors the OpenAPI 3.x specification. -type BreakingRulesConfig struct { - OpenAPI *BreakingChangeRule `json:"openapi,omitempty" yaml:"openapi,omitempty"` - JSONSchemaDialect *BreakingChangeRule `json:"jsonSchemaDialect,omitempty" yaml:"jsonSchemaDialect,omitempty"` - Self *BreakingChangeRule `json:"$self,omitempty" yaml:"$self,omitempty"` - Components *BreakingChangeRule `json:"components,omitempty" yaml:"components,omitempty"` - Info *InfoRules `json:"info,omitempty" yaml:"info,omitempty"` - Contact *ContactRules `json:"contact,omitempty" yaml:"contact,omitempty"` - License *LicenseRules `json:"license,omitempty" yaml:"license,omitempty"` - Paths *PathsRules `json:"paths,omitempty" yaml:"paths,omitempty"` - PathItem *PathItemRules `json:"pathItem,omitempty" yaml:"pathItem,omitempty"` - Operation *OperationRules `json:"operation,omitempty" yaml:"operation,omitempty"` - Parameter *ParameterRules `json:"parameter,omitempty" yaml:"parameter,omitempty"` - RequestBody *RequestBodyRules `json:"requestBody,omitempty" yaml:"requestBody,omitempty"` - Responses *ResponsesRules `json:"responses,omitempty" yaml:"responses,omitempty"` - Response *ResponseRules `json:"response,omitempty" yaml:"response,omitempty"` - MediaType *MediaTypeRules `json:"mediaType,omitempty" yaml:"mediaType,omitempty"` - Encoding *EncodingRules `json:"encoding,omitempty" yaml:"encoding,omitempty"` - Header *HeaderRules `json:"header,omitempty" yaml:"header,omitempty"` - Schema *SchemaRules `json:"schema,omitempty" yaml:"schema,omitempty"` - Schemas *BreakingChangeRule 
`json:"schemas,omitempty" yaml:"schemas,omitempty"` - Servers *BreakingChangeRule `json:"servers,omitempty" yaml:"servers,omitempty"` - Discriminator *DiscriminatorRules `json:"discriminator,omitempty" yaml:"discriminator,omitempty"` - XML *XMLRules `json:"xml,omitempty" yaml:"xml,omitempty"` - Server *ServerRules `json:"server,omitempty" yaml:"server,omitempty"` - ServerVariable *ServerVariableRules `json:"serverVariable,omitempty" yaml:"serverVariable,omitempty"` - Tags *BreakingChangeRule `json:"tags,omitempty" yaml:"tags,omitempty"` - Tag *TagRules `json:"tag,omitempty" yaml:"tag,omitempty"` - Security *BreakingChangeRule `json:"security,omitempty" yaml:"security,omitempty"` - ExternalDocs *ExternalDocsRules `json:"externalDocs,omitempty" yaml:"externalDocs,omitempty"` - SecurityScheme *SecuritySchemeRules `json:"securityScheme,omitempty" yaml:"securityScheme,omitempty"` - SecurityRequirement *SecurityRequirementRules `json:"securityRequirement,omitempty" yaml:"securityRequirement,omitempty"` - OAuthFlows *OAuthFlowsRules `json:"oauthFlows,omitempty" yaml:"oauthFlows,omitempty"` - OAuthFlow *OAuthFlowRules `json:"oauthFlow,omitempty" yaml:"oauthFlow,omitempty"` - Callback *CallbackRules `json:"callback,omitempty" yaml:"callback,omitempty"` - Link *LinkRules `json:"link,omitempty" yaml:"link,omitempty"` - Example *ExampleRules `json:"example,omitempty" yaml:"example,omitempty"` - - ruleCache map[string]*BreakingChangeRule - cacheOnce sync.Once -} - -// Merge applies user overrides to the configuration. Only non-nil values from -// the override config replace the current values. Uses reflection to reduce boilerplate. 
-func (c *BreakingRulesConfig) Merge(override *BreakingRulesConfig) { - if override == nil { - return - } - - cVal := reflect.ValueOf(c).Elem() - oVal := reflect.ValueOf(override).Elem() - - for i := 0; i < configType.NumField(); i++ { - field := configType.Field(i) - cField := cVal.Field(i) - oField := oVal.Field(i) - - if !cField.CanSet() { - continue - } - - if field.Type == ruleType { - cField.Set(reflect.ValueOf(mergeRule( - cField.Interface().(*BreakingChangeRule), - oField.Interface().(*BreakingChangeRule), - ))) - continue - } - - if field.Type.Kind() == reflect.Ptr && field.Type.Elem().Kind() == reflect.Struct { - if oField.IsNil() { - continue - } - if cField.IsNil() { - cField.Set(reflect.New(field.Type.Elem())) - } - mergeRulesStruct(cField.Elem(), oField.Elem()) - } - } - - c.invalidateCache() -} - -// IsBreaking looks up whether a change is breaking based on the component, property, and change type. -// Returns the configured breaking status, or false if the rule is not found. -func (c *BreakingRulesConfig) IsBreaking(component, property, changeType string) bool { - rule := c.GetRule(component, property) - if rule == nil { - return false - } - - switch changeType { - case ChangeTypeAdded: - if rule.Added != nil { - return *rule.Added - } - case ChangeTypeModified: - if rule.Modified != nil { - return *rule.Modified - } - case ChangeTypeRemoved: - if rule.Removed != nil { - return *rule.Removed - } - } - return false -} - -// GetRule returns the BreakingChangeRule for a given component and property. -// Returns nil if no rule is defined. Uses internal cache for O(1) lookups. -func (c *BreakingRulesConfig) GetRule(component, property string) *BreakingChangeRule { - c.cacheOnce.Do(func() { - c.ruleCache = c.buildRuleCache() - }) - if property == "" { - return c.ruleCache[component] - } - return c.ruleCache[component+"."+property] -} - -// buildRuleCache creates a flat map of all rules for O(1) lookups using reflection. 
-func (c *BreakingRulesConfig) buildRuleCache() map[string]*BreakingChangeRule { - cache := make(map[string]*BreakingChangeRule, 200) - cVal := reflect.ValueOf(c).Elem() - - for i := 0; i < configType.NumField(); i++ { - field := configType.Field(i) - fVal := cVal.Field(i) - - compName := jsonTagName(field) - if compName == "" || !fVal.CanInterface() { - continue - } - - if field.Type == ruleType { - cache[compName] = fVal.Interface().(*BreakingChangeRule) - continue - } - - if field.Type.Kind() == reflect.Ptr && field.Type.Elem().Kind() == reflect.Struct { - if fVal.IsNil() { - continue - } - addRulesToCache(cache, compName, fVal.Elem()) - } - } - return cache -} - -// invalidateCache resets the cache so it will be rebuilt on next access. -func (c *BreakingRulesConfig) invalidateCache() { - c.cacheOnce = sync.Once{} - c.ruleCache = nil -} - -// --- Config Validation --- - -// ConfigValidationError represents a single validation issue in a breaking rules config. -type ConfigValidationError struct { - // Message is a human-readable description of the issue. - Message string - - // Path is the YAML path where the issue was found (e.g., "schema.discriminator"). - Path string - - // Line is the 1-based line number in the YAML source (0 if unknown). - Line int - - // Column is the 1-based column number in the YAML source (0 if unknown). - Column int - - // FoundKey is the misplaced key that was detected. - FoundKey string - - // SuggestedPath is where the key should be placed instead. - SuggestedPath string -} - -// Error implements the error interface. -func (e *ConfigValidationError) Error() string { - if e.Line > 0 { - return fmt.Sprintf("%s (line %d, column %d)", e.Message, e.Line, e.Column) - } - return e.Message -} - -// ConfigValidationResult holds the results of validating a breaking rules config. -type ConfigValidationResult struct { - // Errors contains all validation issues found. 
- Errors []*ConfigValidationError -} - -// HasErrors returns true if any validation errors were found. -func (r *ConfigValidationResult) HasErrors() bool { - return len(r.Errors) > 0 -} - -// Error implements the error interface, joining all errors. -func (r *ConfigValidationResult) Error() string { - if !r.HasErrors() { - return "" - } - msgs := make([]string, len(r.Errors)) - for i, e := range r.Errors { - msgs[i] = e.Error() - } - return strings.Join(msgs, "\n") -} - -// validTopLevelComponents is the set of valid top-level keys in a breaking rules config. -// Built from BreakingRulesConfig struct field tags at init time. -var validTopLevelComponents = buildValidComponentSet() - -// buildValidComponentSet creates a set of valid top-level component names -// by reflecting on the BreakingRulesConfig struct tags. -func buildValidComponentSet() map[string]bool { - result := make(map[string]bool) - t := reflect.TypeOf(BreakingRulesConfig{}) - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - name := jsonTagName(field) - if name != "" && name != "-" { - result[name] = true - } - } - return result -} - -// ValidateBreakingRulesConfigYAML validates raw YAML bytes for a breaking rules config. -// It detects misplaced nested configurations (e.g., "schema.discriminator" should be -// just "discriminator" at the top level) and returns all validation errors found. -// Returns nil if the configuration is valid. 
-func ValidateBreakingRulesConfigYAML(yamlBytes []byte) *ConfigValidationResult { - var rootNode yaml.Node - if err := yaml.Unmarshal(yamlBytes, &rootNode); err != nil { - return &ConfigValidationResult{ - Errors: []*ConfigValidationError{{ - Message: fmt.Sprintf("invalid YAML: %v", err), - }}, - } - } - - result := &ConfigValidationResult{} - validateConfigNode(&rootNode, "", result) - - if result.HasErrors() { - return result - } - return nil -} - -// breakingRuleFields are the valid fields for BreakingChangeRule -var breakingRuleFields = map[string]bool{ - "added": true, - "modified": true, - "removed": true, -} - -// simpleRuleComponents are components that are directly BreakingChangeRule (not nested) -// For these, added/modified/removed at depth 1 is correct (e.g., "openapi.modified: false") -var simpleRuleComponents = map[string]bool{ - "openapi": true, - "jsonSchemaDialect": true, - "$self": true, - "components": true, - "schemas": true, - "servers": true, - "tags": true, - "security": true, -} - -// componentProperties maps each component to its valid property names -// Built from reflection on BreakingRulesConfig struct -var componentProperties = buildComponentPropertiesMap() - -func buildComponentPropertiesMap() map[string]map[string]bool { - result := make(map[string]map[string]bool) - t := reflect.TypeOf(BreakingRulesConfig{}) - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - componentName := jsonTagName(field) - if componentName == "" || componentName == "-" { - continue - } - // Get the field type (pointer to rules struct) - fieldType := field.Type - if fieldType.Kind() == reflect.Ptr { - fieldType = fieldType.Elem() - } - // Build property set for this component - props := make(map[string]bool) - for j := 0; j < fieldType.NumField(); j++ { - propField := fieldType.Field(j) - propName := jsonTagName(propField) - if propName != "" && propName != "-" { - props[propName] = true - } - } - result[componentName] = props - } - return result -} - 
-// isValidPropertyForComponent checks if a property is valid for the given component -func isValidPropertyForComponent(component, property string) bool { - if props, ok := componentProperties[component]; ok { - return props[property] - } - return false -} - -// validateConfigNode recursively walks the YAML tree looking for misplaced configurations. -func validateConfigNode(node *yaml.Node, path string, result *ConfigValidationResult) { - validateConfigNodeWithDepth(node, path, 0, "", result) -} - -// validateConfigNodeWithDepth recursively walks the YAML tree with depth tracking. -// depth 0 = root, depth 1 = under a component (e.g., "paths"), depth 2 = under a property (e.g., "paths.path") -// parentComponent tracks the root-level component we're under (e.g., "paths", "schema", "openapi") -// parentProperty tracks the property we're under within that component (e.g., "path" under "paths") -func validateConfigNodeWithDepth(node *yaml.Node, path string, depth int, parentComponent string, result *ConfigValidationResult) { - validateConfigNodeWithDepthAndProperty(node, path, depth, parentComponent, "", result) -} - -// validateConfigNodeWithDepthAndProperty is the internal recursive validator. 
-func validateConfigNodeWithDepthAndProperty(node *yaml.Node, path string, depth int, parentComponent, parentProperty string, result *ConfigValidationResult) { - // Document nodes contain a single content node - if node.Kind == yaml.DocumentNode && len(node.Content) > 0 { - validateConfigNodeWithDepthAndProperty(node.Content[0], path, depth, parentComponent, parentProperty, result) - return - } - - // Only process mapping nodes (objects) - if node.Kind != yaml.MappingNode { - return - } - - // Process key-value pairs in the mapping - for i := 0; i < len(node.Content); i += 2 { - keyNode := node.Content[i] - valueNode := node.Content[i+1] - - if keyNode.Kind != yaml.ScalarNode { - continue - } - - key := keyNode.Value - currentPath := buildConfigPath(path, key) - - // Track the parent component when we enter a top-level component - currentParent := parentComponent - currentProperty := parentProperty - if depth == 0 && validTopLevelComponents[key] { - currentParent = key - currentProperty = "" - } else if depth == 1 && parentComponent != "" { - // At depth 1, the key is a property name under a component - currentProperty = key - } - - // If we're already nested under a component and find another top-level component name, - // this is a misplacement error UNLESS the key is a valid property of the parent component. - // For example, "parameter.example" is valid because ParameterRules has an Example property, - // even though "example" is also a top-level component. 
- if path != "" && validTopLevelComponents[key] && !isValidPropertyForComponent(parentComponent, key) { - result.Errors = append(result.Errors, &ConfigValidationError{ - Message: fmt.Sprintf("found '%s' nested under '%s'; '%s' should be a top-level key", key, path, key), - Path: currentPath, - Line: keyNode.Line, - Column: keyNode.Column, - FoundKey: key, - SuggestedPath: key, - }) - } - - // Check for breaking rule fields (added/modified/removed) at wrong depth - // depth 1 = directly under a component (e.g., "paths.added" is wrong) - // These should only appear at depth 2 (e.g., "paths.path.added" is correct) - // Exception: "simple" components like openapi, schemas, servers are directly BreakingChangeRule - // so "openapi.modified: false" is correct - if depth == 1 && breakingRuleFields[key] && !simpleRuleComponents[parentComponent] { - // Extract the component name from the path - componentName := path - result.Errors = append(result.Errors, &ConfigValidationError{ - Message: fmt.Sprintf("'%s' found directly under '%s'; breaking rules must be nested under a property name (e.g., '%s.path.%s' not '%s.%s')", key, componentName, componentName, key, componentName, key), - Path: currentPath, - Line: keyNode.Line, - Column: keyNode.Column, - FoundKey: key, - SuggestedPath: fmt.Sprintf("%s..%s", componentName, key), - }) - } - - // At depth 2+, check if we're trying to add invalid keys under a BreakingChangeRule. - // A BreakingChangeRule (like schema.discriminator) can only have added/modified/removed. - // If we find anything else (like "propertyName"), it's someone trying to configure - // a component's sub-rules under the wrong parent. - // - // Only check this if: - // 1. The parentProperty is also a valid top-level component name (like "discriminator") - // 2. The key is a property of that top-level component (like "propertyName" is a property of DiscriminatorRules) - // 3. 
The key is NOT a breaking rule field (added/modified/removed) - // - // This catches cases like schema.discriminator.propertyName where: - // - schema.discriminator is valid (SchemaRules has Discriminator property) - // - BUT propertyName under it is wrong (should be discriminator.propertyName at top level) - if depth >= 2 && parentProperty != "" && !breakingRuleFields[key] && validTopLevelComponents[parentProperty] { - // Check if this key is a property of the top-level component with the same name as parentProperty - if props, ok := componentProperties[parentProperty]; ok && props[key] { - result.Errors = append(result.Errors, &ConfigValidationError{ - Message: fmt.Sprintf("'%s' is incorrectly nested under '%s'; move your '%s:' block to the top level of your config (current: %s.%s.%s, should be: %s.%s)", parentProperty, parentComponent, parentProperty, parentComponent, parentProperty, key, parentProperty, key), - Path: currentPath, - Line: keyNode.Line, - Column: keyNode.Column, - FoundKey: parentProperty, - SuggestedPath: parentProperty, - }) - } - } - - // Recurse into nested mappings - if valueNode.Kind == yaml.MappingNode { - validateConfigNodeWithDepthAndProperty(valueNode, currentPath, depth+1, currentParent, currentProperty, result) - } - } -} - -// buildConfigPath constructs a dotted path from parent and child components. -func buildConfigPath(parent, child string) string { - if parent == "" { - return child - } - return parent + "." 
+ child -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/breaking_rules_constants.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/breaking_rules_constants.go deleted file mode 100644 index 36544361a11..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/breaking_rules_constants.go +++ /dev/null @@ -1,209 +0,0 @@ -// Copyright 2022-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - "reflect" - "sync" -) - -// Component name constants for breaking change rule lookups. -// These match the JSON keys used in BreakingRulesConfig. -const ( - CompCallback = "callback" - CompComponents = "components" - CompContact = "contact" - CompDiscriminator = "discriminator" - CompEncoding = "encoding" - CompExample = "example" - CompExternalDocs = "externalDocs" - CompHeader = "header" - CompInfo = "info" - CompJSONSchemaDialect = "jsonSchemaDialect" - CompLicense = "license" - CompLink = "link" - CompMediaType = "mediaType" - CompOAuthFlow = "oauthFlow" - CompOAuthFlows = "oauthFlows" - CompOpenAPI = "openapi" - CompOperation = "operation" - CompParameter = "parameter" - CompPathItem = "pathItem" - CompPaths = "paths" - CompRequestBody = "requestBody" - CompResponse = "response" - CompResponses = "responses" - CompSchema = "schema" - CompSchemas = "schemas" - CompSecurity = "security" - CompSecurityRequirement = "securityRequirement" - CompServers = "servers" - CompSelf = "$self" - CompTags = "tags" - CompSecurityScheme = "securityScheme" - CompServer = "server" - CompServerVariable = "serverVariable" - CompTag = "tag" - CompXML = "xml" -) - -// Property name constants for breaking change rule lookups. -// These match the JSON keys used in the various *Rules structs. 
-const ( - PropAdditionalOperations = "additionalOperations" - PropAdditionalProperties = "additionalProperties" - PropAllOf = "allOf" - PropAllowEmptyValue = "allowEmptyValue" - PropAllowReserved = "allowReserved" - PropAnyOf = "anyOf" - PropAttribute = "attribute" - PropAuthorizationCode = "authorizationCode" - PropAuthorizationURL = "authorizationUrl" - PropBearerFormat = "bearerFormat" - PropCallbacks = "callbacks" - PropCodes = "codes" - PropClientCredentials = "clientCredentials" - PropCollectionFormat = "collectionFormat" - PropComment = "$comment" - PropConst = "const" - PropContact = "contact" - PropContains = "contains" - PropContentEncoding = "contentEncoding" - PropContentMediaType = "contentMediaType" - PropContentSchema = "contentSchema" - PropContentType = "contentType" - PropDataValue = "dataValue" - PropDefault = "default" - PropDefaultMapping = "defaultMapping" - PropDelete = "delete" - PropDeprecated = "deprecated" - PropDependentRequired = "dependentRequired" - PropDescription = "description" - PropDevice = "device" - PropDiscriminator = "discriminator" - PropDynamicAnchor = "$dynamicAnchor" - PropDynamicRef = "$dynamicRef" - PropId = "$id" - PropElse = "else" - PropEmail = "email" - PropEnum = "enum" - PropExample = "example" - PropExamples = "examples" - PropExclusiveMaximum = "exclusiveMaximum" - PropExclusiveMinimum = "exclusiveMinimum" - PropExplode = "explode" - PropExpressions = "expressions" - PropExternalDocs = "externalDocs" - PropExternalValue = "externalValue" - PropFlow = "flow" - PropFlows = "flows" - PropFormat = "format" - PropGet = "get" - PropHead = "head" - PropIdentifier = "identifier" - PropIf = "if" - PropImplicit = "implicit" - PropIn = "in" - PropItems = "items" - PropItemEncoding = "itemEncoding" - PropItemSchema = "itemSchema" - PropKind = "kind" - PropLicense = "license" - PropMapping = "mapping" - PropMaxItems = "maxItems" - PropMaxLength = "maxLength" - PropMaxProperties = "maxProperties" - PropMaximum = "maximum" - 
PropMinItems = "minItems" - PropMinLength = "minLength" - PropMinProperties = "minProperties" - PropMinimum = "minimum" - PropMultipleOf = "multipleOf" - PropName = "name" - PropNamespace = "namespace" - PropNodeType = "nodeType" - PropNot = "not" - PropNullable = "nullable" - PropOAuth2MetadataUrl = "oauth2MetadataUrl" - PropOneOf = "oneOf" - PropOpenIDConnectURL = "openIdConnectUrl" - PropOperationID = "operationId" - PropOperationRef = "operationRef" - PropOptions = "options" - PropParameters = "parameters" - PropParent = "parent" - PropPassword = "password" - PropPatch = "patch" - PropPath = "path" - PropPattern = "pattern" - PropPost = "post" - PropPrefix = "prefix" - PropPrefixItems = "prefixItems" - PropProperties = "properties" - PropPropertyName = "propertyName" - PropPropertyNames = "propertyNames" - PropPut = "put" - PropQuery = "query" - PropReadOnly = "readOnly" - PropRef = "$ref" - PropRefreshURL = "refreshUrl" - PropRequired = "required" - PropRequestBody = "requestBody" - PropResponses = "responses" - PropScheme = "scheme" - PropSchemaDialect = "schemaDialect" - PropSchemas = "schemas" - PropSchemes = "schemes" - PropSchema = "schema" - PropScopes = "scopes" - PropSecurity = "security" - PropSelf = "$self" - PropSerializedValue = "serializedValue" - PropServer = "server" - PropServers = "servers" - PropStyle = "style" - PropSummary = "summary" - PropTags = "tags" - PropTermsOfService = "termsOfService" - PropThen = "then" - PropTitle = "title" - PropTokenURL = "tokenUrl" - PropTrace = "trace" - PropType = "type" - PropUnevaluatedItems = "unevaluatedItems" - PropUnevaluatedProps = "unevaluatedProperties" - PropUniqueItems = "uniqueItems" - PropURL = "url" - PropValue = "value" - PropVersion = "version" - PropVocabulary = "$vocabulary" - PropWrapped = "wrapped" - PropWriteOnly = "writeOnly" - PropXML = "xml" -) - -// ChangeType constants for IsBreaking lookup -const ( - ChangeTypeAdded = "added" - ChangeTypeModified = "modified" - ChangeTypeRemoved = 
"removed" -) - -// reflection types cached at init to avoid repeated TypeOf calls in hot paths -var ( - ruleType = reflect.TypeOf((*BreakingChangeRule)(nil)) - configType = reflect.TypeOf(BreakingRulesConfig{}) -) - -// singleton cache for default rules to avoid repeated allocations -var ( - defaultRulesOnce sync.Once - defaultRulesCache *BreakingRulesConfig -) - -// active config used by comparison functions -var ( - activeConfigMu sync.RWMutex - activeConfig *BreakingRulesConfig -) diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/breaking_rules_model.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/breaking_rules_model.go deleted file mode 100644 index 89cc2bf69b6..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/breaking_rules_model.go +++ /dev/null @@ -1,315 +0,0 @@ -// Copyright 2022-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -// BreakingChangeRule holds the breaking status for a property's change types. -// nil values mean "use default" - only set values override the defaults. -type BreakingChangeRule struct { - Added *bool `json:"added,omitempty" yaml:"added,omitempty"` - Modified *bool `json:"modified,omitempty" yaml:"modified,omitempty"` - Removed *bool `json:"removed,omitempty" yaml:"removed,omitempty"` -} - -// InfoRules defines breaking rules for the Info object properties. 
-type InfoRules struct { - Title *BreakingChangeRule `json:"title,omitempty" yaml:"title,omitempty"` - Summary *BreakingChangeRule `json:"summary,omitempty" yaml:"summary,omitempty"` - Description *BreakingChangeRule `json:"description,omitempty" yaml:"description,omitempty"` - TermsOfService *BreakingChangeRule `json:"termsOfService,omitempty" yaml:"termsOfService,omitempty"` - Version *BreakingChangeRule `json:"version,omitempty" yaml:"version,omitempty"` - Contact *BreakingChangeRule `json:"contact,omitempty" yaml:"contact,omitempty"` - License *BreakingChangeRule `json:"license,omitempty" yaml:"license,omitempty"` -} - -// ContactRules defines breaking rules for the Contact object properties. -type ContactRules struct { - URL *BreakingChangeRule `json:"url,omitempty" yaml:"url,omitempty"` - Name *BreakingChangeRule `json:"name,omitempty" yaml:"name,omitempty"` - Email *BreakingChangeRule `json:"email,omitempty" yaml:"email,omitempty"` -} - -// LicenseRules defines breaking rules for the License object properties. -type LicenseRules struct { - URL *BreakingChangeRule `json:"url,omitempty" yaml:"url,omitempty"` - Name *BreakingChangeRule `json:"name,omitempty" yaml:"name,omitempty"` - Identifier *BreakingChangeRule `json:"identifier,omitempty" yaml:"identifier,omitempty"` -} - -// PathsRules defines breaking rules for the Paths object properties. -type PathsRules struct { - Path *BreakingChangeRule `json:"path,omitempty" yaml:"path,omitempty"` -} - -// PathItemRules defines breaking rules for the Path Item object properties. 
-type PathItemRules struct { - Description *BreakingChangeRule `json:"description,omitempty" yaml:"description,omitempty"` - Summary *BreakingChangeRule `json:"summary,omitempty" yaml:"summary,omitempty"` - Get *BreakingChangeRule `json:"get,omitempty" yaml:"get,omitempty"` - Put *BreakingChangeRule `json:"put,omitempty" yaml:"put,omitempty"` - Post *BreakingChangeRule `json:"post,omitempty" yaml:"post,omitempty"` - Delete *BreakingChangeRule `json:"delete,omitempty" yaml:"delete,omitempty"` - Options *BreakingChangeRule `json:"options,omitempty" yaml:"options,omitempty"` - Head *BreakingChangeRule `json:"head,omitempty" yaml:"head,omitempty"` - Patch *BreakingChangeRule `json:"patch,omitempty" yaml:"patch,omitempty"` - Trace *BreakingChangeRule `json:"trace,omitempty" yaml:"trace,omitempty"` - Query *BreakingChangeRule `json:"query,omitempty" yaml:"query,omitempty"` - AdditionalOperations *BreakingChangeRule `json:"additionalOperations,omitempty" yaml:"additionalOperations,omitempty"` - Servers *BreakingChangeRule `json:"servers,omitempty" yaml:"servers,omitempty"` - Parameters *BreakingChangeRule `json:"parameters,omitempty" yaml:"parameters,omitempty"` -} - -// OperationRules defines breaking rules for the Operation object properties. 
-type OperationRules struct { - Tags *BreakingChangeRule `json:"tags,omitempty" yaml:"tags,omitempty"` - Summary *BreakingChangeRule `json:"summary,omitempty" yaml:"summary,omitempty"` - Description *BreakingChangeRule `json:"description,omitempty" yaml:"description,omitempty"` - Deprecated *BreakingChangeRule `json:"deprecated,omitempty" yaml:"deprecated,omitempty"` - OperationID *BreakingChangeRule `json:"operationId,omitempty" yaml:"operationId,omitempty"` - ExternalDocs *BreakingChangeRule `json:"externalDocs,omitempty" yaml:"externalDocs,omitempty"` - Responses *BreakingChangeRule `json:"responses,omitempty" yaml:"responses,omitempty"` - Parameters *BreakingChangeRule `json:"parameters,omitempty" yaml:"parameters,omitempty"` - Security *BreakingChangeRule `json:"security,omitempty" yaml:"security,omitempty"` - RequestBody *BreakingChangeRule `json:"requestBody,omitempty" yaml:"requestBody,omitempty"` - Callbacks *BreakingChangeRule `json:"callbacks,omitempty" yaml:"callbacks,omitempty"` - Servers *BreakingChangeRule `json:"servers,omitempty" yaml:"servers,omitempty"` -} - -// ParameterRules defines breaking rules for the Parameter object properties. 
-type ParameterRules struct { - Name *BreakingChangeRule `json:"name,omitempty" yaml:"name,omitempty"` - In *BreakingChangeRule `json:"in,omitempty" yaml:"in,omitempty"` - Description *BreakingChangeRule `json:"description,omitempty" yaml:"description,omitempty"` - Required *BreakingChangeRule `json:"required,omitempty" yaml:"required,omitempty"` - AllowEmptyValue *BreakingChangeRule `json:"allowEmptyValue,omitempty" yaml:"allowEmptyValue,omitempty"` - Style *BreakingChangeRule `json:"style,omitempty" yaml:"style,omitempty"` - AllowReserved *BreakingChangeRule `json:"allowReserved,omitempty" yaml:"allowReserved,omitempty"` - Explode *BreakingChangeRule `json:"explode,omitempty" yaml:"explode,omitempty"` - Deprecated *BreakingChangeRule `json:"deprecated,omitempty" yaml:"deprecated,omitempty"` - Example *BreakingChangeRule `json:"example,omitempty" yaml:"example,omitempty"` - Schema *BreakingChangeRule `json:"schema,omitempty" yaml:"schema,omitempty"` - Items *BreakingChangeRule `json:"items,omitempty" yaml:"items,omitempty"` -} - -// RequestBodyRules defines breaking rules for the Request Body object properties. -type RequestBodyRules struct { - Description *BreakingChangeRule `json:"description,omitempty" yaml:"description,omitempty"` - Required *BreakingChangeRule `json:"required,omitempty" yaml:"required,omitempty"` -} - -// ResponsesRules defines breaking rules for the Responses object properties. -type ResponsesRules struct { - Default *BreakingChangeRule `json:"default,omitempty" yaml:"default,omitempty"` - Codes *BreakingChangeRule `json:"codes,omitempty" yaml:"codes,omitempty"` -} - -// ResponseRules defines breaking rules for the Response object properties. 
-type ResponseRules struct { - Description *BreakingChangeRule `json:"description,omitempty" yaml:"description,omitempty"` - Summary *BreakingChangeRule `json:"summary,omitempty" yaml:"summary,omitempty"` - Schema *BreakingChangeRule `json:"schema,omitempty" yaml:"schema,omitempty"` - Examples *BreakingChangeRule `json:"examples,omitempty" yaml:"examples,omitempty"` -} - -// MediaTypeRules defines breaking rules for the Media Type object properties. -type MediaTypeRules struct { - Example *BreakingChangeRule `json:"example,omitempty" yaml:"example,omitempty"` - Schema *BreakingChangeRule `json:"schema,omitempty" yaml:"schema,omitempty"` - ItemSchema *BreakingChangeRule `json:"itemSchema,omitempty" yaml:"itemSchema,omitempty"` - ItemEncoding *BreakingChangeRule `json:"itemEncoding,omitempty" yaml:"itemEncoding,omitempty"` -} - -// EncodingRules defines breaking rules for the Encoding object properties. -type EncodingRules struct { - ContentType *BreakingChangeRule `json:"contentType,omitempty" yaml:"contentType,omitempty"` - Style *BreakingChangeRule `json:"style,omitempty" yaml:"style,omitempty"` - Explode *BreakingChangeRule `json:"explode,omitempty" yaml:"explode,omitempty"` - AllowReserved *BreakingChangeRule `json:"allowReserved,omitempty" yaml:"allowReserved,omitempty"` -} - -// HeaderRules defines breaking rules for the Header object properties. 
-type HeaderRules struct { - Description *BreakingChangeRule `json:"description,omitempty" yaml:"description,omitempty"` - Style *BreakingChangeRule `json:"style,omitempty" yaml:"style,omitempty"` - AllowReserved *BreakingChangeRule `json:"allowReserved,omitempty" yaml:"allowReserved,omitempty"` - AllowEmptyValue *BreakingChangeRule `json:"allowEmptyValue,omitempty" yaml:"allowEmptyValue,omitempty"` - Explode *BreakingChangeRule `json:"explode,omitempty" yaml:"explode,omitempty"` - Example *BreakingChangeRule `json:"example,omitempty" yaml:"example,omitempty"` - Deprecated *BreakingChangeRule `json:"deprecated,omitempty" yaml:"deprecated,omitempty"` - Required *BreakingChangeRule `json:"required,omitempty" yaml:"required,omitempty"` - Schema *BreakingChangeRule `json:"schema,omitempty" yaml:"schema,omitempty"` - Items *BreakingChangeRule `json:"items,omitempty" yaml:"items,omitempty"` -} - -// SchemaRules defines breaking rules for the Schema object properties. -type SchemaRules struct { - Ref *BreakingChangeRule `json:"$ref,omitempty" yaml:"$ref,omitempty"` - Type *BreakingChangeRule `json:"type,omitempty" yaml:"type,omitempty"` - Title *BreakingChangeRule `json:"title,omitempty" yaml:"title,omitempty"` - Description *BreakingChangeRule `json:"description,omitempty" yaml:"description,omitempty"` - Format *BreakingChangeRule `json:"format,omitempty" yaml:"format,omitempty"` - Maximum *BreakingChangeRule `json:"maximum,omitempty" yaml:"maximum,omitempty"` - Minimum *BreakingChangeRule `json:"minimum,omitempty" yaml:"minimum,omitempty"` - ExclusiveMaximum *BreakingChangeRule `json:"exclusiveMaximum,omitempty" yaml:"exclusiveMaximum,omitempty"` - ExclusiveMinimum *BreakingChangeRule `json:"exclusiveMinimum,omitempty" yaml:"exclusiveMinimum,omitempty"` - MaxLength *BreakingChangeRule `json:"maxLength,omitempty" yaml:"maxLength,omitempty"` - MinLength *BreakingChangeRule `json:"minLength,omitempty" yaml:"minLength,omitempty"` - Pattern *BreakingChangeRule 
`json:"pattern,omitempty" yaml:"pattern,omitempty"` - MaxItems *BreakingChangeRule `json:"maxItems,omitempty" yaml:"maxItems,omitempty"` - MinItems *BreakingChangeRule `json:"minItems,omitempty" yaml:"minItems,omitempty"` - MaxProperties *BreakingChangeRule `json:"maxProperties,omitempty" yaml:"maxProperties,omitempty"` - MinProperties *BreakingChangeRule `json:"minProperties,omitempty" yaml:"minProperties,omitempty"` - UniqueItems *BreakingChangeRule `json:"uniqueItems,omitempty" yaml:"uniqueItems,omitempty"` - MultipleOf *BreakingChangeRule `json:"multipleOf,omitempty" yaml:"multipleOf,omitempty"` - ContentEncoding *BreakingChangeRule `json:"contentEncoding,omitempty" yaml:"contentEncoding,omitempty"` - ContentMediaType *BreakingChangeRule `json:"contentMediaType,omitempty" yaml:"contentMediaType,omitempty"` - Default *BreakingChangeRule `json:"default,omitempty" yaml:"default,omitempty"` - Const *BreakingChangeRule `json:"const,omitempty" yaml:"const,omitempty"` - Nullable *BreakingChangeRule `json:"nullable,omitempty" yaml:"nullable,omitempty"` - ReadOnly *BreakingChangeRule `json:"readOnly,omitempty" yaml:"readOnly,omitempty"` - WriteOnly *BreakingChangeRule `json:"writeOnly,omitempty" yaml:"writeOnly,omitempty"` - Deprecated *BreakingChangeRule `json:"deprecated,omitempty" yaml:"deprecated,omitempty"` - Example *BreakingChangeRule `json:"example,omitempty" yaml:"example,omitempty"` - Examples *BreakingChangeRule `json:"examples,omitempty" yaml:"examples,omitempty"` - Required *BreakingChangeRule `json:"required,omitempty" yaml:"required,omitempty"` - Enum *BreakingChangeRule `json:"enum,omitempty" yaml:"enum,omitempty"` - Properties *BreakingChangeRule `json:"properties,omitempty" yaml:"properties,omitempty"` - AdditionalProperties *BreakingChangeRule `json:"additionalProperties,omitempty" yaml:"additionalProperties,omitempty"` - AllOf *BreakingChangeRule `json:"allOf,omitempty" yaml:"allOf,omitempty"` - AnyOf *BreakingChangeRule `json:"anyOf,omitempty" 
yaml:"anyOf,omitempty"` - OneOf *BreakingChangeRule `json:"oneOf,omitempty" yaml:"oneOf,omitempty"` - PrefixItems *BreakingChangeRule `json:"prefixItems,omitempty" yaml:"prefixItems,omitempty"` - Items *BreakingChangeRule `json:"items,omitempty" yaml:"items,omitempty"` - Discriminator *BreakingChangeRule `json:"discriminator,omitempty" yaml:"discriminator,omitempty"` - ExternalDocs *BreakingChangeRule `json:"externalDocs,omitempty" yaml:"externalDocs,omitempty"` - Not *BreakingChangeRule `json:"not,omitempty" yaml:"not,omitempty"` - If *BreakingChangeRule `json:"if,omitempty" yaml:"if,omitempty"` - Then *BreakingChangeRule `json:"then,omitempty" yaml:"then,omitempty"` - Else *BreakingChangeRule `json:"else,omitempty" yaml:"else,omitempty"` - PropertyNames *BreakingChangeRule `json:"propertyNames,omitempty" yaml:"propertyNames,omitempty"` - Contains *BreakingChangeRule `json:"contains,omitempty" yaml:"contains,omitempty"` - UnevaluatedItems *BreakingChangeRule `json:"unevaluatedItems,omitempty" yaml:"unevaluatedItems,omitempty"` - UnevaluatedProperties *BreakingChangeRule `json:"unevaluatedProperties,omitempty" yaml:"unevaluatedProperties,omitempty"` - DynamicAnchor *BreakingChangeRule `json:"$dynamicAnchor,omitempty" yaml:"$dynamicAnchor,omitempty"` - DynamicRef *BreakingChangeRule `json:"$dynamicRef,omitempty" yaml:"$dynamicRef,omitempty"` - Id *BreakingChangeRule `json:"$id,omitempty" yaml:"$id,omitempty"` - Comment *BreakingChangeRule `json:"$comment,omitempty" yaml:"$comment,omitempty"` - ContentSchema *BreakingChangeRule `json:"contentSchema,omitempty" yaml:"contentSchema,omitempty"` - Vocabulary *BreakingChangeRule `json:"$vocabulary,omitempty" yaml:"$vocabulary,omitempty"` - DependentRequired *BreakingChangeRule `json:"dependentRequired,omitempty" yaml:"dependentRequired,omitempty"` - XML *BreakingChangeRule `json:"xml,omitempty" yaml:"xml,omitempty"` - SchemaDialect *BreakingChangeRule `json:"schemaDialect,omitempty" yaml:"schemaDialect,omitempty"` -} - -// 
DiscriminatorRules defines breaking rules for the Discriminator object properties. -type DiscriminatorRules struct { - PropertyName *BreakingChangeRule `json:"propertyName,omitempty" yaml:"propertyName,omitempty"` - DefaultMapping *BreakingChangeRule `json:"defaultMapping,omitempty" yaml:"defaultMapping,omitempty"` - Mapping *BreakingChangeRule `json:"mapping,omitempty" yaml:"mapping,omitempty"` -} - -// XMLRules defines breaking rules for the XML object properties. -type XMLRules struct { - Name *BreakingChangeRule `json:"name,omitempty" yaml:"name,omitempty"` - Namespace *BreakingChangeRule `json:"namespace,omitempty" yaml:"namespace,omitempty"` - Prefix *BreakingChangeRule `json:"prefix,omitempty" yaml:"prefix,omitempty"` - Attribute *BreakingChangeRule `json:"attribute,omitempty" yaml:"attribute,omitempty"` - NodeType *BreakingChangeRule `json:"nodeType,omitempty" yaml:"nodeType,omitempty"` - Wrapped *BreakingChangeRule `json:"wrapped,omitempty" yaml:"wrapped,omitempty"` -} - -// ServerRules defines breaking rules for the Server object properties. -type ServerRules struct { - Name *BreakingChangeRule `json:"name,omitempty" yaml:"name,omitempty"` - URL *BreakingChangeRule `json:"url,omitempty" yaml:"url,omitempty"` - Description *BreakingChangeRule `json:"description,omitempty" yaml:"description,omitempty"` -} - -// ServerVariableRules defines breaking rules for the Server Variable object properties. -type ServerVariableRules struct { - Enum *BreakingChangeRule `json:"enum,omitempty" yaml:"enum,omitempty"` - Default *BreakingChangeRule `json:"default,omitempty" yaml:"default,omitempty"` - Description *BreakingChangeRule `json:"description,omitempty" yaml:"description,omitempty"` -} - -// TagRules defines breaking rules for the Tag object properties. 
-type TagRules struct { - Name *BreakingChangeRule `json:"name,omitempty" yaml:"name,omitempty"` - Summary *BreakingChangeRule `json:"summary,omitempty" yaml:"summary,omitempty"` - Description *BreakingChangeRule `json:"description,omitempty" yaml:"description,omitempty"` - Parent *BreakingChangeRule `json:"parent,omitempty" yaml:"parent,omitempty"` - Kind *BreakingChangeRule `json:"kind,omitempty" yaml:"kind,omitempty"` - ExternalDocs *BreakingChangeRule `json:"externalDocs,omitempty" yaml:"externalDocs,omitempty"` -} - -// ExternalDocsRules defines breaking rules for the External Documentation object properties. -type ExternalDocsRules struct { - URL *BreakingChangeRule `json:"url,omitempty" yaml:"url,omitempty"` - Description *BreakingChangeRule `json:"description,omitempty" yaml:"description,omitempty"` -} - -// SecuritySchemeRules defines breaking rules for the Security Scheme object properties. -type SecuritySchemeRules struct { - Type *BreakingChangeRule `json:"type,omitempty" yaml:"type,omitempty"` - Description *BreakingChangeRule `json:"description,omitempty" yaml:"description,omitempty"` - Name *BreakingChangeRule `json:"name,omitempty" yaml:"name,omitempty"` - In *BreakingChangeRule `json:"in,omitempty" yaml:"in,omitempty"` - Scheme *BreakingChangeRule `json:"scheme,omitempty" yaml:"scheme,omitempty"` - BearerFormat *BreakingChangeRule `json:"bearerFormat,omitempty" yaml:"bearerFormat,omitempty"` - OpenIDConnectURL *BreakingChangeRule `json:"openIdConnectUrl,omitempty" yaml:"openIdConnectUrl,omitempty"` - OAuth2MetadataUrl *BreakingChangeRule `json:"oauth2MetadataUrl,omitempty" yaml:"oauth2MetadataUrl,omitempty"` - Flows *BreakingChangeRule `json:"flows,omitempty" yaml:"flows,omitempty"` - Scopes *BreakingChangeRule `json:"scopes,omitempty" yaml:"scopes,omitempty"` - Flow *BreakingChangeRule `json:"flow,omitempty" yaml:"flow,omitempty"` // Swagger 2.0 - AuthorizationURL *BreakingChangeRule `json:"authorizationUrl,omitempty" 
yaml:"authorizationUrl,omitempty"` // Swagger 2.0 - TokenURL *BreakingChangeRule `json:"tokenUrl,omitempty" yaml:"tokenUrl,omitempty"` // Swagger 2.0 - Deprecated *BreakingChangeRule `json:"deprecated,omitempty" yaml:"deprecated,omitempty"` -} - -// SecurityRequirementRules defines breaking rules for the Security Requirement object properties. -type SecurityRequirementRules struct { - Schemes *BreakingChangeRule `json:"schemes,omitempty" yaml:"schemes,omitempty"` - Scopes *BreakingChangeRule `json:"scopes,omitempty" yaml:"scopes,omitempty"` -} - -// OAuthFlowsRules defines breaking rules for the OAuth Flows object properties. -type OAuthFlowsRules struct { - Implicit *BreakingChangeRule `json:"implicit,omitempty" yaml:"implicit,omitempty"` - Password *BreakingChangeRule `json:"password,omitempty" yaml:"password,omitempty"` - ClientCredentials *BreakingChangeRule `json:"clientCredentials,omitempty" yaml:"clientCredentials,omitempty"` - AuthorizationCode *BreakingChangeRule `json:"authorizationCode,omitempty" yaml:"authorizationCode,omitempty"` - Device *BreakingChangeRule `json:"device,omitempty" yaml:"device,omitempty"` -} - -// OAuthFlowRules defines breaking rules for the OAuth Flow object properties. -type OAuthFlowRules struct { - AuthorizationURL *BreakingChangeRule `json:"authorizationUrl,omitempty" yaml:"authorizationUrl,omitempty"` - TokenURL *BreakingChangeRule `json:"tokenUrl,omitempty" yaml:"tokenUrl,omitempty"` - RefreshURL *BreakingChangeRule `json:"refreshUrl,omitempty" yaml:"refreshUrl,omitempty"` - Scopes *BreakingChangeRule `json:"scopes,omitempty" yaml:"scopes,omitempty"` -} - -// CallbackRules defines breaking rules for the Callback object properties. -type CallbackRules struct { - Expressions *BreakingChangeRule `json:"expressions,omitempty" yaml:"expressions,omitempty"` -} - -// LinkRules defines breaking rules for the Link object properties. 
-type LinkRules struct { - OperationRef *BreakingChangeRule `json:"operationRef,omitempty" yaml:"operationRef,omitempty"` - OperationID *BreakingChangeRule `json:"operationId,omitempty" yaml:"operationId,omitempty"` - RequestBody *BreakingChangeRule `json:"requestBody,omitempty" yaml:"requestBody,omitempty"` - Description *BreakingChangeRule `json:"description,omitempty" yaml:"description,omitempty"` - Server *BreakingChangeRule `json:"server,omitempty" yaml:"server,omitempty"` - Parameters *BreakingChangeRule `json:"parameters,omitempty" yaml:"parameters,omitempty"` -} - -// ExampleRules defines breaking rules for the Example object properties. -type ExampleRules struct { - Summary *BreakingChangeRule `json:"summary,omitempty" yaml:"summary,omitempty"` - Description *BreakingChangeRule `json:"description,omitempty" yaml:"description,omitempty"` - Value *BreakingChangeRule `json:"value,omitempty" yaml:"value,omitempty"` - ExternalValue *BreakingChangeRule `json:"externalValue,omitempty" yaml:"externalValue,omitempty"` - DataValue *BreakingChangeRule `json:"dataValue,omitempty" yaml:"dataValue,omitempty"` - SerializedValue *BreakingChangeRule `json:"serializedValue,omitempty" yaml:"serializedValue,omitempty"` -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/callback.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/callback.go deleted file mode 100644 index f5685b92727..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/callback.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2022-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - "github.com/pb33f/libopenapi/datamodel/low" - v3 "github.com/pb33f/libopenapi/datamodel/low/v3" -) - -// CallbackChanges represents all changes made between two Callback OpenAPI objects. 
-type CallbackChanges struct { - *PropertyChanges - ExpressionChanges map[string]*PathItemChanges `json:"expressions,omitempty" yaml:"expressions,omitempty"` - ExtensionChanges *ExtensionChanges `json:"extensions,omitempty" yaml:"extensions,omitempty"` -} - -// TotalChanges returns a total count of all changes made between Callback objects -func (c *CallbackChanges) TotalChanges() int { - if c == nil { - return 0 - } - d := c.PropertyChanges.TotalChanges() - for k := range c.ExpressionChanges { - if c.ExpressionChanges[k] != nil { - d += c.ExpressionChanges[k].TotalChanges() - } - } - if c.ExtensionChanges != nil { - d += c.ExtensionChanges.TotalChanges() - } - return d -} - -// GetAllChanges returns a slice of all changes made between Callback objects -func (c *CallbackChanges) GetAllChanges() []*Change { - if c == nil { - return nil - } - var changes []*Change - changes = append(changes, c.Changes...) - for k := range c.ExpressionChanges { - changes = append(changes, c.ExpressionChanges[k].GetAllChanges()...) - } - if c.ExtensionChanges != nil { - changes = append(changes, c.ExtensionChanges.GetAllChanges()...) - } - return changes -} - -// TotalBreakingChanges returns a total count of all changes made between Callback objects -func (c *CallbackChanges) TotalBreakingChanges() int { - d := c.PropertyChanges.TotalBreakingChanges() - for k := range c.ExpressionChanges { - d += c.ExpressionChanges[k].TotalBreakingChanges() - } - if c.ExtensionChanges != nil { - d += c.ExtensionChanges.TotalBreakingChanges() - } - return d -} - -// CompareCallback will compare two Callback objects and return a pointer to CallbackChanges with all the things -// that have changed between them. Handles nil inputs for added/removed callback scenarios. 
-func CompareCallback(l, r *v3.Callback) *CallbackChanges { - cc := new(CallbackChanges) - var changes []*Change - - if l == nil && r == nil { - return nil - } - - // whole callback added - use operation.callbacks breaking rules - if l == nil { - expChanges := make(map[string]*PathItemChanges) - for k, v := range r.Expression.FromOldest() { - CreateChange(&changes, ObjectAdded, k.Value, - nil, v.GetValueNode(), BreakingAdded(CompOperation, PropCallbacks), - nil, v.GetValue()) - } - cc.ExpressionChanges = expChanges - cc.ExtensionChanges = CompareExtensions(nil, r.Extensions) - cc.PropertyChanges = NewPropertyChanges(changes) - if cc.TotalChanges() <= 0 { - return nil - } - return cc - } - - // whole callback removed - use operation.callbacks breaking rules - if r == nil { - expChanges := make(map[string]*PathItemChanges) - for k, v := range l.Expression.FromOldest() { - CreateChange(&changes, ObjectRemoved, k.Value, - v.GetValueNode(), nil, BreakingRemoved(CompOperation, PropCallbacks), - v.GetValue(), nil) - } - cc.ExpressionChanges = expChanges - cc.ExtensionChanges = CompareExtensions(l.Extensions, nil) - cc.PropertyChanges = NewPropertyChanges(changes) - if cc.TotalChanges() <= 0 { - return nil - } - return cc - } - - // Both exist - compare them - lHashes := make(map[string]string) - rHashes := make(map[string]string) - - lValues := make(map[string]low.ValueReference[*v3.PathItem]) - rValues := make(map[string]low.ValueReference[*v3.PathItem]) - - for k, v := range l.Expression.FromOldest() { - lHashes[k.Value] = low.GenerateHashString(v.Value) - lValues[k.Value] = v - } - - for k, v := range r.Expression.FromOldest() { - rHashes[k.Value] = low.GenerateHashString(v.Value) - rValues[k.Value] = v - } - - expChanges := make(map[string]*PathItemChanges) - - // check left path item hashes - for k := range lHashes { - rhash := rHashes[k] - if rhash == "" { - CreateChange(&changes, ObjectRemoved, k, - lValues[k].GetValueNode(), nil, BreakingRemoved(CompCallback, 
PropExpressions), - lValues[k].GetValue(), nil) - continue - } - if lHashes[k] == rHashes[k] { - continue - } - // run comparison. - expChanges[k] = ComparePathItems(lValues[k].Value, rValues[k].Value) - } - - // check right path item hashes - for k := range rHashes { - lhash := lHashes[k] - if lhash == "" { - CreateChange(&changes, ObjectAdded, k, - nil, rValues[k].GetValueNode(), BreakingAdded(CompCallback, PropExpressions), - nil, rValues[k].GetValue()) - continue - } - } - cc.ExpressionChanges = expChanges - cc.ExtensionChanges = CompareExtensions(l.Extensions, r.Extensions) - cc.PropertyChanges = NewPropertyChanges(changes) - if cc.TotalChanges() <= 0 { - return nil - } - return cc -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/change_types.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/change_types.go deleted file mode 100644 index 668423ebae6..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/change_types.go +++ /dev/null @@ -1,286 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - "encoding/json" - - "go.yaml.in/yaml/v4" -) - -// Definitions of the possible changes between two items -const ( - - // Modified means that was a modification of a value was made - Modified = iota + 1 - - // PropertyAdded means that a new property to an object was added - PropertyAdded - - // ObjectAdded means that a new object was added to a parent object - ObjectAdded - - // ObjectRemoved means that an object was removed from a parent object - ObjectRemoved - - // PropertyRemoved means that a property of an object was removed - PropertyRemoved -) - -// WhatChanged is a summary object that contains a high level summary of everything changed. 
-type WhatChanged struct { - Added int `json:"added,omitempty" yaml:"added,omitempty"` - Removed int `json:"removed,omitempty" yaml:"removed,omitempty"` - Modified int `json:"modified,omitempty" yaml:"modified,omitempty"` - TotalChanges int `json:"total,omitempty" yaml:"total,omitempty"` -} - -// ChangeContext holds a reference to the line and column positions of original and new change. -type ChangeContext struct { - DocumentLocation string `json:"document,omitempty" yaml:"document,omitempty"` - OriginalLine *int `json:"originalLine,omitempty" yaml:"originalLine,omitempty"` - OriginalColumn *int `json:"originalColumn,omitempty" yaml:"originalColumn,omitempty"` - NewLine *int `json:"newLine,omitempty" yaml:"newLine,omitempty"` - NewColumn *int `json:"newColumn,omitempty" yaml:"newColumn,omitempty"` -} - -type ChangeIsReferenced interface { - GetChangeReference() string - SetChangeReference(ref string) -} - -// HasChanged determines if the line and column numbers of the original and new values have changed. -// -// It's worth noting that there is no guarantee to the positions of anything in either left or right, so -// considering these values as 'changes' is going to add a considerable amount of noise to results. -func (c *ChangeContext) HasChanged() bool { - if c.NewLine != nil && c.OriginalLine != nil && *c.NewLine != *c.OriginalLine { - return true - } - //if c.NewColumn != nil && c.OriginalColumn != nil && *c.NewColumn != *c.OriginalColumn { - // return true - //} - if (c.NewLine == nil && c.OriginalLine != nil) || (c.NewLine != nil && c.OriginalLine == nil) { - return true - } - //if (c.NewColumn == nil && c.OriginalColumn != nil) || (c.NewColumn != nil && c.OriginalColumn == nil) { - // return true - //} - return false -} - -// Change represents a change between two different elements inside an OpenAPI specification. 
-type Change struct { - // Context represents the lines and column numbers of the original and new values - // It's worth noting that these values may frequently be different and are not used to calculate - // a change. If the positions change, but values do not, then no change is recorded. - Context *ChangeContext `json:"context,omitempty" yaml:"context,omitempty"` - - // ChangeType represents the type of change that occurred. stored as an integer, defined by constants above. - ChangeType int `json:"change,omitempty" yaml:"change,omitempty"` - - // Property is the property name key being changed. - Property string `json:"property,omitempty" yaml:"property,omitempty"` - - // Original is the original value represented as a string. - Original string `json:"original,omitempty" yaml:"original,omitempty"` - - // New is the new value represented as a string. - New string `json:"new,omitempty" yaml:"new,omitempty"` - - // OriginalEncoded is the original value serialized to YAML (for complex types like extensions). - // Only populated for specific use cases (e.g., extension values that are objects/arrays). - OriginalEncoded string `json:"originalEncoded,omitempty" yaml:"originalEncoded,omitempty"` - - // NewEncoded is the new value serialized to YAML (for complex types like extensions). - // Only populated for specific use cases (e.g., extension values that are objects/arrays). - NewEncoded string `json:"newEncoded,omitempty" yaml:"newEncoded,omitempty"` - - // Breaking determines if the change is a breaking one or not. - Breaking bool `json:"breaking" yaml:"breaking"` - - // OriginalObject represents the original object that was changed. - OriginalObject any `json:"-" yaml:"-"` - - // NewObject represents the new object that has been modified. - NewObject any `json:"-" yaml:"-"` - - // Type represents the type of object that was changed. (not used in the current implementation). 
- Type string `json:"type,omitempty"` - - // Path represents the path to the object that was changed (not used in the current implementation). - Path string `json:"path,omitempty"` - - // Reference is populated when the change is related to a $ref change. - Reference string `json:"reference,omitempty"` -} - -// MarshalJSON is a custom JSON marshaller for the Change object. -func (c *Change) MarshalJSON() ([]byte, error) { - changeType := "" - switch c.ChangeType { - case Modified: - changeType = "modified" - case PropertyAdded: - changeType = "property_added" - case ObjectAdded: - changeType = "object_added" - case ObjectRemoved: - changeType = "object_removed" - case PropertyRemoved: - changeType = "property_removed" - } - data := map[string]interface{}{ - "change": c.ChangeType, - "changeText": changeType, - "property": c.Property, - "breaking": c.Breaking, - } - - if c.Original != "" { - data["original"] = c.Original - } - - if c.New != "" { - data["new"] = c.New - } - - if c.OriginalEncoded != "" { - data["originalEncoded"] = c.OriginalEncoded - } - - if c.NewEncoded != "" { - data["newEncoded"] = c.NewEncoded - } - - if c.Context != nil { - data["context"] = c.Context - } - if c.Type != "" { - data["type"] = c.Type - } - if c.Path != "" { - data["path"] = c.Path - } - return json.Marshal(data) -} - -// PropertyChanges holds a slice of Change pointers -type PropertyChanges struct { - RenderPropertiesOnly bool `json:"-" yaml:"-"` - ChangeReference string `json:"changeReference,omitempty""` - Changes []*Change `json:"changes,omitempty" yaml:"changes,omitempty"` -} - -func (p *PropertyChanges) SetChangeReference(ref string) { - p.ChangeReference = ref -} - -func (p *PropertyChanges) GetChangeReference() string { - return p.ChangeReference -} - -// TotalChanges returns the total number of property changes made. 
-func (p *PropertyChanges) TotalChanges() int { - if p == nil { - return 0 - } - return len(p.Changes) -} - -// TotalBreakingChanges returns the total number of property breaking changes made. -func (p *PropertyChanges) TotalBreakingChanges() int { - return CountBreakingChanges(p.Changes) -} - -// PropertiesOnly will set the change object to only render properties, not the timeline. -func (p *PropertyChanges) PropertiesOnly() { - p.RenderPropertiesOnly = true -} - -// GetPropertyChanges will return just the property changes -func (p *PropertyChanges) GetPropertyChanges() []*Change { - return p.Changes -} - -func NewPropertyChanges(changes []*Change) *PropertyChanges { - return &PropertyChanges{Changes: changes} -} - -// PropertyCheck is used by functions to check the state of left and right values. -type PropertyCheck struct { - // Original is the property we're checking on the left - Original any - - // New is s the property we're checking on the right - New any - - // Label is the identifier we're looking for on the left and right hand sides - Label string - - // LeftNode is the yaml.Node pointer that holds the original node structure of the value - LeftNode *yaml.Node - - // RightNode is the yaml.Node pointer that holds the new node structure of the value - RightNode *yaml.Node - - // Breaking determines if the check is a breaking change (modifications or removals etc.) - // - // Deprecated: Use Component and Property fields for configurable breaking rules. - // This field is used as a fallback when Component is not set. - // - // TODO: Migration to Component/Property-based breaking rules - // - // Current state: CreateChange() takes a `breaking bool` parameter that is computed via - // BreakingAdded/Modified/Removed(component, property) and stored directly on this field. - // The breaking status is fixed at Change creation time. 
- // - // Target state: CreateChange() should accept `component, property string` parameters instead, - // store them on the Change, and breaking status should be computed at runtime via IsBreaking(). - // This allows users to apply different breaking rule configs to the same set of changes. - // - // Migration steps: - // 1. Extend CreateChange signature to accept component, property string - // 2. Update ~198 CreateChange call sites across 23 files - // 3. Add IsBreaking() method on Change that computes from Component/Property - // 4. Keep Breaking field populated for backward compatibility - // - // Files with most CreateChange calls: schema.go (51), path_item.go (42), operation.go (22) - Breaking bool - - // Component is the OpenAPI component type (e.g., CompTag, CompSchema) for breaking rules lookup. - // When set along with Property, the configurable breaking rules system is used instead of Breaking. - // TODO: Currently not populated - see Breaking field TODO for migration plan. - Component string - - // Property is the property name within the component (e.g., PropParent, PropName) for breaking rules lookup. - // Used together with Component to look up the correct breaking rule for each change type. - // TODO: Currently not populated - see Breaking field TODO for migration plan. - Property string - - // Changes represents a pointer to the slice to contain all changes found. - Changes *[]*Change -} - -// NewPropertyCheck creates a PropertyCheck with the Component and Property fields set for -// configurable breaking rules. This is the preferred way to create PropertyCheck instances. 
-func NewPropertyCheck( - component, property string, - leftNode, rightNode *yaml.Node, - label string, - changes *[]*Change, - original, new any, -) *PropertyCheck { - return &PropertyCheck{ - LeftNode: leftNode, - RightNode: rightNode, - Label: label, - Changes: changes, - Breaking: BreakingModified(component, property), // fallback for legacy code paths - Component: component, - Property: property, - Original: original, - New: new, - } -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/comparison_functions.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/comparison_functions.go deleted file mode 100644 index c53f9030f76..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/comparison_functions.go +++ /dev/null @@ -1,816 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - "fmt" - "reflect" - "strings" - "sync" - - "github.com/pb33f/libopenapi/datamodel/low/base" - - "github.com/pb33f/libopenapi/orderedmap" - "github.com/pb33f/libopenapi/utils" - - "github.com/pb33f/libopenapi/datamodel/low" - "go.yaml.in/yaml/v4" -) - -const ( - HashPh = "%x" - EMPTY_STR = "" -) - -var changeMutex sync.Mutex - -// SetReferenceIfExists checks if a low-level value has a reference and sets it on the change object -// if the change object implements the ChangeIsReferenced interface. -func SetReferenceIfExists[T any](value *low.ValueReference[T], changeObj any) { - if value != nil && value.IsReference() { - if refChange, ok := changeObj.(ChangeIsReferenced); ok { - refChange.SetChangeReference(value.GetReference()) - } - } -} - -// PreserveParameterReference checks if a parameter is a reference and preserves it on the changes object. -// This eliminates duplicate reference preservation logic in operation.go and path_item.go. 
-func PreserveParameterReference[T any](lRefs, rRefs map[string]*low.ValueReference[T], name string, changes ChangeIsReferenced) { - if lRef := lRefs[name]; lRef != nil && lRef.IsReference() { - SetReferenceIfExists(lRef, changes) - } else if rRef := rRefs[name]; rRef != nil && rRef.IsReference() { - SetReferenceIfExists(rRef, changes) - } -} - -func checkLocation(ctx *ChangeContext, hs base.HasIndex) bool { - if !reflect.ValueOf(hs).IsNil() { - idx := hs.GetIndex() - if idx == nil { - return false - } - if idx.GetRolodex() != nil { - r := idx.GetRolodex() - rIdx := r.GetRootIndex() - if rIdx.GetSpecAbsolutePath() != idx.GetSpecAbsolutePath() { - ctx.DocumentLocation = idx.GetSpecAbsolutePath() - return true - } - } - } - return false -} - -// CreateChange is a generic function that will create a Change of type T, populate all properties if set and then -// add a pointer to Change[T] in the slice of Change pointers provided -func CreateChange(changes *[]*Change, changeType int, property string, leftValueNode, rightValueNode *yaml.Node, - breaking bool, originalObject, newObject any, -) *[]*Change { - // create a new context for the left and right nodes. - ctx := CreateContext(leftValueNode, rightValueNode) - c := &Change{ - Context: ctx, - ChangeType: changeType, - Property: property, - Breaking: breaking, - } - - // lets find out if the objects are local to the root, or if it's come from another document in the tree. 
- if originalObject != nil { - if hs, ok := originalObject.(base.HasIndex); ok { - checkLocation(ctx, hs) - } - } - if newObject != nil { - if hs, ok := newObject.(base.HasIndex); ok { - checkLocation(ctx, hs) - } - } - - // if the left is not nil, we have an original value - if leftValueNode != nil && leftValueNode.Value != EMPTY_STR { - c.Original = leftValueNode.Value - } - // if the right is not nil, then we have a new value - if rightValueNode != nil && rightValueNode.Value != EMPTY_STR { - c.New = rightValueNode.Value - } - - // If node is nil but object is a string, use the object value as fallback - // This handles cases where the value is provided as the object parameter (e.g., security requirements) - if leftValueNode == nil && c.Original == "" { - if str, ok := originalObject.(string); ok { - c.Original = str - } - } - if rightValueNode == nil && c.New == "" { - if str, ok := newObject.(string); ok { - c.New = str - } - } - - // original and new objects - c.OriginalObject = originalObject - c.NewObject = newObject - - // add the change to supplied changes slice - changeMutex.Lock() - *changes = append(*changes, c) - changeMutex.Unlock() - return changes -} - -// CreateChangeWithEncoding is like CreateChange but also populates the encoded fields for complex values. -// use this ONLY for extensions or other cases where complex YAML structures need to be serialized. -// the encoded values are serialized to YAML format. 
-func CreateChangeWithEncoding(changes *[]*Change, changeType int, property string, leftValueNode, rightValueNode *yaml.Node, - breaking bool, originalObject, newObject any, -) *[]*Change { - CreateChange(changes, changeType, property, leftValueNode, rightValueNode, breaking, originalObject, newObject) - - c := (*changes)[len(*changes)-1] - - // serialize complex values to YAML for extension rendering (avoid inflating memory for scalar values) - if leftValueNode != nil && (utils.IsNodeArray(leftValueNode) || utils.IsNodeMap(leftValueNode)) { - if encoded, err := yaml.Marshal(leftValueNode); err == nil { - c.OriginalEncoded = string(encoded) - } - } - if rightValueNode != nil && (utils.IsNodeArray(rightValueNode) || utils.IsNodeMap(rightValueNode)) { - if encoded, err := yaml.Marshal(rightValueNode); err == nil { - c.NewEncoded = string(encoded) - } - } - - return changes -} - -// CreateContext will return a pointer to a ChangeContext containing the original and new line and column numbers -// of the left and right value nodes. -func CreateContext(l, r *yaml.Node) *ChangeContext { - ctx := new(ChangeContext) - if l != nil { - ctx.OriginalLine = &l.Line - ctx.OriginalColumn = &l.Column - } - if r != nil { - ctx.NewLine = &r.Line - ctx.NewColumn = &r.Column - } - return ctx -} - -func FlattenLowLevelOrderedMap[T any]( - lowMap *orderedmap.Map[low.KeyReference[string], low.ValueReference[T]], -) map[string]*low.ValueReference[T] { - flat := make(map[string]*low.ValueReference[T]) - - for k, l := range lowMap.FromOldest() { - flat[k.Value] = &l - } - return flat -} - -// CountBreakingChanges counts the number of changes in a slice that are breaking -func CountBreakingChanges(changes []*Change) int { - b := 0 - for i := range changes { - if changes[i].Breaking { - b++ - } - } - return b -} - -// checkForObjectAdditionOrRemovalInternal is the internal implementation that handles both encoding modes. 
-func checkForObjectAdditionOrRemovalInternal[T any](l, r map[string]*low.ValueReference[T], label string, changes *[]*Change, - breakingAdd, breakingRemove bool, withEncoding bool, -) { - createFn := CreateChange - if withEncoding { - createFn = CreateChangeWithEncoding - } - var left, right T - if CheckSpecificObjectRemoved(l, r, label) { - left = l[label].GetValue() - createFn(changes, ObjectRemoved, label, l[label].GetValueNode(), nil, - breakingRemove, left, nil) - } - if CheckSpecificObjectAdded(l, r, label) { - right = r[label].GetValue() - createFn(changes, ObjectAdded, label, nil, r[label].GetValueNode(), - breakingAdd, nil, right) - } -} - -// CheckForObjectAdditionOrRemoval will check for the addition or removal of an object from left and right maps. -// The label is the key to look for in the left and right maps. -// -// To determine this a breaking change for an addition then set breakingAdd to true (however I can't think of many -// scenarios that adding things should break anything). Removals are generally breaking, except for non contract -// properties like descriptions, summaries and other non-binding values, so a breakingRemove value can be tuned for -// these circumstances. -func CheckForObjectAdditionOrRemoval[T any](l, r map[string]*low.ValueReference[T], label string, changes *[]*Change, - breakingAdd, breakingRemove bool, -) { - checkForObjectAdditionOrRemovalInternal(l, r, label, changes, breakingAdd, breakingRemove, false) -} - -// CheckForObjectAdditionOrRemovalWithEncoding is like CheckForObjectAdditionOrRemoval but populates encoded fields. -// Use this for extensions where complex values need to be serialized to YAML. 
-func CheckForObjectAdditionOrRemovalWithEncoding[T any](l, r map[string]*low.ValueReference[T], label string, changes *[]*Change, - breakingAdd, breakingRemove bool, -) { - checkForObjectAdditionOrRemovalInternal(l, r, label, changes, breakingAdd, breakingRemove, true) -} - -// CheckSpecificObjectRemoved returns true if a specific value is not in both maps. -func CheckSpecificObjectRemoved[T any](l, r map[string]*T, label string) bool { - return l[label] != nil && r[label] == nil -} - -// CheckSpecificObjectAdded returns true if a specific value is not in both maps. -func CheckSpecificObjectAdded[T any](l, r map[string]*T, label string) bool { - return l[label] == nil && r[label] != nil -} - -// CheckProperties will iterate through a slice of PropertyCheck pointers of type T. The method is a convenience method -// for running checks on the following methods in order: -// -// CheckPropertyAdditionOrRemoval -// CheckForModification -// -// When PropertyCheck has Component set, the configurable breaking rules system is used -// to look up the correct breaking value for each change type (added, modified, removed). -func CheckProperties(properties []*PropertyCheck) { - checkPropertiesInternal(properties, false) -} - -// checkPropertiesInternal is the shared implementation for CheckProperties and CheckPropertiesWithEncoding. -// The withEncoding parameter controls whether to use encoding-aware functions for complex YAML values. 
-func checkPropertiesInternal(properties []*PropertyCheck, withEncoding bool) { - // cache config once outside the loop for performance (avoids repeated mutex operations) - config := GetActiveBreakingRulesConfig() - - for _, n := range properties { - var breakingAdded, breakingModified, breakingRemoved bool - - if n.Component != "" { - // use configurable breaking rules via cached config if rule exists - if rule := config.GetRule(n.Component, n.Property); rule != nil { - // extract breaking values directly from rule (avoids 3 redundant lookups) - breakingAdded = rule.Added != nil && *rule.Added - breakingModified = rule.Modified != nil && *rule.Modified - breakingRemoved = rule.Removed != nil && *rule.Removed - } else { - // no rule found - fallback to legacy Breaking field - breakingAdded = n.Breaking - breakingModified = n.Breaking - breakingRemoved = n.Breaking - } - } else { - // no component set - fallback to legacy Breaking field - breakingAdded = n.Breaking - breakingModified = n.Breaking - breakingRemoved = n.Breaking - } - - // run the checks with the determined breaking values - if withEncoding { - checkForRemovalInternal(n.LeftNode, n.RightNode, n.Label, n.Changes, breakingRemoved, n.Original, n.New, true) - checkForAdditionInternal(n.LeftNode, n.RightNode, n.Label, n.Changes, breakingAdded, n.Original, n.New, true) - checkForModificationInternal(n.LeftNode, n.RightNode, n.Label, n.Changes, breakingModified, n.Original, n.New, true) - } else { - checkForRemovalInternal(n.LeftNode, n.RightNode, n.Label, n.Changes, breakingRemoved, n.Original, n.New, false) - checkForAdditionInternal(n.LeftNode, n.RightNode, n.Label, n.Changes, breakingAdded, n.Original, n.New, false) - checkForModificationInternal(n.LeftNode, n.RightNode, n.Label, n.Changes, breakingModified, n.Original, n.New, false) - } - } -} - -// CheckPropertiesWithEncoding is like CheckProperties but uses CreateChangeWithEncoding for complex values. 
-// Use this for extensions where YAML serialization is needed. -func CheckPropertiesWithEncoding(properties []*PropertyCheck) { - checkPropertiesInternal(properties, true) -} - -// CheckPropertyAdditionOrRemovalWithEncoding checks for additions and removals with encoding. -func CheckPropertyAdditionOrRemovalWithEncoding[T any](l, r *yaml.Node, - label string, changes *[]*Change, breaking bool, orig, new T, -) { - checkForRemovalInternal(l, r, label, changes, breaking, orig, new, true) - checkForAdditionInternal(l, r, label, changes, breaking, orig, new, true) -} - -// CheckForRemovalWithEncoding checks for removals with YAML encoding. -func CheckForRemovalWithEncoding[T any](l, r *yaml.Node, label string, changes *[]*Change, breaking bool, orig, new T) { - checkForRemovalInternal(l, r, label, changes, breaking, orig, new, true) -} - -// CheckForAdditionWithEncoding checks for additions with YAML encoding. -func CheckForAdditionWithEncoding[T any](l, r *yaml.Node, label string, changes *[]*Change, breaking bool, orig, new T) { - checkForAdditionInternal(l, r, label, changes, breaking, orig, new, true) -} - -// CheckForModificationWithEncoding checks for modifications with YAML encoding. -func CheckForModificationWithEncoding[T any](l, r *yaml.Node, label string, changes *[]*Change, breaking bool, orig, new T) { - checkForModificationInternal(l, r, label, changes, breaking, orig, new, true) -} - -// CheckPropertyAdditionOrRemoval will run both CheckForRemoval (first) and CheckForAddition (second) -func CheckPropertyAdditionOrRemoval[T any](l, r *yaml.Node, - label string, changes *[]*Change, breaking bool, orig, new T, -) { - CheckForRemoval[T](l, r, label, changes, breaking, orig, new) - CheckForAddition[T](l, r, label, changes, breaking, orig, new) -} - -// checkForRemovalInternal is the internal implementation for removal checks with configurable encoding. 
-func checkForRemovalInternal[T any](l, r *yaml.Node, label string, changes *[]*Change, breaking bool, orig, new T, withEncoding bool) { - createFn := CreateChange - if withEncoding { - createFn = CreateChangeWithEncoding - } - if l != nil && l.Value != EMPTY_STR && (r == nil || r.Value == EMPTY_STR && !utils.IsNodeArray(r) && !utils.IsNodeMap(r)) { - createFn(changes, PropertyRemoved, label, l, r, breaking, orig, new) - return - } - if l != nil && r == nil { - createFn(changes, PropertyRemoved, label, l, nil, breaking, orig, nil) - } -} - -// CheckForRemoval will check left and right yaml.Node instances for changes. Anything that is found missing on the -// right, but present on the left, is considered a removal. A new Change[T] will be created with the type -// -// PropertyRemoved -// -// The Change is then added to the slice of []Change[T] instances provided as a pointer. -func CheckForRemoval[T any](l, r *yaml.Node, label string, changes *[]*Change, breaking bool, orig, new T) { - checkForRemovalInternal(l, r, label, changes, breaking, orig, new, false) -} - -// checkForAdditionInternal is the internal implementation for addition checks with configurable encoding. 
-func checkForAdditionInternal[T any](l, r *yaml.Node, label string, changes *[]*Change, breaking bool, orig, new T, withEncoding bool) { - createFn := CreateChange - if withEncoding { - createFn = CreateChangeWithEncoding - } - // left doesn't exist if: nil OR (empty scalar AND not a map/array) OR (empty map/array) - leftDoesNotExist := l == nil || - (l.Value == EMPTY_STR && !utils.IsNodeMap(l) && !utils.IsNodeArray(l)) || - ((utils.IsNodeMap(l) || utils.IsNodeArray(l)) && len(l.Content) == 0) - // right exists if: not nil AND (has value OR is array OR is map) - rightExists := r != nil && (r.Value != EMPTY_STR || utils.IsNodeArray(r) || utils.IsNodeMap(r)) - - if leftDoesNotExist && rightExists { - createFn(changes, PropertyAdded, label, l, r, breaking, orig, new) - } -} - -// CheckForAddition will check left and right yaml.Node instances for changes. Anything that is found missing on the -// left, but present on the right, is considered an addition. A new Change[T] will be created with the type -// -// PropertyAdded -// -// The Change is then added to the slice of []Change[T] instances provided as a pointer. -func CheckForAddition[T any](l, r *yaml.Node, label string, changes *[]*Change, breaking bool, orig, new T) { - checkForAdditionInternal(l, r, label, changes, breaking, orig, new, false) -} - -// checkForModificationInternal is the internal implementation for modification checks with configurable encoding. 
-func checkForModificationInternal[T any](l, r *yaml.Node, label string, changes *[]*Change, breaking bool, orig, new T, withEncoding bool) { - createFn := CreateChange - if withEncoding { - createFn = CreateChangeWithEncoding - } - if l != nil && l.Value != EMPTY_STR && r != nil && r.Value != EMPTY_STR && (r.Value != l.Value || r.Tag != l.Tag) { - createFn(changes, Modified, label, l, r, breaking, orig, new) - return - } - if l != nil && utils.IsNodeArray(l) && r != nil && !utils.IsNodeArray(r) { - createFn(changes, Modified, label, l, r, breaking, orig, new) - return - } - if l != nil && !utils.IsNodeArray(l) && r != nil && utils.IsNodeArray(r) { - createFn(changes, Modified, label, l, r, breaking, orig, new) - return - } - if l != nil && utils.IsNodeMap(l) && r != nil && !utils.IsNodeMap(r) { - createFn(changes, Modified, label, l, r, breaking, orig, new) - return - } - if l != nil && !utils.IsNodeMap(l) && r != nil && utils.IsNodeMap(r) { - createFn(changes, Modified, label, l, r, breaking, orig, new) - return - } - if l != nil && utils.IsNodeArray(l) && r != nil && utils.IsNodeArray(r) { - if len(l.Content) != len(r.Content) { - createFn(changes, Modified, label, l, r, breaking, orig, new) - return - } - - // Compare the YAML node trees directly without marshaling - if !low.CompareYAMLNodes(l, r) { - createFn(changes, Modified, label, l, r, breaking, orig, new) - } - return - } - if l != nil && utils.IsNodeMap(l) && r != nil && utils.IsNodeMap(r) { - // Compare the YAML node trees directly without marshaling - if !low.CompareYAMLNodes(l, r) { - createFn(changes, Modified, label, l, r, breaking, orig, new) - } - return - } -} - -// CheckForModification will check left and right yaml.Node instances for changes. Anything that is found in both -// sides, but vary in value is considered a modification. -// -// If there is a change in value the function adds a change type of Modified. 
-// -// The Change is then added to the slice of []Change[T] instances provided as a pointer. -func CheckForModification[T any](l, r *yaml.Node, label string, changes *[]*Change, breaking bool, orig, new T) { - checkForModificationInternal(l, r, label, changes, breaking, orig, new, false) -} - -// CheckMapForChanges checks a left and right low level map for any additions, subtractions or modifications to -// values. The compareFunc argument should reference the correct comparison function for the generic type. -// Uses original hardcoded breaking behavior (removals breaking, additions non-breaking). -func CheckMapForChanges[T any, R any](expLeft, expRight *orderedmap.Map[low.KeyReference[string], low.ValueReference[T]], - changes *[]*Change, label string, compareFunc func(l, r T) R, -) map[string]R { - return checkMapForChangesInternal(expLeft, expRight, changes, label, compareFunc, true, false, true) -} - -// CheckMapForChangesWithRules checks a left and right low level map for any additions, subtractions or modifications -// to values, using the configurable breaking rules system for the specified component and property. -func CheckMapForChangesWithRules[T any, R any](expLeft, expRight *orderedmap.Map[low.KeyReference[string], low.ValueReference[T]], - changes *[]*Change, label string, compareFunc func(l, r T) R, component, property string, -) map[string]R { - return checkMapForChangesInternal(expLeft, expRight, changes, label, compareFunc, true, - BreakingAdded(component, property), BreakingRemoved(component, property)) -} - -// CheckMapForAdditionRemoval checks a left and right low level map for any additions or subtractions, but not modifications -func CheckMapForAdditionRemoval[T any](expLeft, expRight *orderedmap.Map[low.KeyReference[string], low.ValueReference[T]], - changes *[]*Change, label string, -) any { - doNothing := func(l, r T) any { - return nil - } - // adding purely to make sure code is called for coverage. 
- var l, r T - doNothing(l, r) - return checkMapForChangesInternal(expLeft, expRight, changes, label, doNothing, false, false, true) -} - -// CheckMapForChangesWithComp checks a left and right low level map for any additions, subtractions or modifications to -// values. The compareFunc argument should reference the correct comparison function for the generic type. The compare -// bit determines if the comparison should be run or not. -// Deprecated: Use checkMapForChangesInternal with explicit breaking parameters instead. -func CheckMapForChangesWithComp[T any, R any](expLeft, expRight *orderedmap.Map[low.KeyReference[string], low.ValueReference[T]], - changes *[]*Change, label string, compareFunc func(l, r T) R, compare bool, -) map[string]R { - return checkMapForChangesInternal(expLeft, expRight, changes, label, compareFunc, compare, false, true) -} - -// CheckMapForChangesWithNilSupport checks a left and right low level map for any additions, subtractions or modifications. -// Unlike CheckMapForChanges, this function calls compareFunc for added/removed items by passing nil for the missing side. -// The compareFunc MUST handle nil inputs gracefully (return appropriate changes for added/removed cases). -// This allows the returned map to include entries for added/removed items, enabling proper tree rendering. -func CheckMapForChangesWithNilSupport[T any, R any](expLeft, expRight *orderedmap.Map[low.KeyReference[string], low.ValueReference[T]], - changes *[]*Change, label string, compareFunc func(l, r T) R, -) map[string]R { - return checkMapForChangesWithNilSupportInternal(expLeft, expRight, changes, label, compareFunc, false, true) -} - -// checkMapForChangesWithNilSupportInternal is the core implementation that calls compareFunc with nil for added/removed items. 
-func checkMapForChangesWithNilSupportInternal[T any, R any](expLeft, expRight *orderedmap.Map[low.KeyReference[string], low.ValueReference[T]], - changes *[]*Change, label string, compareFunc func(l, r T) R, - breakingAdded, breakingRemoved bool, -) map[string]R { - var chLock sync.Mutex - - lHashes := make(map[string]string) - rHashes := make(map[string]string) - lValues := make(map[string]low.ValueReference[T]) - rValues := make(map[string]low.ValueReference[T]) - - if expLeft != nil { - for k, v := range expLeft.FromOldest() { - lHashes[k.Value] = low.GenerateHashString(v.Value) - lValues[k.Value] = v - } - } - - if expRight != nil { - for k, v := range expRight.FromOldest() { - rHashes[k.Value] = low.GenerateHashString(v.Value) - rValues[k.Value] = v - } - } - - expChanges := make(map[string]R) - - checkLeft := func(k string, doneChan chan struct{}, f, g map[string]string, p, h map[string]low.ValueReference[T]) { - rhash := g[k] - if rhash == EMPTY_STR { - // Item was removed - call compareFunc with nil/zero right side - chLock.Lock() - var zero T - ch := compareFunc(p[k].Value, zero) - if !reflect.ValueOf(&ch).Elem().IsZero() { - expChanges[k] = ch - var cr any = ch - pVal := p[k] - SetReferenceIfExists(&pVal, cr) - } - chLock.Unlock() - doneChan <- struct{}{} - return - } - if f[k] == g[k] { - doneChan <- struct{}{} - return - } - // Item was modified - chLock.Lock() - ch := compareFunc(p[k].Value, h[k].Value) - if !reflect.ValueOf(&ch).Elem().IsZero() { - expChanges[k] = ch - var cr any = ch - pVal := p[k] - SetReferenceIfExists(&pVal, cr) - } - chLock.Unlock() - doneChan <- struct{}{} - } - - checkRight := func(k string, doneChan chan struct{}, f map[string]string, p map[string]low.ValueReference[T]) { - lhash := f[k] - if lhash == EMPTY_STR { - // Item was added - call compareFunc with nil/zero left side - chLock.Lock() - var zero T - ch := compareFunc(zero, p[k].Value) - if !reflect.ValueOf(&ch).Elem().IsZero() { - expChanges[k] = ch - var cr any = ch - 
pVal := p[k] - SetReferenceIfExists(&pVal, cr) - } - chLock.Unlock() - } - doneChan <- struct{}{} - } - - doneChan := make(chan struct{}) - count := 0 - - for k := range lHashes { - count++ - go checkLeft(k, doneChan, lHashes, rHashes, lValues, rValues) - } - - for k := range rHashes { - count++ - go checkRight(k, doneChan, lHashes, rValues) - } - - completed := 0 - for completed < count { - <-doneChan - completed++ - } - return expChanges -} - -// checkMapForChangesInternal is the core implementation that checks a left and right low level map for any -// additions, subtractions or modifications to values. The breakingAdded and breakingRemoved parameters control -// whether additions and removals are marked as breaking changes. -func checkMapForChangesInternal[T any, R any](expLeft, expRight *orderedmap.Map[low.KeyReference[string], low.ValueReference[T]], - changes *[]*Change, label string, compareFunc func(l, r T) R, compare bool, - breakingAdded, breakingRemoved bool, -) map[string]R { - var chLock sync.Mutex - - lHashes := make(map[string]string) - rHashes := make(map[string]string) - lValues := make(map[string]low.ValueReference[T]) - rValues := make(map[string]low.ValueReference[T]) - - if expLeft != nil { - for k, v := range expLeft.FromOldest() { - lHashes[k.Value] = low.GenerateHashString(v.Value) - lValues[k.Value] = v - } - } - - if expRight != nil { - for k, v := range expRight.FromOldest() { - rHashes[k.Value] = low.GenerateHashString(v.Value) - rValues[k.Value] = v - } - } - - expChanges := make(map[string]R) - - checkLeft := func(k string, doneChan chan struct{}, f, g map[string]string, p, h map[string]low.ValueReference[T]) { - rhash := g[k] - if rhash == EMPTY_STR { - chLock.Lock() - if p[k].GetValueNode().Value == EMPTY_STR { - p[k].GetValueNode().Value = k - } - CreateChange(changes, ObjectRemoved, label, - p[k].GetValueNode(), nil, breakingRemoved, - p[k].GetValue(), nil) - chLock.Unlock() - doneChan <- struct{}{} - return - } - if f[k] == g[k] 
{ - doneChan <- struct{}{} - return - } - if compare { - chLock.Lock() - ch := compareFunc(p[k].Value, h[k].Value) - // incorrect map results were being generated causing panics. - // https://github.com/pb33f/libopenapi/issues/61 - if !reflect.ValueOf(&ch).Elem().IsZero() { - expChanges[k] = ch - var cr any = ch - pVal := p[k] - SetReferenceIfExists(&pVal, cr) - } - chLock.Unlock() - } - doneChan <- struct{}{} - } - - checkRight := func(k string, doneChan chan struct{}, f map[string]string, p map[string]low.ValueReference[T]) { - lhash := f[k] - if lhash == EMPTY_STR { - chLock.Lock() - if p[k].GetValueNode().Value == EMPTY_STR { - p[k].GetValueNode().Value = k - } - CreateChange(changes, ObjectAdded, label, - nil, p[k].GetValueNode(), breakingAdded, - nil, p[k].GetValue()) - chLock.Unlock() - } - doneChan <- struct{}{} - } - - doneChan := make(chan struct{}) - count := 0 - - for k := range lHashes { - count++ - go checkLeft(k, doneChan, lHashes, rHashes, lValues, rValues) - } - - for k := range rHashes { - count++ - go checkRight(k, doneChan, lHashes, rValues) - } - - completed := 0 - for completed < count { - <-doneChan - completed++ - } - return expChanges -} - -// ExtractStringValueSliceChanges will compare two low level string slices for changes. -// The breaking parameter is deprecated - use ExtractStringValueSliceChangesWithRules instead. 
-func ExtractStringValueSliceChanges(lParam, rParam []low.ValueReference[string], - changes *[]*Change, label string, breaking bool, -) { - lKeys := make([]string, len(lParam)) - rKeys := make([]string, len(rParam)) - lValues := make(map[string]low.ValueReference[string]) - rValues := make(map[string]low.ValueReference[string]) - for i := range lParam { - lKeys[i] = strings.ToLower(lParam[i].Value) - lValues[lKeys[i]] = lParam[i] - } - for i := range rParam { - rKeys[i] = strings.ToLower(rParam[i].Value) - rValues[rKeys[i]] = rParam[i] - } - for i := range lValues { - if _, ok := rValues[i]; !ok { - CreateChange(changes, PropertyRemoved, label, - lValues[i].ValueNode, - nil, - breaking, - lValues[i].Value, - nil) - } - } - for i := range rValues { - if _, ok := lValues[i]; !ok { - CreateChange(changes, PropertyAdded, label, - nil, - rValues[i].ValueNode, - false, - nil, - rValues[i].Value) - } - } -} - -// ExtractStringValueSliceChangesWithRules compares two low level string slices for changes, -// using the configurable breaking rules system to determine breaking status. 
-func ExtractStringValueSliceChangesWithRules(lParam, rParam []low.ValueReference[string], - changes *[]*Change, label string, component, property string, -) { - lKeys := make([]string, len(lParam)) - rKeys := make([]string, len(rParam)) - lValues := make(map[string]low.ValueReference[string]) - rValues := make(map[string]low.ValueReference[string]) - for i := range lParam { - lKeys[i] = strings.ToLower(lParam[i].Value) - lValues[lKeys[i]] = lParam[i] - } - for i := range rParam { - rKeys[i] = strings.ToLower(rParam[i].Value) - rValues[rKeys[i]] = rParam[i] - } - for i := range lValues { - if _, ok := rValues[i]; !ok { - CreateChange(changes, PropertyRemoved, label, - lValues[i].ValueNode, - nil, - BreakingRemoved(component, property), - lValues[i].Value, - nil) - } - } - for i := range rValues { - if _, ok := lValues[i]; !ok { - CreateChange(changes, PropertyAdded, label, - nil, - rValues[i].ValueNode, - BreakingAdded(component, property), - nil, - rValues[i].Value) - } - } -} - -func toString(v any) string { - if y, ok := v.(*yaml.Node); ok { - copy := *y - _ = copy.Encode(©) - return fmt.Sprint(copy) - } - - return fmt.Sprint(v) -} - -// ExtractRawValueSliceChanges will compare two low level interface{} slices for changes. 
-func ExtractRawValueSliceChanges[T any](lParam, rParam []low.ValueReference[T], - changes *[]*Change, label string, breaking bool, -) { - lKeys := make([]string, len(lParam)) - rKeys := make([]string, len(rParam)) - lValues := make(map[string]low.ValueReference[T]) - rValues := make(map[string]low.ValueReference[T]) - for i := range lParam { - lKeys[i] = strings.ToLower(toString(lParam[i].Value)) - lValues[lKeys[i]] = lParam[i] - } - for i := range rParam { - rKeys[i] = strings.ToLower(toString(rParam[i].Value)) - rValues[rKeys[i]] = rParam[i] - } - for i := range lValues { - if _, ok := rValues[i]; !ok { - CreateChange(changes, PropertyRemoved, label, - lValues[i].ValueNode, - nil, - breaking, - lValues[i].Value, - nil) - } - } - for i := range rValues { - if _, ok := lValues[i]; !ok { - CreateChange(changes, PropertyAdded, label, - nil, - rValues[i].ValueNode, - false, - nil, - rValues[i].Value) - } - } -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/components.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/components.go deleted file mode 100644 index f9403a0c10e..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/components.go +++ /dev/null @@ -1,286 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - "reflect" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/datamodel/low/base" - v2 "github.com/pb33f/libopenapi/datamodel/low/v2" - v3 "github.com/pb33f/libopenapi/datamodel/low/v3" - "github.com/pb33f/libopenapi/orderedmap" -) - -// ComponentsChanges represents changes made to both OpenAPI and Swagger documents. This model is based on OpenAPI 3 -// components, however it's also used to contain Swagger definitions changes. 
Swagger for some reason decided to not -// contain definitions inside a single parent like Components, and instead scattered them across the root of the -// Swagger document, giving everything a `Definitions` postfix. This design attempts to unify those models into -// a single entity that contains all changes. -// -// Schemas are treated differently from every other component / definition in this library. Schemas can be highly -// recursive, and are not resolved by the model, every ref is recorded, but it's not looked at essentially. This means -// that when what-changed performs a check, everything that is *not* a schema is checked *inline*, Those references are -// resolved in place and a change is recorded in place. Schemas however are *not* resolved. which means no change -// will be recorded in place for any object referencing it. -// -// That is why there is a separate SchemaChanges object in ComponentsChanges. Schemas are checked at the source, and -// not inline when referenced. A schema change will only be found once, however a change to ANY other definition or -// component, will be found inline (and will duplicate for every use). -// -// The other oddity here is SecuritySchemes. For some reason OpenAPI does not use a $ref for these entities, it -// uses a name lookup, which means there are no direct links between any model and a security scheme reference. -// So like Schemas, SecuritySchemes are treated differently and handled individually. -// -// An important note: Everything EXCEPT Schemas and SecuritySchemes is ONLY checked for additions or removals. -// modifications are not checked, these checks occur in-place by implementing objects as they are autp-resolved -// when the model is built. 
-type ComponentsChanges struct { - *PropertyChanges - SchemaChanges map[string]*SchemaChanges `json:"schemas,omitempty" yaml:"schemas,omitempty"` - SecuritySchemeChanges map[string]*SecuritySchemeChanges `json:"securitySchemes,omitempty" yaml:"securitySchemes,omitempty"` - ExtensionChanges *ExtensionChanges `json:"extensions,omitempty" yaml:"extensions,omitempty"` -} - -// CompareComponents will compare OpenAPI components for any changes. Accepts Swagger Definition objects -// like ParameterDefinitions or Definitions etc. -func CompareComponents(l, r any) *ComponentsChanges { - var changes []*Change - - cc := new(ComponentsChanges) - - // Swagger Parameters - if reflect.TypeOf(&v2.ParameterDefinitions{}) == reflect.TypeOf(l) && - reflect.TypeOf(&v2.ParameterDefinitions{}) == reflect.TypeOf(r) { - lDef := l.(*v2.ParameterDefinitions) - rDef := r.(*v2.ParameterDefinitions) - var a, b *orderedmap.Map[low.KeyReference[string], low.ValueReference[*v2.Parameter]] - if lDef != nil { - a = lDef.Definitions - } - if rDef != nil { - b = rDef.Definitions - } - CheckMapForAdditionRemoval(a, b, &changes, v3.ParametersLabel) - } - - // Swagger Responses - if reflect.TypeOf(&v2.ResponsesDefinitions{}) == reflect.TypeOf(l) && - reflect.TypeOf(&v2.ResponsesDefinitions{}) == reflect.TypeOf(r) { - lDef := l.(*v2.ResponsesDefinitions) - rDef := r.(*v2.ResponsesDefinitions) - var a, b *orderedmap.Map[low.KeyReference[string], low.ValueReference[*v2.Response]] - if lDef != nil { - a = lDef.Definitions - } - if rDef != nil { - b = rDef.Definitions - } - CheckMapForAdditionRemoval(a, b, &changes, v3.ResponsesLabel) - } - - // Swagger Schemas - if reflect.TypeOf(&v2.Definitions{}) == reflect.TypeOf(l) && - reflect.TypeOf(&v2.Definitions{}) == reflect.TypeOf(r) { - lDef := l.(*v2.Definitions) - rDef := r.(*v2.Definitions) - var a, b *orderedmap.Map[low.KeyReference[string], low.ValueReference[*base.SchemaProxy]] - if lDef != nil { - a = lDef.Schemas - } - if rDef != nil { - b = rDef.Schemas 
- } - cc.SchemaChanges = CheckMapForChanges(a, b, &changes, v2.DefinitionsLabel, CompareSchemas) - } - - // Swagger Security Definitions - if reflect.TypeOf(&v2.SecurityDefinitions{}) == reflect.TypeOf(l) && - reflect.TypeOf(&v2.SecurityDefinitions{}) == reflect.TypeOf(r) { - lDef := l.(*v2.SecurityDefinitions) - rDef := r.(*v2.SecurityDefinitions) - var a, b *orderedmap.Map[low.KeyReference[string], low.ValueReference[*v2.SecurityScheme]] - if lDef != nil { - a = lDef.Definitions - } - if rDef != nil { - b = rDef.Definitions - } - cc.SecuritySchemeChanges = CheckMapForChanges(a, b, &changes, - v3.SecurityDefinitionLabel, CompareSecuritySchemesV2) - } - - // OpenAPI Components - if reflect.TypeOf(&v3.Components{}) == reflect.TypeOf(l) && - reflect.TypeOf(&v3.Components{}) == reflect.TypeOf(r) { - - lComponents := l.(*v3.Components) - rComponents := r.(*v3.Components) - - //if low.AreEqual(lComponents, rComponents) { - // return nil - //} - - doneChan := make(chan componentComparison) - comparisons := 0 - - // run as fast as we can, thread all the things. 
- if !lComponents.Schemas.IsEmpty() || !rComponents.Schemas.IsEmpty() { - comparisons++ - go runComparison(lComponents.Schemas.Value, rComponents.Schemas.Value, - &changes, v3.SchemasLabel, CompareSchemas, doneChan) - } - - if !lComponents.Responses.IsEmpty() || !rComponents.Responses.IsEmpty() { - comparisons++ - go runComparison(lComponents.Responses.Value, rComponents.Responses.Value, - &changes, v3.ResponsesLabel, CompareResponseV3, doneChan) - } - - if !lComponents.Parameters.IsEmpty() || !rComponents.Parameters.IsEmpty() { - comparisons++ - go runComparison(lComponents.Parameters.Value, rComponents.Parameters.Value, - &changes, v3.ParametersLabel, CompareParametersV3, doneChan) - } - - if !lComponents.Examples.IsEmpty() || !rComponents.Examples.IsEmpty() { - comparisons++ - go runComparison(lComponents.Examples.Value, rComponents.Examples.Value, - &changes, v3.ExamplesLabel, CompareExamples, doneChan) - } - - if !lComponents.RequestBodies.IsEmpty() || !rComponents.RequestBodies.IsEmpty() { - comparisons++ - go runComparison(lComponents.RequestBodies.Value, rComponents.RequestBodies.Value, - &changes, v3.RequestBodiesLabel, CompareRequestBodies, doneChan) - } - - if !lComponents.Headers.IsEmpty() || !rComponents.Headers.IsEmpty() { - comparisons++ - go runComparison(lComponents.Headers.Value, rComponents.Headers.Value, - &changes, v3.HeadersLabel, CompareHeadersV3, doneChan) - } - - if !lComponents.SecuritySchemes.IsEmpty() || !rComponents.SecuritySchemes.IsEmpty() { - comparisons++ - go runComparison(lComponents.SecuritySchemes.Value, rComponents.SecuritySchemes.Value, - &changes, v3.SecuritySchemesLabel, CompareSecuritySchemesV3, doneChan) - } - - if !lComponents.Links.IsEmpty() || !rComponents.Links.IsEmpty() { - comparisons++ - go runComparison(lComponents.Links.Value, rComponents.Links.Value, - &changes, v3.LinksLabel, CompareLinks, doneChan) - } - - if !lComponents.Callbacks.IsEmpty() || !rComponents.Callbacks.IsEmpty() { - comparisons++ - go 
runComparison(lComponents.Callbacks.Value, rComponents.Callbacks.Value, - &changes, v3.CallbacksLabel, CompareCallback, doneChan) - } - - if !lComponents.MediaTypes.IsEmpty() || !rComponents.MediaTypes.IsEmpty() { - comparisons++ - go runComparison(lComponents.MediaTypes.Value, rComponents.MediaTypes.Value, - &changes, v3.MediaTypesLabel, CompareMediaTypes, doneChan) - } - - cc.ExtensionChanges = CompareExtensions(lComponents.Extensions, rComponents.Extensions) - - completedComponents := 0 - for completedComponents < comparisons { - res := <-doneChan - switch res.prop { - case v3.SchemasLabel: - completedComponents++ - cc.SchemaChanges = res.result.(map[string]*SchemaChanges) - case v3.SecuritySchemesLabel: - completedComponents++ - cc.SecuritySchemeChanges = res.result.(map[string]*SecuritySchemeChanges) - case v3.ResponsesLabel, v3.ParametersLabel, v3.ExamplesLabel, v3.RequestBodiesLabel, v3.HeadersLabel, - v3.LinksLabel, v3.CallbacksLabel, v3.MediaTypesLabel: - completedComponents++ - } - } - } - - cc.PropertyChanges = NewPropertyChanges(changes) - if cc.TotalChanges() <= 0 { - return nil - } - return cc -} - -type componentComparison struct { - prop string - result any -} - -// run a generic comparison in a thread which in turn splits checks into further threads. 
-func runComparison[T any, R any](l, r *orderedmap.Map[low.KeyReference[string], low.ValueReference[T]], - changes *[]*Change, label string, compareFunc func(l, r T) R, doneChan chan componentComparison, -) { - // for schemas - if label == v3.SchemasLabel || label == v2.DefinitionsLabel || label == v3.SecuritySchemesLabel { - doneChan <- componentComparison{ - prop: label, - result: CheckMapForChanges(l, r, changes, label, compareFunc), - } - return - } else { - doneChan <- componentComparison{ - prop: label, - result: CheckMapForAdditionRemoval(l, r, changes, label), - } - } -} - -// GetAllChanges returns a slice of all changes made between Callback objects -func (c *ComponentsChanges) GetAllChanges() []*Change { - if c == nil { - return nil - } - var changes []*Change - changes = append(changes, c.Changes...) - for k := range c.SchemaChanges { - changes = append(changes, c.SchemaChanges[k].GetAllChanges()...) - } - for k := range c.SecuritySchemeChanges { - changes = append(changes, c.SecuritySchemeChanges[k].GetAllChanges()...) - } - if c.ExtensionChanges != nil { - changes = append(changes, c.ExtensionChanges.GetAllChanges()...) 
- } - return changes -} - -// TotalChanges returns total changes for all Components and Definitions -func (c *ComponentsChanges) TotalChanges() int { - if c == nil { - return 0 - } - v := c.PropertyChanges.TotalChanges() - for k := range c.SchemaChanges { - v += c.SchemaChanges[k].TotalChanges() - } - for k := range c.SecuritySchemeChanges { - v += c.SecuritySchemeChanges[k].TotalChanges() - } - if c.ExtensionChanges != nil { - v += c.ExtensionChanges.TotalChanges() - } - return v -} - -// TotalBreakingChanges returns all breaking changes found for all Components and Definitions -func (c *ComponentsChanges) TotalBreakingChanges() int { - v := c.PropertyChanges.TotalBreakingChanges() - for k := range c.SchemaChanges { - v += c.SchemaChanges[k].TotalBreakingChanges() - } - for k := range c.SecuritySchemeChanges { - v += c.SecuritySchemeChanges[k].TotalBreakingChanges() - } - return v -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/contact.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/contact.go deleted file mode 100644 index adcde04a1f1..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/contact.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - "github.com/pb33f/libopenapi/datamodel/low/base" - "github.com/pb33f/libopenapi/datamodel/low/v3" -) - -// ContactChanges Represent changes to a Contact object that is a child of Info, part of an OpenAPI document. 
-type ContactChanges struct { - *PropertyChanges -} - -// GetAllChanges returns a slice of all changes made between Callback objects -func (c *ContactChanges) GetAllChanges() []*Change { - if c == nil { - return nil - } - return c.Changes -} - -// TotalChanges represents the total number of changes that have occurred to a Contact object -func (c *ContactChanges) TotalChanges() int { - if c == nil { - return 0 - } - return c.PropertyChanges.TotalChanges() -} - -// TotalBreakingChanges returns the total number of breaking changes in Contact objects. -func (c *ContactChanges) TotalBreakingChanges() int { - if c == nil { - return 0 - } - return c.PropertyChanges.TotalBreakingChanges() -} - -// CompareContact will check a left (original) and right (new) Contact object for any changes. If there -// were any, a pointer to a ContactChanges object is returned, otherwise if nothing changed - the function -// returns nil. -func CompareContact(l, r *base.Contact) *ContactChanges { - var changes []*Change - props := make([]*PropertyCheck, 0, 3) - - props = append(props, - NewPropertyCheck(CompContact, PropURL, - l.URL.ValueNode, r.URL.ValueNode, - v3.URLLabel, &changes, l, r), - NewPropertyCheck(CompContact, PropName, - l.Name.ValueNode, r.Name.ValueNode, - v3.NameLabel, &changes, l, r), - NewPropertyCheck(CompContact, PropEmail, - l.Email.ValueNode, r.Email.ValueNode, - v3.EmailLabel, &changes, l, r), - ) - - CheckProperties(props) - - dc := new(ContactChanges) - dc.PropertyChanges = NewPropertyChanges(changes) - if dc.TotalChanges() <= 0 { - return nil - } - return dc -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/discriminator.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/discriminator.go deleted file mode 100644 index aaf3fb37f06..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/discriminator.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// 
SPDX-License-Identifier: MIT - -package model - -import ( - "github.com/pb33f/libopenapi/datamodel/low/base" -) - -// DiscriminatorChanges represents changes made to a Discriminator OpenAPI object -type DiscriminatorChanges struct { - *PropertyChanges - MappingChanges []*Change `json:"mappings,omitempty" yaml:"mappings,omitempty"` -} - -// TotalChanges returns a count of everything changed within the Discriminator object -func (d *DiscriminatorChanges) TotalChanges() int { - if d == nil { - return 0 - } - l := 0 - if k := d.PropertyChanges.TotalChanges(); k > 0 { - l += k - } - if k := len(d.MappingChanges); k > 0 { - l += k - } - return l -} - -// GetAllChanges returns a slice of all changes made between Callback objects -func (c *DiscriminatorChanges) GetAllChanges() []*Change { - if c == nil { - return nil - } - var changes []*Change - changes = append(changes, c.Changes...) - if c.MappingChanges != nil { - changes = append(changes, c.MappingChanges...) - } - return changes -} - -// TotalBreakingChanges returns the number of breaking changes made by the Discriminator -func (d *DiscriminatorChanges) TotalBreakingChanges() int { - return d.PropertyChanges.TotalBreakingChanges() + CountBreakingChanges(d.MappingChanges) -} - -// CompareDiscriminator will check a left (original) and right (new) Discriminator object for changes -// and will return a pointer to DiscriminatorChanges -func CompareDiscriminator(l, r *base.Discriminator) *DiscriminatorChanges { - dc := new(DiscriminatorChanges) - var changes []*Change - props := make([]*PropertyCheck, 0, 2) - var mappingChanges []*Change - - props = append(props, - NewPropertyCheck(CompDiscriminator, PropPropertyName, - l.PropertyName.ValueNode, r.PropertyName.ValueNode, - base.PropertyNameLabel, &changes, l, r), - NewPropertyCheck(CompDiscriminator, PropDefaultMapping, - l.DefaultMapping.ValueNode, r.DefaultMapping.ValueNode, - base.DefaultMappingLabel, &changes, l, r), - ) - - CheckProperties(props) - - // flatten maps - 
lMap := FlattenLowLevelOrderedMap[string](l.Mapping.Value) - rMap := FlattenLowLevelOrderedMap[string](r.Mapping.Value) - - // check for removals, modifications and moves - for i := range lMap { - CheckForObjectAdditionOrRemoval[string](lMap, rMap, i, &mappingChanges, BreakingAdded(CompDiscriminator, PropMapping), BreakingRemoved(CompDiscriminator, PropMapping)) - // if the existing tag exists, let's check it. - if rMap[i] != nil { - if lMap[i].Value != rMap[i].Value { - CreateChange(&mappingChanges, Modified, i, lMap[i].GetValueNode(), - rMap[i].GetValueNode(), BreakingModified(CompDiscriminator, PropMapping), lMap[i].GetValue(), rMap[i].GetValue()) - } - } - } - - for i := range rMap { - if lMap[i] == nil { - CreateChange(&mappingChanges, ObjectAdded, i, nil, - rMap[i].GetValueNode(), BreakingAdded(CompDiscriminator, PropMapping), nil, rMap[i].GetValue()) - } - } - - dc.PropertyChanges = NewPropertyChanges(changes) - dc.MappingChanges = mappingChanges - if dc.TotalChanges() <= 0 { - return nil - } - return dc -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/document.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/document.go deleted file mode 100644 index 5c9263c10d1..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/document.go +++ /dev/null @@ -1,349 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -// Package model -// -// What-changed models are unified across OpenAPI and Swagger. Everything is kept flat for simplicity, so please -// excuse the size of the package. There is a lot of data to crunch! -// -// Every model in here is either universal (works across both versions of OpenAPI) or is bound to a specific version -// of OpenAPI. There is only a single model however - so version specific objects are marked accordingly. 
-package model - -import ( - "reflect" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/datamodel/low/base" - v2 "github.com/pb33f/libopenapi/datamodel/low/v2" - v3 "github.com/pb33f/libopenapi/datamodel/low/v3" -) - -// DocumentChanges represents all the changes made to an OpenAPI document. -type DocumentChanges struct { - *PropertyChanges - InfoChanges *InfoChanges `json:"info,omitempty" yaml:"info,omitempty"` - PathsChanges *PathsChanges `json:"paths,omitempty" yaml:"paths,omitempty"` - TagChanges []*TagChanges `json:"tags,omitempty" yaml:"tags,omitempty"` - ExternalDocChanges *ExternalDocChanges `json:"externalDoc,omitempty" yaml:"externalDoc,omitempty"` - WebhookChanges map[string]*PathItemChanges `json:"webhooks,omitempty" yaml:"webhooks,omitempty"` - ServerChanges []*ServerChanges `json:"servers,omitempty" yaml:"servers,omitempty"` - SecurityRequirementChanges []*SecurityRequirementChanges `json:"securityRequirements,omitempty" yaml:"securityRequirements,omitempty"` - ComponentsChanges *ComponentsChanges `json:"components,omitempty" yaml:"components,omitempty"` - ExtensionChanges *ExtensionChanges `json:"extensions,omitempty" yaml:"extensions,omitempty"` -} - -// TotalChanges returns a total count of all changes made in the Document -func (d *DocumentChanges) TotalChanges() int { - if d == nil { - return 0 - } - - c := d.PropertyChanges.TotalChanges() - if d.InfoChanges != nil { - c += d.InfoChanges.TotalChanges() - } - if d.PathsChanges != nil { - c += d.PathsChanges.TotalChanges() - } - for k := range d.TagChanges { - c += d.TagChanges[k].TotalChanges() - } - if d.ExternalDocChanges != nil { - c += d.ExternalDocChanges.TotalChanges() - } - for k := range d.WebhookChanges { - c += d.WebhookChanges[k].TotalChanges() - } - for k := range d.ServerChanges { - c += d.ServerChanges[k].TotalChanges() - } - for k := range d.SecurityRequirementChanges { - c += d.SecurityRequirementChanges[k].TotalChanges() - } - if d.ComponentsChanges 
!= nil { - c += d.ComponentsChanges.TotalChanges() - } - if d.ExtensionChanges != nil { - c += d.ExtensionChanges.TotalChanges() - } - return c -} - -// GetAllChanges returns a slice of all changes made between Document objects -func (d *DocumentChanges) GetAllChanges() []*Change { - if d == nil { - return nil - } - - var changes []*Change - changes = append(changes, d.Changes...) - if d.InfoChanges != nil { - changes = append(changes, d.InfoChanges.GetAllChanges()...) - } - if d.PathsChanges != nil { - changes = append(changes, d.PathsChanges.GetAllChanges()...) - } - for k := range d.TagChanges { - changes = append(changes, d.TagChanges[k].GetAllChanges()...) - } - if d.ExternalDocChanges != nil { - changes = append(changes, d.ExternalDocChanges.GetAllChanges()...) - } - for k := range d.WebhookChanges { - changes = append(changes, d.WebhookChanges[k].GetAllChanges()...) - } - for k := range d.ServerChanges { - changes = append(changes, d.ServerChanges[k].GetAllChanges()...) - } - for k := range d.SecurityRequirementChanges { - changes = append(changes, d.SecurityRequirementChanges[k].GetAllChanges()...) - } - if d.ComponentsChanges != nil { - changes = append(changes, d.ComponentsChanges.GetAllChanges()...) - } - if d.ExtensionChanges != nil { - changes = append(changes, d.ExtensionChanges.GetAllChanges()...) 
- } - return changes -} - -// TotalBreakingChanges returns a total count of all breaking changes made in the Document -func (d *DocumentChanges) TotalBreakingChanges() int { - if d == nil { - return 0 - } - - c := d.PropertyChanges.TotalBreakingChanges() - if d.InfoChanges != nil { - c += d.InfoChanges.TotalBreakingChanges() - } - if d.PathsChanges != nil { - c += d.PathsChanges.TotalBreakingChanges() - } - for k := range d.TagChanges { - c += d.TagChanges[k].TotalBreakingChanges() - } - if d.ExternalDocChanges != nil { - c += d.ExternalDocChanges.TotalBreakingChanges() - } - for k := range d.WebhookChanges { - c += d.WebhookChanges[k].TotalBreakingChanges() - } - for k := range d.ServerChanges { - c += d.ServerChanges[k].TotalBreakingChanges() - } - for k := range d.SecurityRequirementChanges { - c += d.SecurityRequirementChanges[k].TotalBreakingChanges() - } - if d.ComponentsChanges != nil { - c += d.ComponentsChanges.TotalBreakingChanges() - } - return c -} - -// CompareDocuments will compare any two OpenAPI documents (either Swagger or OpenAPI) and return a pointer to -// DocumentChanges that outlines everything that was found to have changed. 
-func CompareDocuments(l, r any) *DocumentChanges { - var changes []*Change - var props []*PropertyCheck - - dc := new(DocumentChanges) - - // reset schema hashmap - base.SchemaQuickHashMap.Clear() - - // clear hash cache to ensure clean state for comparison - low.ClearHashCache() - - if reflect.TypeOf(&v2.Swagger{}) == reflect.TypeOf(l) && reflect.TypeOf(&v2.Swagger{}) == reflect.TypeOf(r) { - lDoc := l.(*v2.Swagger) - rDoc := r.(*v2.Swagger) - - // version - addPropertyCheck(&props, lDoc.Swagger.ValueNode, rDoc.Swagger.ValueNode, - lDoc.Swagger.Value, rDoc.Swagger.Value, &changes, v3.SwaggerLabel, true, CompOpenAPI, "") - - // host - addPropertyCheck(&props, lDoc.Host.ValueNode, rDoc.Host.ValueNode, - lDoc.Host.Value, rDoc.Host.Value, &changes, v3.HostLabel, true, "", "") - - // base path - addPropertyCheck(&props, lDoc.BasePath.ValueNode, rDoc.BasePath.ValueNode, - lDoc.BasePath.Value, rDoc.BasePath.Value, &changes, v3.BasePathLabel, true, "", "") - - // schemes - if len(lDoc.Schemes.Value) > 0 || len(rDoc.Schemes.Value) > 0 { - ExtractStringValueSliceChanges(lDoc.Schemes.Value, rDoc.Schemes.Value, - &changes, v3.SchemesLabel, true) - } - // consumes - if len(lDoc.Consumes.Value) > 0 || len(rDoc.Consumes.Value) > 0 { - ExtractStringValueSliceChanges(lDoc.Consumes.Value, rDoc.Consumes.Value, - &changes, v3.ConsumesLabel, true) - } - // produces - if len(lDoc.Produces.Value) > 0 || len(rDoc.Produces.Value) > 0 { - ExtractStringValueSliceChanges(lDoc.Produces.Value, rDoc.Produces.Value, - &changes, v3.ProducesLabel, true) - } - - // tags - dc.TagChanges = CompareTags(lDoc.Tags.Value, rDoc.Tags.Value) - - // paths - if !lDoc.Paths.IsEmpty() || !rDoc.Paths.IsEmpty() { - dc.PathsChanges = ComparePaths(lDoc.Paths.Value, rDoc.Paths.Value) - } - - // external docs - compareDocumentExternalDocs(lDoc, rDoc, dc, &changes) - - // info - compareDocumentInfo(&lDoc.Info, &rDoc.Info, dc, &changes) - - // security - if !lDoc.Security.IsEmpty() || !rDoc.Security.IsEmpty() { - 
checkSecurity(lDoc.Security, rDoc.Security, &changes, dc) - } - - // components / definitions - // swagger (damn you) decided to put all this stuff at the document root, rather than cleanly - // placing it under a parent, like they did with OpenAPI. This means picking through each definition - // creating a new set of changes and then morphing them into a single changes object. - cc := new(ComponentsChanges) - cc.PropertyChanges = new(PropertyChanges) - if n := CompareComponents(lDoc.Definitions.Value, rDoc.Definitions.Value); n != nil { - cc.SchemaChanges = n.SchemaChanges - } - if n := CompareComponents(lDoc.SecurityDefinitions.Value, rDoc.SecurityDefinitions.Value); n != nil { - cc.SecuritySchemeChanges = n.SecuritySchemeChanges - } - if n := CompareComponents(lDoc.Parameters.Value, rDoc.Parameters.Value); n != nil { - cc.PropertyChanges.Changes = append(cc.PropertyChanges.Changes, n.Changes...) - } - if n := CompareComponents(lDoc.Responses.Value, rDoc.Responses.Value); n != nil { - cc.Changes = append(cc.Changes, n.Changes...) 
- } - dc.ExtensionChanges = CompareExtensions(lDoc.Extensions, rDoc.Extensions) - if cc.TotalChanges() > 0 { - dc.ComponentsChanges = cc - } - } - - if reflect.TypeOf(&v3.Document{}) == reflect.TypeOf(l) && reflect.TypeOf(&v3.Document{}) == reflect.TypeOf(r) { - lDoc := l.(*v3.Document) - rDoc := r.(*v3.Document) - - // version - addPropertyCheck(&props, lDoc.Version.ValueNode, rDoc.Version.ValueNode, - lDoc.Version.Value, rDoc.Version.Value, &changes, v3.OpenAPILabel, - BreakingModified(CompOpenAPI, ""), CompOpenAPI, "") - - // schema dialect - addPropertyCheck(&props, lDoc.JsonSchemaDialect.ValueNode, rDoc.JsonSchemaDialect.ValueNode, - lDoc.JsonSchemaDialect.Value, rDoc.JsonSchemaDialect.Value, &changes, v3.JSONSchemaDialectLabel, - BreakingModified(CompJSONSchemaDialect, ""), CompJSONSchemaDialect, "") - - // $self field (3.2+) - addPropertyCheck(&props, lDoc.Self.ValueNode, rDoc.Self.ValueNode, - lDoc.Self.Value, rDoc.Self.Value, &changes, v3.SelfLabel, - BreakingModified(CompSelf, ""), CompSelf, "") - - // tags - dc.TagChanges = CompareTags(lDoc.Tags.Value, rDoc.Tags.Value) - - // paths - if !lDoc.Paths.IsEmpty() || !rDoc.Paths.IsEmpty() { - dc.PathsChanges = ComparePaths(lDoc.Paths.Value, rDoc.Paths.Value) - } - - // external docs - compareDocumentExternalDocs(lDoc, rDoc, dc, &changes) - - // info - compareDocumentInfo(&lDoc.Info, &rDoc.Info, dc, &changes) - - // security - if !lDoc.Security.IsEmpty() || !rDoc.Security.IsEmpty() { - checkSecurity(lDoc.Security, rDoc.Security, &changes, dc) - } - - // compare components. 
- if !lDoc.Components.IsEmpty() && !rDoc.Components.IsEmpty() { - if n := CompareComponents(lDoc.Components.Value, rDoc.Components.Value); n != nil { - dc.ComponentsChanges = n - } - } - if !lDoc.Components.IsEmpty() && rDoc.Components.IsEmpty() { - CreateChange(&changes, PropertyRemoved, v3.ComponentsLabel, - lDoc.Components.ValueNode, nil, BreakingRemoved(CompComponents, ""), lDoc.Components.Value, nil) - } - if lDoc.Components.IsEmpty() && !rDoc.Components.IsEmpty() { - CreateChange(&changes, PropertyAdded, v3.ComponentsLabel, - nil, rDoc.Components.ValueNode, BreakingAdded(CompComponents, ""), nil, lDoc.Components.Value) - } - - // compare servers - if n := checkServers(lDoc.Servers, rDoc.Servers, CompServers, ""); n != nil { - dc.ServerChanges = n - } - - // compare webhooks - dc.WebhookChanges = CheckMapForChanges(lDoc.Webhooks.Value, rDoc.Webhooks.Value, &changes, - v3.WebhooksLabel, ComparePathItemsV3) - - // extensions - dc.ExtensionChanges = CompareExtensions(lDoc.Extensions, rDoc.Extensions) - } - - CheckProperties(props) - dc.PropertyChanges = NewPropertyChanges(changes) - if dc.TotalChanges() <= 0 { - return nil - } - base.SchemaQuickHashMap.Clear() - return dc -} - -func compareDocumentExternalDocs(l, r low.HasExternalDocs, dc *DocumentChanges, changes *[]*Change) { - // external docs - if !l.GetExternalDocs().IsEmpty() && !r.GetExternalDocs().IsEmpty() { - lExtDoc := l.GetExternalDocs().Value.(*base.ExternalDoc) - rExtDoc := r.GetExternalDocs().Value.(*base.ExternalDoc) - if !low.AreEqual(lExtDoc, rExtDoc) { - dc.ExternalDocChanges = CompareExternalDocs(lExtDoc, rExtDoc) - } - } - if l.GetExternalDocs().IsEmpty() && !r.GetExternalDocs().IsEmpty() { - CreateChange(changes, PropertyAdded, v3.ExternalDocsLabel, - nil, r.GetExternalDocs().ValueNode, false, nil, - r.GetExternalDocs().Value) - } - if !l.GetExternalDocs().IsEmpty() && r.GetExternalDocs().IsEmpty() { - CreateChange(changes, PropertyRemoved, v3.ExternalDocsLabel, - 
l.GetExternalDocs().ValueNode, nil, false, l.GetExternalDocs().Value, - nil) - } -} - -func compareDocumentInfo(l, r *low.NodeReference[*base.Info], dc *DocumentChanges, changes *[]*Change) { - // info - if !l.IsEmpty() && !r.IsEmpty() { - lInfo := l.Value - rInfo := r.Value - if !low.AreEqual(lInfo, rInfo) { - dc.InfoChanges = CompareInfo(lInfo, rInfo) - } - } - if l.IsEmpty() && !r.IsEmpty() { - CreateChange(changes, PropertyAdded, v3.InfoLabel, - nil, r.ValueNode, false, nil, - r.Value) - } - if !l.IsEmpty() && r.IsEmpty() { - CreateChange(changes, PropertyRemoved, v3.InfoLabel, - l.ValueNode, nil, false, l.Value, - nil) - } -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/document_flat.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/document_flat.go deleted file mode 100644 index 21f7771a303..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/document_flat.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2023-2024 Princess Beef Heavy Industries, LLC / Dave Shanley -// https://pb33f.io - -package model - -type DocumentChangesFlat struct { - *PropertyChanges - InfoChanges []*Change `json:"info,omitempty" yaml:"info,omitempty"` - PathsChanges []*Change `json:"paths,omitempty" yaml:"paths,omitempty"` - TagChanges []*Change `json:"tags,omitempty" yaml:"tags,omitempty"` - ExternalDocChanges []*Change `json:"externalDoc,omitempty" yaml:"externalDoc,omitempty"` - WebhookChanges []*Change `json:"webhooks,omitempty" yaml:"webhooks,omitempty"` - ServerChanges []*Change `json:"servers,omitempty" yaml:"servers,omitempty"` - SecurityRequirementChanges []*Change `json:"securityRequirements,omitempty" yaml:"securityRequirements,omitempty"` - ComponentsChanges []*Change `json:"components,omitempty" yaml:"components,omitempty"` - ExtensionChanges []*Change `json:"extensions,omitempty" yaml:"extensions,omitempty"` -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/encoding.go 
b/vendor/github.com/pb33f/libopenapi/what-changed/model/encoding.go deleted file mode 100644 index f4252016764..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/encoding.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - v3 "github.com/pb33f/libopenapi/datamodel/low/v3" -) - -// EncodingChanges represent all the changes made to an Encoding object -type EncodingChanges struct { - *PropertyChanges - HeaderChanges map[string]*HeaderChanges `json:"headers,omitempty" yaml:"headers,omitempty"` -} - -// GetAllChanges returns a slice of all changes made between Encoding objects -func (e *EncodingChanges) GetAllChanges() []*Change { - if e == nil { - return nil - } - var changes []*Change - changes = append(changes, e.Changes...) - for k := range e.HeaderChanges { - changes = append(changes, e.HeaderChanges[k].GetAllChanges()...) - } - return changes -} - -// TotalChanges returns the total number of changes made between two Encoding objects -func (e *EncodingChanges) TotalChanges() int { - if e == nil { - return 0 - } - c := e.PropertyChanges.TotalChanges() - if e.HeaderChanges != nil { - for i := range e.HeaderChanges { - c += e.HeaderChanges[i].TotalChanges() - } - } - return c -} - -// TotalBreakingChanges returns the number of changes made between two Encoding objects that were breaking. -func (e *EncodingChanges) TotalBreakingChanges() int { - c := e.PropertyChanges.TotalBreakingChanges() - if e.HeaderChanges != nil { - for i := range e.HeaderChanges { - c += e.HeaderChanges[i].TotalBreakingChanges() - } - } - return c -} - -// CompareEncoding returns a pointer to *EncodingChanges that contain all changes made between a left and right -// set of Encoding objects. 
-func CompareEncoding(l, r *v3.Encoding) *EncodingChanges { - var changes []*Change - props := make([]*PropertyCheck, 0, 4) - - props = append(props, - NewPropertyCheck(CompEncoding, PropContentType, - l.ContentType.ValueNode, r.ContentType.ValueNode, - v3.ContentTypeLabel, &changes, l, r), - NewPropertyCheck(CompEncoding, PropStyle, - l.Style.ValueNode, r.Style.ValueNode, - v3.StyleLabel, &changes, l, r), - NewPropertyCheck(CompEncoding, PropExplode, - l.Explode.ValueNode, r.Explode.ValueNode, - v3.ExplodeLabel, &changes, l, r), - NewPropertyCheck(CompEncoding, PropAllowReserved, - l.AllowReserved.ValueNode, r.AllowReserved.ValueNode, - v3.AllowReservedLabel, &changes, l, r), - ) - - // check everything. - CheckProperties(props) - ec := new(EncodingChanges) - - // headers - ec.HeaderChanges = CheckMapForChanges(l.Headers.Value, r.Headers.Value, &changes, v3.HeadersLabel, CompareHeadersV3) - ec.PropertyChanges = NewPropertyChanges(changes) - if ec.TotalChanges() <= 0 { - return nil - } - return ec -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/example.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/example.go deleted file mode 100644 index 8e6ea730647..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/example.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - "fmt" - "sort" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/datamodel/low/base" - v3 "github.com/pb33f/libopenapi/datamodel/low/v3" - "github.com/pb33f/libopenapi/utils" -) - -// ExampleChanges represent changes to an Example object, part of an OpenAPI specification. 
-type ExampleChanges struct { - *PropertyChanges - ExtensionChanges *ExtensionChanges `json:"extensions,omitempty" yaml:"extensions,omitempty"` -} - -// GetAllChanges returns a slice of all changes made between Example objects -func (e *ExampleChanges) GetAllChanges() []*Change { - if e == nil { - return nil - } - var changes []*Change - changes = append(changes, e.Changes...) - if e.ExtensionChanges != nil { - changes = append(changes, e.ExtensionChanges.GetAllChanges()...) - } - return changes -} - -// TotalChanges returns the total number of changes made to Example -func (e *ExampleChanges) TotalChanges() int { - if e == nil { - return 0 - } - l := e.PropertyChanges.TotalChanges() - if e.ExtensionChanges != nil { - l += e.ExtensionChanges.PropertyChanges.TotalChanges() - } - return l -} - -// TotalBreakingChanges returns the total number of breaking changes made to Example -func (e *ExampleChanges) TotalBreakingChanges() int { - l := e.PropertyChanges.TotalBreakingChanges() - return l -} - -// CompareExamples returns a pointer to ExampleChanges that contains all changes made between -// left and right Example instances. If l is nil, the example was added. If r is nil, it was removed. 
-func CompareExamples(l, r *base.Example) *ExampleChanges { - ec := new(ExampleChanges) - var changes []*Change - - if l == nil { - // Example was added - use RootNode for proper line/column location - CreateChange(&changes, ObjectAdded, v3.ExampleLabel, - nil, r.RootNode, BreakingAdded(CompExample, PropValue), nil, r) - ec.PropertyChanges = NewPropertyChanges(changes) - return ec - } - if r == nil { - // Example was removed - use RootNode for proper line/column location - CreateChange(&changes, ObjectRemoved, v3.ExampleLabel, - l.RootNode, nil, BreakingRemoved(CompExample, PropValue), l, nil) - ec.PropertyChanges = NewPropertyChanges(changes) - return ec - } - - props := make([]*PropertyCheck, 0, 2) - - props = append(props, - NewPropertyCheck(CompExample, PropSummary, - l.Summary.ValueNode, r.Summary.ValueNode, - v3.SummaryLabel, &changes, l, r), - NewPropertyCheck(CompExample, PropDescription, - l.Description.ValueNode, r.Description.ValueNode, - v3.DescriptionLabel, &changes, l, r), - ) - - // Value - if utils.IsNodeMap(l.Value.ValueNode) && utils.IsNodeMap(r.Value.ValueNode) { - lKeys := make([]string, len(l.Value.ValueNode.Content)/2) - rKeys := make([]string, len(r.Value.ValueNode.Content)/2) - z := 0 - for k := range l.Value.ValueNode.Content { - if k%2 == 0 { - // if there is no value (value is another map or something else), render the node into yaml and hash it. - // https://github.com/pb33f/libopenapi/issues/61 - val := l.Value.ValueNode.Content[k+1].Value - if val == "" { - val = low.HashYAMLNodeSlice(l.Value.ValueNode.Content[k+1].Content) - } - lKeys[z] = fmt.Sprintf("%v-%v-%v", - l.Value.ValueNode.Content[k].Value, - l.Value.ValueNode.Content[k+1].Tag, - fmt.Sprintf("%x", val)) - z++ - } else { - continue - } - } - z = 0 - for k := range r.Value.ValueNode.Content { - if k%2 == 0 { - // if there is no value (value is another map or something else), render the node into yaml and hash it. 
- // https://github.com/pb33f/libopenapi/issues/61 - val := r.Value.ValueNode.Content[k+1].Value - if val == "" { - val = low.HashYAMLNodeSlice(r.Value.ValueNode.Content[k+1].Content) - } - rKeys[z] = fmt.Sprintf("%v-%v-%v", - r.Value.ValueNode.Content[k].Value, - r.Value.ValueNode.Content[k+1].Tag, - fmt.Sprintf("%x", val)) - z++ - } else { - continue - } - } - sort.Strings(lKeys) - sort.Strings(rKeys) - for k := range lKeys { - if k < len(rKeys) && lKeys[k] != rKeys[k] { - CreateChangeWithEncoding(&changes, Modified, v3.ValueLabel, - l.Value.GetValueNode(), r.Value.GetValueNode(), BreakingModified(CompExample, PropValue), l.Value.GetValue(), r.Value.GetValue()) - continue - } - if k >= len(rKeys) { - CreateChangeWithEncoding(&changes, PropertyRemoved, v3.ValueLabel, - l.Value.ValueNode, r.Value.ValueNode, BreakingRemoved(CompExample, PropValue), l.Value.Value, r.Value.Value) - } - } - for k := range rKeys { - if k >= len(lKeys) { - CreateChangeWithEncoding(&changes, PropertyAdded, v3.ValueLabel, - l.Value.ValueNode, r.Value.ValueNode, BreakingAdded(CompExample, PropValue), l.Value.Value, r.Value.Value) - } - } - } else { - props = append(props, NewPropertyCheck(CompExample, PropValue, - l.Value.ValueNode, r.Value.ValueNode, - v3.ValueLabel, &changes, l, r)) - } - // ExternalValue - props = append(props, NewPropertyCheck(CompExample, PropExternalValue, - l.ExternalValue.ValueNode, r.ExternalValue.ValueNode, - v3.ExternalValue, &changes, l, r)) - - // DataValue (OpenAPI 3.2+) - props = append(props, NewPropertyCheck(CompExample, PropDataValue, - l.DataValue.ValueNode, r.DataValue.ValueNode, - base.DataValueLabel, &changes, l, r)) - - // SerializedValue (OpenAPI 3.2+) - props = append(props, NewPropertyCheck(CompExample, PropSerializedValue, - l.SerializedValue.ValueNode, r.SerializedValue.ValueNode, - base.SerializedValueLabel, &changes, l, r)) - - // check properties - CheckProperties(props) - - // check extensions - ec.ExtensionChanges = CheckExtensions(l, r) - 
ec.PropertyChanges = NewPropertyChanges(changes) - if ec.TotalChanges() <= 0 { - return nil - } - return ec -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/examples.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/examples.go deleted file mode 100644 index c271c2700bd..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/examples.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - "github.com/pb33f/libopenapi/datamodel/low" - v2 "github.com/pb33f/libopenapi/datamodel/low/v2" - "go.yaml.in/yaml/v4" -) - -// ExamplesChanges represents changes made between Swagger Examples objects (Not OpenAPI 3). -type ExamplesChanges struct { - *PropertyChanges -} - -// GetAllChanges returns a slice of all changes made between Examples objects -func (a *ExamplesChanges) GetAllChanges() []*Change { - if a == nil { - return nil - } - return a.Changes -} - -// TotalChanges represents the total number of changes made between Example instances. -func (a *ExamplesChanges) TotalChanges() int { - if a == nil { - return 0 - } - return a.PropertyChanges.TotalChanges() -} - -// TotalBreakingChanges will always return 0. Examples cannot break a contract. -func (a *ExamplesChanges) TotalBreakingChanges() int { - return 0 // not supported. -} - -// CompareExamplesV2 compares two Swagger Examples objects, returning a pointer to -// ExamplesChanges if anything was found. 
-func CompareExamplesV2(l, r *v2.Examples) *ExamplesChanges { - lHashes := make(map[string]string) - rHashes := make(map[string]string) - lValues := make(map[string]low.ValueReference[*yaml.Node]) - rValues := make(map[string]low.ValueReference[*yaml.Node]) - - for k, v := range l.Values.FromOldest() { - lHashes[k.Value] = low.GenerateHashString(v.Value) - lValues[k.Value] = v - } - - for k, v := range r.Values.FromOldest() { - rHashes[k.Value] = low.GenerateHashString(v.Value) - rValues[k.Value] = v - } - var changes []*Change - - // check left example hashes - for k := range lHashes { - rhash := rHashes[k] - if rhash == "" { - CreateChange(&changes, ObjectRemoved, k, - lValues[k].GetValueNode(), nil, false, - lValues[k].GetValue(), nil) - continue - } - if lHashes[k] == rHashes[k] { - continue - } - CreateChange(&changes, Modified, k, - lValues[k].GetValueNode(), rValues[k].GetValueNode(), false, - lValues[k].GetValue(), lValues[k].GetValue()) - - } - - // check right example hashes - for k := range rHashes { - lhash := lHashes[k] - if lhash == "" { - CreateChange(&changes, ObjectAdded, k, - nil, lValues[k].GetValueNode(), false, - nil, lValues[k].GetValue()) - continue - } - } - - ex := new(ExamplesChanges) - ex.PropertyChanges = NewPropertyChanges(changes) - if ex.TotalChanges() <= 0 { - return nil - } - return ex -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/extensions.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/extensions.go deleted file mode 100644 index 6aa571b84e3..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/extensions.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - "strings" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// ExtensionChanges represents any changes to custom extensions defined for an 
OpenAPI object. -type ExtensionChanges struct { - *PropertyChanges -} - -// GetAllChanges returns a slice of all changes made between Extension objects -func (e *ExtensionChanges) GetAllChanges() []*Change { - if e == nil { - return nil - } - return e.Changes -} - -// TotalChanges returns the total number of object extensions that were made. -func (e *ExtensionChanges) TotalChanges() int { - if e == nil { - return 0 - } - return e.PropertyChanges.TotalChanges() -} - -// TotalBreakingChanges returns the total number of breaking changes in Extension objects. -func (e *ExtensionChanges) TotalBreakingChanges() int { - if e == nil { - return 0 - } - return e.PropertyChanges.TotalBreakingChanges() -} - -// CompareExtensions will compare a left and right map of Tag/ValueReference models for any changes to -// anything. This function does not try and cast the value of an extension to perform checks, it -// will perform a basic value check. -// -// A current limitation relates to extensions being objects and a property of the object changes, -// there is currently no support for knowing anything changed - so it is ignored. -func CompareExtensions(l, r *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]]) *ExtensionChanges { - // look at the original and then look through the new. 
- seenLeft := make(map[string]*low.ValueReference[*yaml.Node]) - seenRight := make(map[string]*low.ValueReference[*yaml.Node]) - - for k, h := range l.FromOldest() { - seenLeft[strings.ToLower(k.Value)] = &h - } - for k, h := range r.FromOldest() { - seenRight[strings.ToLower(k.Value)] = &h - } - - var changes []*Change - for i := range seenLeft { - - CheckForObjectAdditionOrRemovalWithEncoding[*yaml.Node](seenLeft, seenRight, i, &changes, false, true) - - if seenRight[i] != nil { - var props []*PropertyCheck - - props = append(props, &PropertyCheck{ - LeftNode: seenLeft[i].ValueNode, - RightNode: seenRight[i].ValueNode, - Label: i, - Changes: &changes, - Breaking: false, - Original: seenLeft[i].Value, - New: seenRight[i].Value, - }) - - // check properties with encoding for extensions - CheckPropertiesWithEncoding(props) - } - } - for i := range seenRight { - if seenLeft[i] == nil { - CheckForObjectAdditionOrRemovalWithEncoding[*yaml.Node](seenLeft, seenRight, i, &changes, false, true) - } - } - ex := new(ExtensionChanges) - ex.PropertyChanges = NewPropertyChanges(changes) - if ex.TotalChanges() <= 0 { - return nil - } - return ex -} - -// CheckExtensions is a helper method to un-pack a left and right model that contains extensions. Once unpacked -// the extensions are compared and returns a pointer to ExtensionChanges. If nothing changed, nil is returned. 
-func CheckExtensions[T low.HasExtensions[T]](l, r T) *ExtensionChanges { - var lExt, rExt *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] - if orderedmap.Len(l.GetExtensions()) > 0 { - lExt = l.GetExtensions() - } - if orderedmap.Len(r.GetExtensions()) > 0 { - rExt = r.GetExtensions() - } - return CompareExtensions(lExt, rExt) -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/external_docs.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/external_docs.go deleted file mode 100644 index 532b902ef14..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/external_docs.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - "github.com/pb33f/libopenapi/datamodel/low/base" - "github.com/pb33f/libopenapi/datamodel/low/v3" -) - -// ExternalDocChanges represents changes made to any ExternalDoc object from an OpenAPI document. -type ExternalDocChanges struct { - *PropertyChanges - ExtensionChanges *ExtensionChanges `json:"extensions,omitempty" yaml:"extensions,omitempty"` -} - -// GetAllChanges returns a slice of all changes made between Example objects -func (e *ExternalDocChanges) GetAllChanges() []*Change { - if e == nil { - return nil - } - var changes []*Change - changes = append(changes, e.Changes...) - if e.ExtensionChanges != nil { - changes = append(changes, e.ExtensionChanges.GetAllChanges()...) - } - return changes -} - -// TotalChanges returns a count of everything that changed -func (e *ExternalDocChanges) TotalChanges() int { - if e == nil { - return 0 - } - c := e.PropertyChanges.TotalChanges() - if e.ExtensionChanges != nil { - c += e.ExtensionChanges.TotalChanges() - } - return c -} - -// TotalBreakingChanges returns the total number of breaking changes in ExternalDoc objects. 
-func (e *ExternalDocChanges) TotalBreakingChanges() int { - if e == nil { - return 0 - } - c := e.PropertyChanges.TotalBreakingChanges() - if e.ExtensionChanges != nil { - c += e.ExtensionChanges.TotalBreakingChanges() - } - return c -} - -// CompareExternalDocs will compare a left (original) and a right (new) slice of ValueReference -// nodes for any changes between them. If there are changes, then a pointer to ExternalDocChanges -// is returned, otherwise if nothing changed - then nil is returned. -func CompareExternalDocs(l, r *base.ExternalDoc) *ExternalDocChanges { - var changes []*Change - props := make([]*PropertyCheck, 0, 2) - - props = append(props, - NewPropertyCheck(CompExternalDocs, PropURL, - l.URL.ValueNode, r.URL.ValueNode, - v3.URLLabel, &changes, l, r), - NewPropertyCheck(CompExternalDocs, PropDescription, - l.Description.ValueNode, r.Description.ValueNode, - v3.DescriptionLabel, &changes, l, r), - ) - - CheckProperties(props) - - dc := new(ExternalDocChanges) - dc.PropertyChanges = NewPropertyChanges(changes) - - // check extensions - dc.ExtensionChanges = CheckExtensions(l, r) - if dc.TotalChanges() <= 0 { - return nil - } - return dc -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/header.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/header.go deleted file mode 100644 index c3d5f8cc7cb..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/header.go +++ /dev/null @@ -1,298 +0,0 @@ -// Copyright 2022-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - "reflect" - - "github.com/pb33f/libopenapi/datamodel/low" - v2 "github.com/pb33f/libopenapi/datamodel/low/v2" - "github.com/pb33f/libopenapi/datamodel/low/v3" -) - -// HeaderChanges represents changes made between two Header objects. Supports both Swagger and OpenAPI header -// objects, V2 only property Items is broken out into its own. 
-type HeaderChanges struct { - *PropertyChanges - SchemaChanges *SchemaChanges `json:"schemas,omitempty" yaml:"schemas,omitempty"` - ExamplesChanges map[string]*ExampleChanges `json:"examples,omitempty" yaml:"examples,omitempty"` - ContentChanges map[string]*MediaTypeChanges `json:"content,omitempty" yaml:"content,omitempty"` - ExtensionChanges *ExtensionChanges `json:"extensions,omitempty" yaml:"extensions,omitempty"` - - // Items only supported by Swagger (V2) - ItemsChanges *ItemsChanges `json:"items,omitempty" yaml:"items,omitempty"` -} - -// GetAllChanges returns a slice of all changes made between Header objects -func (h *HeaderChanges) GetAllChanges() []*Change { - if h == nil { - return nil - } - var changes []*Change - changes = append(changes, h.Changes...) - for k := range h.ExamplesChanges { - changes = append(changes, h.ExamplesChanges[k].GetAllChanges()...) - } - for k := range h.ContentChanges { - changes = append(changes, h.ContentChanges[k].GetAllChanges()...) - } - if h.ExtensionChanges != nil { - changes = append(changes, h.ExtensionChanges.GetAllChanges()...) - } - if h.SchemaChanges != nil { - changes = append(changes, h.SchemaChanges.GetAllChanges()...) - } - if h.ItemsChanges != nil { - changes = append(changes, h.ItemsChanges.GetAllChanges()...) - } - return changes -} - -// TotalChanges returns the total number of changes made between two Header objects. 
-func (h *HeaderChanges) TotalChanges() int { - if h == nil { - return 0 - } - c := h.PropertyChanges.TotalChanges() - for k := range h.ExamplesChanges { - c += h.ExamplesChanges[k].TotalChanges() - } - for k := range h.ContentChanges { - c += h.ContentChanges[k].TotalChanges() - } - if h.ExtensionChanges != nil { - c += h.ExtensionChanges.TotalChanges() - } - if h.SchemaChanges != nil { - c += h.SchemaChanges.TotalChanges() - } - if h.ItemsChanges != nil { - c += h.ItemsChanges.TotalChanges() - } - return c -} - -// TotalBreakingChanges returns the total number of breaking changes made between two Header instances. -func (h *HeaderChanges) TotalBreakingChanges() int { - c := h.PropertyChanges.TotalBreakingChanges() - for k := range h.ContentChanges { - c += h.ContentChanges[k].TotalBreakingChanges() - } - if h.ItemsChanges != nil { - c += h.ItemsChanges.TotalBreakingChanges() - } - if h.SchemaChanges != nil { - c += h.SchemaChanges.TotalBreakingChanges() - } - return c -} - -// shared header properties -func addOpenAPIHeaderProperties(left, right low.OpenAPIHeader, changes *[]*Change) []*PropertyCheck { - var props []*PropertyCheck - - // style - addPropertyCheck(&props, left.GetStyle().ValueNode, right.GetStyle().ValueNode, - left.GetStyle(), right.GetStyle(), changes, v3.StyleLabel, - BreakingModified(CompHeader, PropStyle), CompHeader, PropStyle) - - // allow reserved - addPropertyCheck(&props, left.GetAllowReserved().ValueNode, right.GetAllowReserved().ValueNode, - left.GetAllowReserved(), right.GetAllowReserved(), changes, v3.AllowReservedLabel, - BreakingModified(CompHeader, PropAllowReserved), CompHeader, PropAllowReserved) - - // allow empty value - addPropertyCheck(&props, left.GetAllowEmptyValue().ValueNode, right.GetAllowEmptyValue().ValueNode, - left.GetAllowEmptyValue(), right.GetAllowEmptyValue(), changes, v3.AllowEmptyValueLabel, - BreakingModified(CompHeader, PropAllowEmptyValue), CompHeader, PropAllowEmptyValue) - - // explode - 
addPropertyCheck(&props, left.GetExplode().ValueNode, right.GetExplode().ValueNode, - left.GetExplode(), right.GetExplode(), changes, v3.ExplodeLabel, - BreakingModified(CompHeader, PropExplode), CompHeader, PropExplode) - - // example - CheckPropertyAdditionOrRemovalWithEncoding(left.GetExample().ValueNode, right.GetExample().ValueNode, - v3.ExampleLabel, changes, - BreakingAdded(CompHeader, PropExample) || BreakingRemoved(CompHeader, PropExample), - left.GetExample(), right.GetExample()) - CheckForModificationWithEncoding(left.GetExample().ValueNode, right.GetExample().ValueNode, - v3.ExampleLabel, changes, BreakingModified(CompHeader, PropExample), - left.GetExample(), right.GetExample()) - - // deprecated - addPropertyCheck(&props, left.GetDeprecated().ValueNode, right.GetDeprecated().ValueNode, - left.GetDeprecated(), right.GetDeprecated(), changes, v3.DeprecatedLabel, - BreakingModified(CompHeader, PropDeprecated), CompHeader, PropDeprecated) - - // required - addPropertyCheck(&props, left.GetRequired().ValueNode, right.GetRequired().ValueNode, - left.GetRequired(), right.GetRequired(), changes, v3.RequiredLabel, - BreakingModified(CompHeader, PropRequired), CompHeader, PropRequired) - - return props -} - -// swagger only properties -func addSwaggerHeaderProperties(left, right low.SwaggerHeader, changes *[]*Change) []*PropertyCheck { - var props []*PropertyCheck - - // type - addPropertyCheck(&props, left.GetType().ValueNode, right.GetType().ValueNode, - left.GetType(), right.GetType(), changes, v3.TypeLabel, true, CompHeader, PropType) - - // format - addPropertyCheck(&props, left.GetFormat().ValueNode, right.GetFormat().ValueNode, - left.GetFormat(), right.GetFormat(), changes, v3.FormatLabel, true, CompHeader, PropFormat) - - // collection format - addPropertyCheck(&props, left.GetCollectionFormat().ValueNode, right.GetCollectionFormat().ValueNode, - left.GetCollectionFormat(), right.GetCollectionFormat(), changes, v3.CollectionFormatLabel, true, 
CompHeader, PropCollectionFormat) - - // maximum - addPropertyCheck(&props, left.GetMaximum().ValueNode, right.GetMaximum().ValueNode, - left.GetMaximum(), right.GetMaximum(), changes, v3.MaximumLabel, true, CompHeader, PropMaximum) - - // minimum - addPropertyCheck(&props, left.GetMinimum().ValueNode, right.GetMinimum().ValueNode, - left.GetMinimum(), right.GetMinimum(), changes, v3.MinimumLabel, true, CompHeader, PropMinimum) - - // exclusive maximum - addPropertyCheck(&props, left.GetExclusiveMaximum().ValueNode, right.GetExclusiveMaximum().ValueNode, - left.GetExclusiveMaximum(), right.GetExclusiveMaximum(), changes, v3.ExclusiveMaximumLabel, true, CompHeader, PropExclusiveMaximum) - - // exclusive minimum - addPropertyCheck(&props, left.GetExclusiveMinimum().ValueNode, right.GetExclusiveMinimum().ValueNode, - left.GetExclusiveMinimum(), right.GetExclusiveMinimum(), changes, v3.ExclusiveMinimumLabel, true, CompHeader, PropExclusiveMinimum) - - // max length - addPropertyCheck(&props, left.GetMaxLength().ValueNode, right.GetMaxLength().ValueNode, - left.GetMaxLength(), right.GetMaxLength(), changes, v3.MaxLengthLabel, true, CompHeader, PropMaxLength) - - // min length - addPropertyCheck(&props, left.GetMinLength().ValueNode, right.GetMinLength().ValueNode, - left.GetMinLength(), right.GetMinLength(), changes, v3.MinLengthLabel, true, CompHeader, PropMinLength) - - // pattern - addPropertyCheck(&props, left.GetPattern().ValueNode, right.GetPattern().ValueNode, - left.GetPattern(), right.GetPattern(), changes, v3.PatternLabel, true, CompHeader, PropPattern) - - // max items - addPropertyCheck(&props, left.GetMaxItems().ValueNode, right.GetMaxItems().ValueNode, - left.GetMaxItems(), right.GetMaxItems(), changes, v3.MaxItemsLabel, true, CompHeader, PropMaxItems) - - // min items - addPropertyCheck(&props, left.GetMinItems().ValueNode, right.GetMinItems().ValueNode, - left.GetMinItems(), right.GetMinItems(), changes, v3.MinItemsLabel, true, CompHeader, PropMinItems) 
- - // unique items - addPropertyCheck(&props, left.GetUniqueItems().ValueNode, right.GetUniqueItems().ValueNode, - left.GetUniqueItems(), right.GetUniqueItems(), changes, v3.UniqueItemsLabel, true, CompHeader, PropUniqueItems) - - // multiple of - addPropertyCheck(&props, left.GetMultipleOf().ValueNode, right.GetMultipleOf().ValueNode, - left.GetMultipleOf(), right.GetMultipleOf(), changes, v3.MultipleOfLabel, true, CompHeader, PropMultipleOf) - - return props -} - -// common header properties -func addCommonHeaderProperties(left, right low.HasDescription, changes *[]*Change) []*PropertyCheck { - var props []*PropertyCheck - - // description - addPropertyCheck(&props, left.GetDescription().ValueNode, right.GetDescription().ValueNode, - left.GetDescription(), right.GetDescription(), changes, v3.DescriptionLabel, - BreakingModified(CompHeader, PropDescription), CompHeader, PropDescription) - - return props -} - -// CompareHeadersV2 is a Swagger compatible, typed signature used for other generic functions. It simply -// wraps CompareHeaders and provides nothing other that a typed interface. -func CompareHeadersV2(l, r *v2.Header) *HeaderChanges { - return CompareHeaders(l, r) -} - -// CompareHeadersV3 is an OpenAPI 3+ compatible, typed signature used for other generic functions. It simply -// wraps CompareHeaders and provides nothing other that a typed interface. -func CompareHeadersV3(l, r *v3.Header) *HeaderChanges { - return CompareHeaders(l, r) -} - -// CompareHeaders will compare left and right Header objects (any version of Swagger or OpenAPI) and return -// a pointer to HeaderChanges with anything that has changed, or nil if nothing changed. -func CompareHeaders(l, r any) *HeaderChanges { - var changes []*Change - var props []*PropertyCheck - hc := new(HeaderChanges) - - // handle swagger. 
- if reflect.TypeOf(&v2.Header{}) == reflect.TypeOf(l) && reflect.TypeOf(&v2.Header{}) == reflect.TypeOf(r) { - lHeader := l.(*v2.Header) - rHeader := r.(*v2.Header) - - // perform hash check to avoid further processing - if low.AreEqual(lHeader, rHeader) { - return nil - } - - props = append(props, addCommonHeaderProperties(lHeader, rHeader, &changes)...) - props = append(props, addSwaggerHeaderProperties(lHeader, rHeader, &changes)...) - - // enum - if len(lHeader.Enum.Value) > 0 || len(rHeader.Enum.Value) > 0 { - ExtractRawValueSliceChanges(lHeader.Enum.Value, rHeader.Enum.Value, &changes, v3.EnumLabel, true) - } - - // items - if !lHeader.Items.IsEmpty() && !rHeader.Items.IsEmpty() { - if !low.AreEqual(lHeader.Items.Value, rHeader.Items.Value) { - hc.ItemsChanges = CompareItems(lHeader.Items.Value, rHeader.Items.Value) - } - } - if lHeader.Items.IsEmpty() && !rHeader.Items.IsEmpty() { - CreateChange(&changes, ObjectAdded, v3.ItemsLabel, nil, - rHeader.Items.ValueNode, BreakingAdded(CompHeader, PropItems), nil, rHeader.Items.Value) - } - if !lHeader.Items.IsEmpty() && rHeader.Items.IsEmpty() { - CreateChange(&changes, ObjectRemoved, v3.SchemaLabel, lHeader.Items.ValueNode, - nil, BreakingRemoved(CompHeader, PropItems), lHeader.Items.Value, nil) - } - hc.ExtensionChanges = CompareExtensions(lHeader.Extensions, rHeader.Extensions) - } - - // handle OpenAPI - if reflect.TypeOf(&v3.Header{}) == reflect.TypeOf(l) && reflect.TypeOf(&v3.Header{}) == reflect.TypeOf(r) { - lHeader := l.(*v3.Header) - rHeader := r.(*v3.Header) - - // perform hash check to avoid further processing - if low.AreEqual(lHeader, rHeader) { - return nil - } - - props = append(props, addCommonHeaderProperties(lHeader, rHeader, &changes)...) - props = append(props, addOpenAPIHeaderProperties(lHeader, rHeader, &changes)...) 
- - // header - if !lHeader.Schema.IsEmpty() || !rHeader.Schema.IsEmpty() { - hc.SchemaChanges = CompareSchemas(lHeader.Schema.Value, rHeader.Schema.Value) - } - - // examples - hc.ExamplesChanges = CheckMapForChanges(lHeader.Examples.Value, rHeader.Examples.Value, - &changes, v3.ExamplesLabel, CompareExamples) - - // content - hc.ContentChanges = CheckMapForChanges(lHeader.Content.Value, rHeader.Content.Value, - &changes, v3.ContentLabel, CompareMediaTypes) - - hc.ExtensionChanges = CompareExtensions(lHeader.Extensions, rHeader.Extensions) - - } - CheckProperties(props) - hc.PropertyChanges = NewPropertyChanges(changes) - return hc -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/info.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/info.go deleted file mode 100644 index b042ed9b876..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/info.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - "github.com/pb33f/libopenapi/datamodel/low/base" - v3 "github.com/pb33f/libopenapi/datamodel/low/v3" -) - -// InfoChanges represents the number of changes to an Info object. Part of an OpenAPI document -type InfoChanges struct { - *PropertyChanges - ContactChanges *ContactChanges `json:"contact,omitempty" yaml:"contact,omitempty"` - LicenseChanges *LicenseChanges `json:"license,omitempty" yaml:"license,omitempty"` - ExtensionChanges *ExtensionChanges `json:"extensions,omitempty" yaml:"extensions,omitempty"` -} - -// GetAllChanges returns a slice of all changes made between Info objects -func (i *InfoChanges) GetAllChanges() []*Change { - if i == nil { - return nil - } - var changes []*Change - changes = append(changes, i.Changes...) - if i.ContactChanges != nil { - changes = append(changes, i.ContactChanges.GetAllChanges()...) 
- } - if i.LicenseChanges != nil { - changes = append(changes, i.LicenseChanges.GetAllChanges()...) - } - if i.ExtensionChanges != nil { - changes = append(changes, i.ExtensionChanges.GetAllChanges()...) - } - return changes -} - -// TotalChanges represents the total number of changes made to an Info object. -func (i *InfoChanges) TotalChanges() int { - if i == nil { - return 0 - } - t := i.PropertyChanges.TotalChanges() - if i.ContactChanges != nil { - t += i.ContactChanges.TotalChanges() - } - if i.LicenseChanges != nil { - t += i.LicenseChanges.TotalChanges() - } - if i.ExtensionChanges != nil { - t += i.ExtensionChanges.TotalChanges() - } - return t -} - -// TotalBreakingChanges returns the total number of breaking changes in Info objects. -func (i *InfoChanges) TotalBreakingChanges() int { - if i == nil { - return 0 - } - c := i.PropertyChanges.TotalBreakingChanges() - if i.ContactChanges != nil { - c += i.ContactChanges.TotalBreakingChanges() - } - if i.LicenseChanges != nil { - c += i.LicenseChanges.TotalBreakingChanges() - } - if i.ExtensionChanges != nil { - c += i.ExtensionChanges.TotalBreakingChanges() - } - return c -} - -// CompareInfo will compare a left (original) and a right (new) Info object. Any changes -// will be returned in a pointer to InfoChanges, otherwise if nothing is found, then nil is -// returned instead. 
-func CompareInfo(l, r *base.Info) *InfoChanges { - var changes []*Change - props := make([]*PropertyCheck, 0, 5) - - props = append(props, - NewPropertyCheck(CompInfo, PropTitle, - l.Title.ValueNode, r.Title.ValueNode, - v3.TitleLabel, &changes, l, r), - NewPropertyCheck(CompInfo, PropSummary, - l.Summary.ValueNode, r.Summary.ValueNode, - v3.SummaryLabel, &changes, l, r), - NewPropertyCheck(CompInfo, PropDescription, - l.Description.ValueNode, r.Description.ValueNode, - v3.DescriptionLabel, &changes, l, r), - NewPropertyCheck(CompInfo, PropTermsOfService, - l.TermsOfService.ValueNode, r.TermsOfService.ValueNode, - v3.TermsOfServiceLabel, &changes, l, r), - NewPropertyCheck(CompInfo, PropVersion, - l.Version.ValueNode, r.Version.ValueNode, - v3.VersionLabel, &changes, l, r), - ) - - // check properties - CheckProperties(props) - - i := new(InfoChanges) - - // compare contact. - if l.Contact.Value != nil && r.Contact.Value != nil { - i.ContactChanges = CompareContact(l.Contact.Value, r.Contact.Value) - } else { - if l.Contact.Value == nil && r.Contact.Value != nil { - CreateChange(&changes, ObjectAdded, v3.ContactLabel, - nil, r.Contact.ValueNode, BreakingAdded(CompInfo, PropContact), nil, r.Contact.Value) - } - if l.Contact.Value != nil && r.Contact.Value == nil { - CreateChange(&changes, ObjectRemoved, v3.ContactLabel, - l.Contact.ValueNode, nil, BreakingRemoved(CompInfo, PropContact), l.Contact.Value, nil) - } - } - - // compare license. 
- if l.License.Value != nil && r.License.Value != nil { - i.LicenseChanges = CompareLicense(l.License.Value, r.License.Value) - } else { - if l.License.Value == nil && r.License.Value != nil { - CreateChange(&changes, ObjectAdded, v3.LicenseLabel, - nil, r.License.ValueNode, BreakingAdded(CompInfo, PropLicense), nil, r.License.Value) - } - if l.License.Value != nil && r.License.Value == nil { - CreateChange(&changes, ObjectRemoved, v3.LicenseLabel, - l.License.ValueNode, nil, BreakingRemoved(CompInfo, PropLicense), r.License.Value, nil) - } - } - - // check extensions. - i.ExtensionChanges = CompareExtensions(l.Extensions, r.Extensions) - - i.PropertyChanges = NewPropertyChanges(changes) - if i.TotalChanges() <= 0 { - return nil - } - return i -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/items.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/items.go deleted file mode 100644 index 04b016188fc..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/items.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - v2 "github.com/pb33f/libopenapi/datamodel/low/v2" - v3 "github.com/pb33f/libopenapi/datamodel/low/v3" -) - -// ItemsChanges represent changes found between a left (original) and right (modified) object. Items is only -// used by Swagger documents. -type ItemsChanges struct { - *PropertyChanges - ItemsChanges *ItemsChanges `json:"items,omitempty" yaml:"items,omitempty"` -} - -// GetAllChanges returns a slice of all changes made between Items objects -func (i *ItemsChanges) GetAllChanges() []*Change { - if i == nil { - return nil - } - var changes []*Change - changes = append(changes, i.Changes...) - if i.ItemsChanges != nil { - changes = append(changes, i.ItemsChanges.GetAllChanges()...) 
- } - return changes -} - -// TotalChanges returns the total number of changes found between two Items objects -// This is a recursive function because Items can contain Items. Be careful! -func (i *ItemsChanges) TotalChanges() int { - if i == nil { - return 0 - } - c := i.PropertyChanges.TotalChanges() - if i.ItemsChanges != nil { - c += i.ItemsChanges.TotalChanges() - } - return c -} - -// TotalBreakingChanges returns the total number of breaking changes found between two Swagger Items objects -// This is a recursive method, Items are recursive, be careful! -func (i *ItemsChanges) TotalBreakingChanges() int { - c := i.PropertyChanges.TotalBreakingChanges() - if i.ItemsChanges != nil { - c += i.ItemsChanges.TotalBreakingChanges() - } - return c -} - -// CompareItems compares two sets of Swagger Item objects. If there are any changes found then a pointer to -// ItemsChanges will be returned, otherwise nil is returned. -// -// It is worth nothing that Items can contain Items. This means recursion is possible and has the potential for -// runaway code if not using the resolver's circular reference checking. -func CompareItems(l, r *v2.Items) *ItemsChanges { - var changes []*Change - var props []*PropertyCheck - - ic := new(ItemsChanges) - - // header is identical to items, except for a description. - props = append(props, addSwaggerHeaderProperties(l, r, &changes)...) - CheckProperties(props) - - if !l.Items.IsEmpty() && !r.Items.IsEmpty() { - // inline, check hashes, if they don't match, compare. - if l.Items.Value.Hash() != r.Items.Value.Hash() { - // compare. 
- ic.ItemsChanges = CompareItems(l.Items.Value, r.Items.Value) - } - } - if l.Items.IsEmpty() && !r.Items.IsEmpty() { - // added items - CreateChange(&changes, PropertyAdded, v3.ItemsLabel, - nil, r.Items.GetValueNode(), true, nil, r.Items.GetValue()) - } - if !l.Items.IsEmpty() && r.Items.IsEmpty() { - // removed items - CreateChange(&changes, PropertyRemoved, v3.ItemsLabel, - l.Items.GetValueNode(), nil, true, l.Items.GetValue(), - nil) - } - ic.PropertyChanges = NewPropertyChanges(changes) - if ic.TotalChanges() <= 0 { - return nil - } - return ic -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/license.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/license.go deleted file mode 100644 index ba361e4551f..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/license.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - "github.com/pb33f/libopenapi/datamodel/low/base" - v3 "github.com/pb33f/libopenapi/datamodel/low/v3" -) - -// LicenseChanges represent changes to a License object that is a child of Info object. Part of an OpenAPI document -type LicenseChanges struct { - *PropertyChanges - ExtensionChanges *ExtensionChanges `json:"extensions,omitempty" yaml:"extensions,omitempty"` -} - -// GetAllChanges returns a slice of all changes made between License objects -func (l *LicenseChanges) GetAllChanges() []*Change { - if l == nil { - return nil - } - var changes []*Change - changes = append(changes, l.Changes...) - if l.ExtensionChanges != nil { - changes = append(changes, l.ExtensionChanges.GetAllChanges()...) - } - return changes -} - -// TotalChanges represents the total number of changes made to a License instance. 
-func (l *LicenseChanges) TotalChanges() int { - if l == nil { - return 0 - } - c := l.PropertyChanges.TotalChanges() - - if l.ExtensionChanges != nil { - c += l.ExtensionChanges.TotalChanges() - } - return c -} - -// TotalBreakingChanges returns the total number of breaking changes in License objects. -func (l *LicenseChanges) TotalBreakingChanges() int { - if l == nil { - return 0 - } - c := l.PropertyChanges.TotalBreakingChanges() - if l.ExtensionChanges != nil { - c += l.ExtensionChanges.TotalBreakingChanges() - } - return c -} - -// CompareLicense will check a left (original) and right (new) License object for any changes. If there -// were any, a pointer to a LicenseChanges object is returned, otherwise if nothing changed - the function -// returns nil. -func CompareLicense(l, r *base.License) *LicenseChanges { - var changes []*Change - props := make([]*PropertyCheck, 0, 3) - - props = append(props, - NewPropertyCheck(CompLicense, PropURL, - l.URL.ValueNode, r.URL.ValueNode, - v3.URLLabel, &changes, l, r), - NewPropertyCheck(CompLicense, PropName, - l.Name.ValueNode, r.Name.ValueNode, - v3.NameLabel, &changes, l, r), - NewPropertyCheck(CompLicense, PropIdentifier, - l.Identifier.ValueNode, r.Identifier.ValueNode, - v3.Identifier, &changes, l, r), - ) - - CheckProperties(props) - - lc := new(LicenseChanges) - lc.PropertyChanges = NewPropertyChanges(changes) - lc.ExtensionChanges = CompareExtensions(l.Extensions, r.Extensions) - if lc.TotalChanges() <= 0 { - return nil - } - return lc -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/link.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/link.go deleted file mode 100644 index aabdda1260c..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/link.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - "github.com/pb33f/libopenapi/datamodel/low" - v3 
"github.com/pb33f/libopenapi/datamodel/low/v3" -) - -// LinkChanges represent changes made between two OpenAPI Link Objects. -type LinkChanges struct { - *PropertyChanges - ExtensionChanges *ExtensionChanges `json:"extensions,omitempty" yaml:"extensions,omitempty"` - ServerChanges *ServerChanges `json:"server,omitempty" yaml:"server,omitempty"` -} - -// GetAllChanges returns a slice of all changes made between Link objects -func (l *LinkChanges) GetAllChanges() []*Change { - if l == nil { - return nil - } - var changes []*Change - changes = append(changes, l.Changes...) - if l.ServerChanges != nil { - changes = append(changes, l.ServerChanges.GetAllChanges()...) - } - if l.ExtensionChanges != nil { - changes = append(changes, l.ExtensionChanges.GetAllChanges()...) - } - return changes -} - -// TotalChanges returns the total changes made between OpenAPI Link objects -func (l *LinkChanges) TotalChanges() int { - if l == nil { - return 0 - } - c := l.PropertyChanges.TotalChanges() - if l.ExtensionChanges != nil { - c += l.ExtensionChanges.TotalChanges() - } - if l.ServerChanges != nil { - c += l.ServerChanges.TotalChanges() - } - return c -} - -// TotalBreakingChanges returns the number of breaking changes made between two OpenAPI Link Objects -func (l *LinkChanges) TotalBreakingChanges() int { - c := l.PropertyChanges.TotalBreakingChanges() - if l.ServerChanges != nil { - c += l.ServerChanges.TotalBreakingChanges() - } - return c -} - -// CompareLinks checks a left and right OpenAPI Link for any changes. If they are found, returns a pointer to -// LinkChanges, and returns nil if nothing is found. 
-func CompareLinks(l, r *v3.Link) *LinkChanges { - if low.AreEqual(l, r) { - return nil - } - - var changes []*Change - props := make([]*PropertyCheck, 0, 4) - - props = append(props, - NewPropertyCheck(CompLink, PropOperationRef, - l.OperationRef.ValueNode, r.OperationRef.ValueNode, - v3.OperationRefLabel, &changes, l, r), - NewPropertyCheck(CompLink, PropOperationID, - l.OperationId.ValueNode, r.OperationId.ValueNode, - v3.OperationIdLabel, &changes, l, r), - NewPropertyCheck(CompLink, PropRequestBody, - l.RequestBody.ValueNode, r.RequestBody.ValueNode, - v3.RequestBodyLabel, &changes, l, r), - NewPropertyCheck(CompLink, PropDescription, - l.Description.ValueNode, r.Description.ValueNode, - v3.DescriptionLabel, &changes, l, r), - ) - - CheckProperties(props) - lc := new(LinkChanges) - lc.ExtensionChanges = CompareExtensions(l.Extensions, r.Extensions) - - // server - if !l.Server.IsEmpty() && !r.Server.IsEmpty() { - if !low.AreEqual(l.Server.Value, r.Server.Value) { - lc.ServerChanges = CompareServers(l.Server.Value, r.Server.Value) - } - } - if !l.Server.IsEmpty() && r.Server.IsEmpty() { - CreateChange(&changes, PropertyRemoved, v3.ServerLabel, - l.Server.ValueNode, nil, BreakingRemoved(CompLink, PropServer), - l.Server.Value, nil) - } - if l.Server.IsEmpty() && !r.Server.IsEmpty() { - CreateChange(&changes, PropertyAdded, v3.ServerLabel, - nil, r.Server.ValueNode, BreakingAdded(CompLink, PropServer), - nil, r.Server.Value) - } - - // parameters - lValues := make(map[string]low.ValueReference[string]) - rValues := make(map[string]low.ValueReference[string]) - for k, v := range l.Parameters.Value.FromOldest() { - lValues[k.Value] = v - } - for k, v := range r.Parameters.Value.FromOldest() { - rValues[k.Value] = v - } - for k := range lValues { - if _, ok := rValues[k]; !ok { - CreateChange(&changes, ObjectRemoved, v3.ParametersLabel, - lValues[k].ValueNode, nil, BreakingRemoved(CompLink, PropParameters), - k, nil) - continue - } - if lValues[k].Value != 
rValues[k].Value { - CreateChange(&changes, Modified, v3.ParametersLabel, - lValues[k].ValueNode, rValues[k].ValueNode, BreakingModified(CompLink, PropParameters), - k, k) - } - - } - for k := range rValues { - if _, ok := lValues[k]; !ok { - CreateChange(&changes, ObjectAdded, v3.ParametersLabel, - nil, rValues[k].ValueNode, BreakingAdded(CompLink, PropParameters), - nil, k) - } - } - - lc.PropertyChanges = NewPropertyChanges(changes) - return lc -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/media_type.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/media_type.go deleted file mode 100644 index 90de6f7ae75..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/media_type.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2022-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/datamodel/low/v3" -) - -// MediaTypeChanges represent changes made between two OpenAPI MediaType instances. -type MediaTypeChanges struct { - *PropertyChanges - SchemaChanges *SchemaChanges `json:"schemas,omitempty" yaml:"schemas,omitempty"` - ItemSchemaChanges *SchemaChanges `json:"itemSchemas,omitempty" yaml:"itemSchemas,omitempty"` - ExtensionChanges *ExtensionChanges `json:"extensions,omitempty" yaml:"extensions,omitempty"` - ExampleChanges map[string]*ExampleChanges `json:"examples,omitempty" yaml:"examples,omitempty"` - EncodingChanges map[string]*EncodingChanges `json:"encoding,omitempty" yaml:"encoding,omitempty"` - ItemEncodingChanges map[string]*EncodingChanges `json:"itemEncoding,omitempty" yaml:"itemEncoding,omitempty"` -} - -// GetAllChanges returns a slice of all changes made between MediaType objects -func (m *MediaTypeChanges) GetAllChanges() []*Change { - if m == nil { - return nil - } - var changes []*Change - changes = append(changes, m.Changes...) 
- if m.SchemaChanges != nil { - changes = append(changes, m.SchemaChanges.GetAllChanges()...) - } - if m.ItemSchemaChanges != nil { - changes = append(changes, m.ItemSchemaChanges.GetAllChanges()...) - } - for k := range m.ExampleChanges { - changes = append(changes, m.ExampleChanges[k].GetAllChanges()...) - } - for k := range m.EncodingChanges { - changes = append(changes, m.EncodingChanges[k].GetAllChanges()...) - } - for k := range m.ItemEncodingChanges { - changes = append(changes, m.ItemEncodingChanges[k].GetAllChanges()...) - } - if m.ExtensionChanges != nil { - changes = append(changes, m.ExtensionChanges.GetAllChanges()...) - } - return changes -} - -// TotalChanges returns the total number of changes between two MediaType instances. -func (m *MediaTypeChanges) TotalChanges() int { - if m == nil { - return 0 - } - c := m.PropertyChanges.TotalChanges() - for k := range m.ExampleChanges { - c += m.ExampleChanges[k].TotalChanges() - } - if m.SchemaChanges != nil { - c += m.SchemaChanges.TotalChanges() - } - if m.ItemSchemaChanges != nil { - c += m.ItemSchemaChanges.TotalChanges() - } - if len(m.EncodingChanges) > 0 { - for i := range m.EncodingChanges { - c += m.EncodingChanges[i].TotalChanges() - } - } - if len(m.ItemEncodingChanges) > 0 { - for i := range m.ItemEncodingChanges { - c += m.ItemEncodingChanges[i].TotalChanges() - } - } - if m.ExtensionChanges != nil { - c += m.ExtensionChanges.TotalChanges() - } - return c -} - -// TotalBreakingChanges returns the total number of breaking changes made between two MediaType instances. 
-func (m *MediaTypeChanges) TotalBreakingChanges() int { - c := m.PropertyChanges.TotalBreakingChanges() - for k := range m.ExampleChanges { - c += m.ExampleChanges[k].TotalBreakingChanges() - } - if m.SchemaChanges != nil { - c += m.SchemaChanges.TotalBreakingChanges() - } - if m.ItemSchemaChanges != nil { - c += m.ItemSchemaChanges.TotalBreakingChanges() - } - if len(m.EncodingChanges) > 0 { - for i := range m.EncodingChanges { - c += m.EncodingChanges[i].TotalBreakingChanges() - } - } - if len(m.ItemEncodingChanges) > 0 { - for i := range m.ItemEncodingChanges { - c += m.ItemEncodingChanges[i].TotalBreakingChanges() - } - } - return c -} - -// CompareMediaTypes compares a left and a right MediaType object for any changes. If found, a pointer to a -// MediaTypeChanges instance is returned; otherwise nothing is returned. -func CompareMediaTypes(l, r *v3.MediaType) *MediaTypeChanges { - var props []*PropertyCheck - var changes []*Change - - mc := new(MediaTypeChanges) - - if low.AreEqual(l, r) { - return nil - } - - // Example - CheckPropertyAdditionOrRemovalWithEncoding(l.Example.ValueNode, r.Example.ValueNode, - v3.ExampleLabel, &changes, - BreakingAdded(CompMediaType, PropExample) || BreakingRemoved(CompMediaType, PropExample), - l.Example.Value, r.Example.Value) - CheckForModificationWithEncoding(l.Example.ValueNode, r.Example.ValueNode, - v3.ExampleLabel, &changes, BreakingModified(CompMediaType, PropExample), - l.Example.Value, r.Example.Value) - - CheckProperties(props) - - // schema - if !l.Schema.IsEmpty() && !r.Schema.IsEmpty() { - mc.SchemaChanges = CompareSchemas(l.Schema.Value, r.Schema.Value) - } - if !l.Schema.IsEmpty() && r.Schema.IsEmpty() { - CreateChange(&changes, ObjectRemoved, v3.SchemaLabel, l.Schema.ValueNode, - nil, BreakingRemoved(CompMediaType, PropSchema), l.Schema.Value, nil) - } - if l.Schema.IsEmpty() && !r.Schema.IsEmpty() { - CreateChange(&changes, ObjectAdded, v3.SchemaLabel, nil, - r.Schema.ValueNode, BreakingAdded(CompMediaType, 
PropSchema), nil, r.Schema.Value) - } - - // examples - use nil-aware version so added/removed examples appear in the map for tree rendering - mc.ExampleChanges = CheckMapForChangesWithNilSupport(l.Examples.Value, r.Examples.Value, - &changes, v3.ExamplesLabel, CompareExamples) - - // encoding - mc.EncodingChanges = CheckMapForChanges(l.Encoding.Value, r.Encoding.Value, - &changes, v3.EncodingLabel, CompareEncoding) - - // itemSchema - if !l.ItemSchema.IsEmpty() && !r.ItemSchema.IsEmpty() { - mc.ItemSchemaChanges = CompareSchemas(l.ItemSchema.Value, r.ItemSchema.Value) - } - if !l.ItemSchema.IsEmpty() && r.ItemSchema.IsEmpty() { - CreateChange(&changes, ObjectRemoved, v3.ItemSchemaLabel, l.ItemSchema.ValueNode, - nil, BreakingRemoved(CompMediaType, PropItemSchema), l.ItemSchema.Value, nil) - } - if l.ItemSchema.IsEmpty() && !r.ItemSchema.IsEmpty() { - CreateChange(&changes, ObjectAdded, v3.ItemSchemaLabel, nil, - r.ItemSchema.ValueNode, BreakingAdded(CompMediaType, PropItemSchema), nil, r.ItemSchema.Value) - } - - // itemEncoding - mc.ItemEncodingChanges = CheckMapForChanges(l.ItemEncoding.Value, r.ItemEncoding.Value, - &changes, v3.ItemEncodingLabel, CompareEncoding) - - mc.ExtensionChanges = CompareExtensions(l.Extensions, r.Extensions) - mc.PropertyChanges = NewPropertyChanges(changes) - return mc -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/oauth_flows.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/oauth_flows.go deleted file mode 100644 index b6373299fa0..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/oauth_flows.go +++ /dev/null @@ -1,270 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - "github.com/pb33f/libopenapi/datamodel/low" - v3 "github.com/pb33f/libopenapi/datamodel/low/v3" -) - -// OAuthFlowsChanges represents changes found between two OpenAPI OAuthFlows objects. 
-type OAuthFlowsChanges struct { - *PropertyChanges - ImplicitChanges *OAuthFlowChanges `json:"implicit,omitempty" yaml:"implicit,omitempty"` - PasswordChanges *OAuthFlowChanges `json:"password,omitempty" yaml:"password,omitempty"` - ClientCredentialsChanges *OAuthFlowChanges `json:"clientCredentials,omitempty" yaml:"clientCredentials,omitempty"` - AuthorizationCodeChanges *OAuthFlowChanges `json:"authCode,omitempty" yaml:"authCode,omitempty"` - DeviceChanges *OAuthFlowChanges `json:"device,omitempty" yaml:"device,omitempty"` // OpenAPI 3.2+ device flow - ExtensionChanges *ExtensionChanges `json:"extensions,omitempty" yaml:"extensions,omitempty"` -} - -// GetAllChanges returns a slice of all changes made between OAuthFlows objects -func (o *OAuthFlowsChanges) GetAllChanges() []*Change { - if o == nil { - return nil - } - var changes []*Change - changes = append(changes, o.Changes...) - if o.ImplicitChanges != nil { - changes = append(changes, o.ImplicitChanges.GetAllChanges()...) - } - if o.PasswordChanges != nil { - changes = append(changes, o.PasswordChanges.GetAllChanges()...) - } - if o.ClientCredentialsChanges != nil { - changes = append(changes, o.ClientCredentialsChanges.GetAllChanges()...) - } - if o.AuthorizationCodeChanges != nil { - changes = append(changes, o.AuthorizationCodeChanges.GetAllChanges()...) - } - if o.DeviceChanges != nil { - changes = append(changes, o.DeviceChanges.GetAllChanges()...) - } - if o.ExtensionChanges != nil { - changes = append(changes, o.ExtensionChanges.GetAllChanges()...) - } - return changes -} - -// TotalChanges returns the number of changes made between two OAuthFlows instances. 
-func (o *OAuthFlowsChanges) TotalChanges() int { - if o == nil { - return 0 - } - c := o.PropertyChanges.TotalChanges() - if o.ImplicitChanges != nil { - c += o.ImplicitChanges.TotalChanges() - } - if o.PasswordChanges != nil { - c += o.PasswordChanges.TotalChanges() - } - if o.ClientCredentialsChanges != nil { - c += o.ClientCredentialsChanges.TotalChanges() - } - if o.AuthorizationCodeChanges != nil { - c += o.AuthorizationCodeChanges.TotalChanges() - } - if o.DeviceChanges != nil { - c += o.DeviceChanges.TotalChanges() - } - if o.ExtensionChanges != nil { - c += o.ExtensionChanges.TotalChanges() - } - return c -} - -// TotalBreakingChanges returns the number of breaking changes made between two OAuthFlows objects. -func (o *OAuthFlowsChanges) TotalBreakingChanges() int { - c := o.PropertyChanges.TotalBreakingChanges() - if o.ImplicitChanges != nil { - c += o.ImplicitChanges.TotalBreakingChanges() - } - if o.PasswordChanges != nil { - c += o.PasswordChanges.TotalBreakingChanges() - } - if o.ClientCredentialsChanges != nil { - c += o.ClientCredentialsChanges.TotalBreakingChanges() - } - if o.AuthorizationCodeChanges != nil { - c += o.AuthorizationCodeChanges.TotalBreakingChanges() - } - if o.DeviceChanges != nil { - c += o.DeviceChanges.TotalBreakingChanges() - } - return c -} - -// CompareOAuthFlows compares a left and right OAuthFlows object. If changes are found a pointer to *OAuthFlowsChanges -// is returned, otherwise nil is returned. 
-func CompareOAuthFlows(l, r *v3.OAuthFlows) *OAuthFlowsChanges { - if low.AreEqual(l, r) { - return nil - } - - oa := new(OAuthFlowsChanges) - var changes []*Change - - // client credentials - if !l.ClientCredentials.IsEmpty() && !r.ClientCredentials.IsEmpty() { - oa.ClientCredentialsChanges = CompareOAuthFlow(l.ClientCredentials.Value, r.ClientCredentials.Value) - } - if !l.ClientCredentials.IsEmpty() && r.ClientCredentials.IsEmpty() { - CreateChange(&changes, ObjectRemoved, v3.ClientCredentialsLabel, - l.ClientCredentials.ValueNode, nil, BreakingRemoved(CompOAuthFlows, PropClientCredentials), - l.ClientCredentials.Value, nil) - } - if l.ClientCredentials.IsEmpty() && !r.ClientCredentials.IsEmpty() { - CreateChange(&changes, ObjectAdded, v3.ClientCredentialsLabel, - nil, r.ClientCredentials.ValueNode, BreakingAdded(CompOAuthFlows, PropClientCredentials), - nil, r.ClientCredentials.Value) - } - - // implicit - if !l.Implicit.IsEmpty() && !r.Implicit.IsEmpty() { - oa.ImplicitChanges = CompareOAuthFlow(l.Implicit.Value, r.Implicit.Value) - } - if !l.Implicit.IsEmpty() && r.Implicit.IsEmpty() { - CreateChange(&changes, ObjectRemoved, v3.ImplicitLabel, - l.Implicit.ValueNode, nil, BreakingRemoved(CompOAuthFlows, PropImplicit), - l.Implicit.Value, nil) - } - if l.Implicit.IsEmpty() && !r.Implicit.IsEmpty() { - CreateChange(&changes, ObjectAdded, v3.ImplicitLabel, - nil, r.Implicit.ValueNode, BreakingAdded(CompOAuthFlows, PropImplicit), - nil, r.Implicit.Value) - } - - // password - if !l.Password.IsEmpty() && !r.Password.IsEmpty() { - oa.PasswordChanges = CompareOAuthFlow(l.Password.Value, r.Password.Value) - } - if !l.Password.IsEmpty() && r.Password.IsEmpty() { - CreateChange(&changes, ObjectRemoved, v3.PasswordLabel, - l.Password.ValueNode, nil, BreakingRemoved(CompOAuthFlows, PropPassword), - l.Password.Value, nil) - } - if l.Password.IsEmpty() && !r.Password.IsEmpty() { - CreateChange(&changes, ObjectAdded, v3.PasswordLabel, - nil, r.Password.ValueNode, 
BreakingAdded(CompOAuthFlows, PropPassword), - nil, r.Password.Value) - } - - // auth code - if !l.AuthorizationCode.IsEmpty() && !r.AuthorizationCode.IsEmpty() { - oa.AuthorizationCodeChanges = CompareOAuthFlow(l.AuthorizationCode.Value, r.AuthorizationCode.Value) - } - if !l.AuthorizationCode.IsEmpty() && r.AuthorizationCode.IsEmpty() { - CreateChange(&changes, ObjectRemoved, v3.AuthorizationCodeLabel, - l.AuthorizationCode.ValueNode, nil, BreakingRemoved(CompOAuthFlows, PropAuthorizationCode), - l.AuthorizationCode.Value, nil) - } - if l.AuthorizationCode.IsEmpty() && !r.AuthorizationCode.IsEmpty() { - CreateChange(&changes, ObjectAdded, v3.AuthorizationCodeLabel, - nil, r.AuthorizationCode.ValueNode, BreakingAdded(CompOAuthFlows, PropAuthorizationCode), - nil, r.AuthorizationCode.Value) - } - - // device flow (OpenAPI 3.2+) - if !l.Device.IsEmpty() && !r.Device.IsEmpty() { - oa.DeviceChanges = CompareOAuthFlow(l.Device.Value, r.Device.Value) - } - if !l.Device.IsEmpty() && r.Device.IsEmpty() { - CreateChange(&changes, ObjectRemoved, v3.DeviceLabel, - l.Device.ValueNode, nil, BreakingRemoved(CompOAuthFlows, PropDevice), - l.Device.Value, nil) - } - if l.Device.IsEmpty() && !r.Device.IsEmpty() { - CreateChange(&changes, ObjectAdded, v3.DeviceLabel, - nil, r.Device.ValueNode, BreakingAdded(CompOAuthFlows, PropDevice), - nil, r.Device.Value) - } - - oa.ExtensionChanges = CompareExtensions(l.Extensions, r.Extensions) - oa.PropertyChanges = NewPropertyChanges(changes) - return oa -} - -// OAuthFlowChanges represents an OpenAPI OAuthFlow object. -type OAuthFlowChanges struct { - *PropertyChanges - ExtensionChanges *ExtensionChanges `json:"extensions,omitempty" yaml:"extensions,omitempty"` -} - -// GetAllChanges returns a slice of all changes made between OAuthFlow objects -func (o *OAuthFlowChanges) GetAllChanges() []*Change { - if o == nil { - return nil - } - var changes []*Change - changes = append(changes, o.Changes...) 
- if o.ExtensionChanges != nil { - changes = append(changes, o.ExtensionChanges.GetAllChanges()...) - } - return changes -} - -// TotalChanges returns the total number of changes made between two OAuthFlow objects -func (o *OAuthFlowChanges) TotalChanges() int { - if o == nil { - return 0 - } - c := o.PropertyChanges.TotalChanges() - if o.ExtensionChanges != nil { - c += o.ExtensionChanges.TotalChanges() - } - return c -} - -// TotalBreakingChanges returns the total number of breaking changes made between two OAuthFlow objects -func (o *OAuthFlowChanges) TotalBreakingChanges() int { - return o.PropertyChanges.TotalBreakingChanges() -} - -// CompareOAuthFlow checks a left and a right OAuthFlow object for changes. If found, returns a pointer to -// an OAuthFlowChanges instance, or nil if nothing is found. -func CompareOAuthFlow(l, r *v3.OAuthFlow) *OAuthFlowChanges { - if low.AreEqual(l, r) { - return nil - } - - var changes []*Change - props := make([]*PropertyCheck, 0, 3) - - props = append(props, - NewPropertyCheck(CompOAuthFlow, PropAuthorizationURL, - l.AuthorizationUrl.ValueNode, r.AuthorizationUrl.ValueNode, - v3.AuthorizationUrlLabel, &changes, l, r), - NewPropertyCheck(CompOAuthFlow, PropTokenURL, - l.TokenUrl.ValueNode, r.TokenUrl.ValueNode, - v3.TokenUrlLabel, &changes, l, r), - NewPropertyCheck(CompOAuthFlow, PropRefreshURL, - l.RefreshUrl.ValueNode, r.RefreshUrl.ValueNode, - v3.RefreshUrlLabel, &changes, l, r), - ) - - CheckProperties(props) - - for k, v := range l.Scopes.Value.FromOldest() { - if r != nil && r.FindScope(k.Value) == nil { - CreateChange(&changes, ObjectRemoved, v3.Scopes, v.ValueNode, nil, BreakingRemoved(CompOAuthFlow, PropScopes), k.Value, nil) - continue - } - if r != nil && r.FindScope(k.Value) != nil { - if v.Value != r.FindScope(k.Value).Value { - CreateChange(&changes, Modified, v3.Scopes, - v.ValueNode, r.FindScope(k.Value).ValueNode, BreakingModified(CompOAuthFlow, PropScopes), - v.Value, r.FindScope(k.Value).Value) - } - } - } 
- for k, v := range r.Scopes.Value.FromOldest() { - if l != nil && l.FindScope(k.Value) == nil { - CreateChange(&changes, ObjectAdded, v3.Scopes, nil, v.ValueNode, BreakingAdded(CompOAuthFlow, PropScopes), nil, k.Value) - } - } - oa := new(OAuthFlowChanges) - oa.PropertyChanges = NewPropertyChanges(changes) - oa.ExtensionChanges = CompareExtensions(l.Extensions, r.Extensions) - return oa -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/operation.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/operation.go deleted file mode 100644 index 664362398e4..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/operation.go +++ /dev/null @@ -1,613 +0,0 @@ -// Copyright 2022-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - "reflect" - "sort" - "strings" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/datamodel/low/base" - v2 "github.com/pb33f/libopenapi/datamodel/low/v2" - v3 "github.com/pb33f/libopenapi/datamodel/low/v3" - "go.yaml.in/yaml/v4" -) - -// OperationChanges represent changes made between two Swagger or OpenAPI Operation objects. 
-type OperationChanges struct { - *PropertyChanges - ExternalDocChanges *ExternalDocChanges `json:"externalDoc,omitempty" yaml:"externalDoc,omitempty"` - ParameterChanges []*ParameterChanges `json:"parameters,omitempty" yaml:"parameters,omitempty"` - ResponsesChanges *ResponsesChanges `json:"responses,omitempty" yaml:"responses,omitempty"` - SecurityRequirementChanges []*SecurityRequirementChanges `json:"securityRequirements,omitempty" yaml:"securityRequirements,omitempty"` - - // OpenAPI 3+ only changes - RequestBodyChanges *RequestBodyChanges `json:"requestBodies,omitempty" yaml:"requestBodies,omitempty"` - ServerChanges []*ServerChanges `json:"servers,omitempty" yaml:"servers,omitempty"` - ExtensionChanges *ExtensionChanges `json:"extensions,omitempty" yaml:"extensions,omitempty"` - CallbackChanges map[string]*CallbackChanges `json:"callbacks,omitempty" yaml:"callbacks,omitempty"` -} - -// GetAllChanges returns a slice of all changes made between Operation objects -func (o *OperationChanges) GetAllChanges() []*Change { - if o == nil { - return nil - } - var changes []*Change - changes = append(changes, o.Changes...) - if o.ExternalDocChanges != nil { - changes = append(changes, o.ExternalDocChanges.GetAllChanges()...) - } - for k := range o.ParameterChanges { - changes = append(changes, o.ParameterChanges[k].GetAllChanges()...) - } - if o.ResponsesChanges != nil { - changes = append(changes, o.ResponsesChanges.GetAllChanges()...) - } - for k := range o.SecurityRequirementChanges { - changes = append(changes, o.SecurityRequirementChanges[k].GetAllChanges()...) - } - if o.RequestBodyChanges != nil { - changes = append(changes, o.RequestBodyChanges.GetAllChanges()...) - } - for k := range o.ServerChanges { - changes = append(changes, o.ServerChanges[k].GetAllChanges()...) - } - for k := range o.CallbackChanges { - changes = append(changes, o.CallbackChanges[k].GetAllChanges()...) 
- } - if o.ExtensionChanges != nil { - changes = append(changes, o.ExtensionChanges.GetAllChanges()...) - } - return changes -} - -// TotalChanges returns the total number of changes made between two Swagger or OpenAPI Operation objects. -func (o *OperationChanges) TotalChanges() int { - if o == nil { - return 0 - } - c := o.PropertyChanges.TotalChanges() - if o.ExternalDocChanges != nil { - c += o.ExternalDocChanges.TotalChanges() - } - for k := range o.ParameterChanges { - c += o.ParameterChanges[k].TotalChanges() - } - if o.ResponsesChanges != nil { - c += o.ResponsesChanges.TotalChanges() - } - for k := range o.SecurityRequirementChanges { - c += o.SecurityRequirementChanges[k].TotalChanges() - } - if o.RequestBodyChanges != nil { - c += o.RequestBodyChanges.TotalChanges() - } - for k := range o.ServerChanges { - c += o.ServerChanges[k].TotalChanges() - } - for k := range o.CallbackChanges { - c += o.CallbackChanges[k].TotalChanges() - } - if o.ExtensionChanges != nil { - c += o.ExtensionChanges.TotalChanges() - } - return c -} - -// TotalBreakingChanges returns the total number of breaking changes made between two Swagger -// or OpenAPI Operation objects. 
-func (o *OperationChanges) TotalBreakingChanges() int { - c := o.PropertyChanges.TotalBreakingChanges() - if o.ExternalDocChanges != nil { - c += o.ExternalDocChanges.TotalBreakingChanges() - } - for k := range o.ParameterChanges { - c += o.ParameterChanges[k].TotalBreakingChanges() - } - if o.ResponsesChanges != nil { - c += o.ResponsesChanges.TotalBreakingChanges() - } - for k := range o.SecurityRequirementChanges { - c += o.SecurityRequirementChanges[k].TotalBreakingChanges() - } - for k := range o.CallbackChanges { - c += o.CallbackChanges[k].TotalBreakingChanges() - } - if o.RequestBodyChanges != nil { - c += o.RequestBodyChanges.TotalBreakingChanges() - } - for k := range o.ServerChanges { - c += o.ServerChanges[k].TotalBreakingChanges() - } - return c -} - -// check for properties shared between operations objects. -func addSharedOperationProperties(left, right low.SharedOperations, changes *[]*Change) []*PropertyCheck { - var props []*PropertyCheck - - // tags - if len(left.GetTags().Value) > 0 || len(right.GetTags().Value) > 0 { - ExtractStringValueSliceChangesWithRules(left.GetTags().Value, right.GetTags().Value, - changes, v3.TagsLabel, CompOperation, PropTags) - } - - // summary - addPropertyCheck(&props, left.GetSummary().ValueNode, right.GetSummary().ValueNode, - left.GetSummary(), right.GetSummary(), changes, v3.SummaryLabel, - BreakingModified(CompOperation, PropSummary), CompOperation, PropSummary) - - // description - addPropertyCheck(&props, left.GetDescription().ValueNode, right.GetDescription().ValueNode, - left.GetDescription(), right.GetDescription(), changes, v3.DescriptionLabel, - BreakingModified(CompOperation, PropDescription), CompOperation, PropDescription) - - // deprecated - addPropertyCheck(&props, left.GetDeprecated().ValueNode, right.GetDeprecated().ValueNode, - left.GetDeprecated(), right.GetDeprecated(), changes, v3.DeprecatedLabel, - BreakingModified(CompOperation, PropDeprecated), CompOperation, PropDeprecated) - - // 
operation id - addPropertyCheck(&props, left.GetOperationId().ValueNode, right.GetOperationId().ValueNode, - left.GetOperationId(), right.GetOperationId(), changes, v3.OperationIdLabel, - BreakingModified(CompOperation, PropOperationID), CompOperation, PropOperationID) - - return props -} - -// check shared objects -func compareSharedOperationObjects(l, r low.SharedOperations, changes *[]*Change, opChanges *OperationChanges) { - // external docs - if !l.GetExternalDocs().IsEmpty() && !r.GetExternalDocs().IsEmpty() { - lExtDoc := l.GetExternalDocs().Value.(*base.ExternalDoc) - rExtDoc := r.GetExternalDocs().Value.(*base.ExternalDoc) - if !low.AreEqual(lExtDoc, rExtDoc) { - opChanges.ExternalDocChanges = CompareExternalDocs(lExtDoc, rExtDoc) - } - } - if l.GetExternalDocs().IsEmpty() && !r.GetExternalDocs().IsEmpty() { - CreateChange(changes, PropertyAdded, v3.ExternalDocsLabel, - nil, r.GetExternalDocs().ValueNode, BreakingAdded(CompOperation, PropExternalDocs), nil, - r.GetExternalDocs().Value) - } - if !l.GetExternalDocs().IsEmpty() && r.GetExternalDocs().IsEmpty() { - CreateChange(changes, PropertyRemoved, v3.ExternalDocsLabel, - l.GetExternalDocs().ValueNode, nil, BreakingRemoved(CompOperation, PropExternalDocs), l.GetExternalDocs().Value, - nil) - } - - // responses - if !l.GetResponses().IsEmpty() && !r.GetResponses().IsEmpty() { - opChanges.ResponsesChanges = CompareResponses(l.GetResponses().Value, r.GetResponses().Value) - } - if l.GetResponses().IsEmpty() && !r.GetResponses().IsEmpty() { - CreateChange(changes, PropertyAdded, v3.ResponsesLabel, - nil, r.GetResponses().ValueNode, BreakingAdded(CompOperation, PropResponses), nil, - r.GetResponses().Value) - } - if !l.GetResponses().IsEmpty() && r.GetResponses().IsEmpty() { - CreateChange(changes, PropertyRemoved, v3.ResponsesLabel, - l.GetResponses().ValueNode, nil, BreakingRemoved(CompOperation, PropResponses), l.GetResponses().Value, - nil) - } -} - -// CompareOperations compares a left and right Swagger 
or OpenAPI Operation object. If changes are found, returns -// a pointer to an OperationChanges instance, or nil if nothing is found. -func CompareOperations(l, r any) *OperationChanges { - var changes []*Change - var props []*PropertyCheck - - oc := new(OperationChanges) - - // Swagger - if reflect.TypeOf(&v2.Operation{}) == reflect.TypeOf(l) && - reflect.TypeOf(&v2.Operation{}) == reflect.TypeOf(r) { - - lOperation := l.(*v2.Operation) - rOperation := r.(*v2.Operation) - - // perform hash check to avoid further processing - if low.AreEqual(lOperation, rOperation) { - return nil - } - - props = append(props, addSharedOperationProperties(lOperation, rOperation, &changes)...) - - compareSharedOperationObjects(lOperation, rOperation, &changes, oc) - - // parameters - lParamsUntyped := lOperation.GetParameters() - rParamsUntyped := rOperation.GetParameters() - if !lParamsUntyped.IsEmpty() && !rParamsUntyped.IsEmpty() { - lParams := lParamsUntyped.Value.([]low.ValueReference[*v2.Parameter]) - rParams := rParamsUntyped.Value.([]low.ValueReference[*v2.Parameter]) - - lv := make(map[string]*v2.Parameter, len(lParams)) - rv := make(map[string]*v2.Parameter, len(rParams)) - lRefs := make(map[string]*low.ValueReference[*v2.Parameter], len(lParams)) - rRefs := make(map[string]*low.ValueReference[*v2.Parameter], len(rParams)) - - for i := range lParams { - s := lParams[i].Value.Name.Value - lv[s] = lParams[i].Value - lRefs[s] = &lParams[i] // Keep the reference wrapper - } - for i := range rParams { - s := rParams[i].Value.Name.Value - rv[s] = rParams[i].Value - rRefs[s] = &rParams[i] // Keep the reference wrapper - } - - var paramChanges []*ParameterChanges - for n := range lv { - if _, ok := rv[n]; ok { - if !low.AreEqual(lv[n], rv[n]) { - ch := CompareParameters(lv[n], rv[n]) - if ch != nil { - // Preserve reference information if this parameter is a $ref - PreserveParameterReference(lRefs, rRefs, n, ch) - paramChanges = append(paramChanges, ch) - } - } - continue - } - 
CreateChange(&changes, ObjectRemoved, v3.ParametersLabel, - lv[n].Name.ValueNode, nil, BreakingRemoved(CompOperation, PropParameters), lv[n], - nil) - - } - for n := range rv { - if _, ok := lv[n]; !ok { - CreateChange(&changes, ObjectAdded, v3.ParametersLabel, - nil, rv[n].Name.ValueNode, rv[n].Required.Value, nil, - rv[n]) - } - } - oc.ParameterChanges = paramChanges - } - if !lParamsUntyped.IsEmpty() && rParamsUntyped.IsEmpty() { - CreateChange(&changes, PropertyRemoved, v3.ParametersLabel, - lParamsUntyped.ValueNode, nil, true, lParamsUntyped.Value, - nil) - } - if lParamsUntyped.IsEmpty() && !rParamsUntyped.IsEmpty() { - rParams := rParamsUntyped.Value.([]low.ValueReference[*v2.Parameter]) - breaking := false - for i := range rParams { - if rParams[i].Value.Required.Value { - breaking = true - } - } - CreateChange(&changes, PropertyAdded, v3.ParametersLabel, - nil, rParamsUntyped.ValueNode, breaking, nil, - rParamsUntyped.Value) - } - - // security - if !lOperation.Security.IsEmpty() || !rOperation.Security.IsEmpty() { - checkSecurity(lOperation.Security, rOperation.Security, &changes, oc) - } - - // produces - if len(lOperation.Produces.Value) > 0 || len(rOperation.Produces.Value) > 0 { - ExtractStringValueSliceChanges(lOperation.Produces.Value, rOperation.Produces.Value, - &changes, v3.ProducesLabel, true) - } - - // consumes - if len(lOperation.Consumes.Value) > 0 || len(rOperation.Consumes.Value) > 0 { - ExtractStringValueSliceChanges(lOperation.Consumes.Value, rOperation.Consumes.Value, - &changes, v3.ConsumesLabel, true) - } - - // schemes - if len(lOperation.Schemes.Value) > 0 || len(rOperation.Schemes.Value) > 0 { - ExtractStringValueSliceChanges(lOperation.Schemes.Value, rOperation.Schemes.Value, - &changes, v3.SchemesLabel, true) - } - - oc.ExtensionChanges = CompareExtensions(lOperation.Extensions, rOperation.Extensions) - } - - // OpenAPI - if reflect.TypeOf(&v3.Operation{}) == reflect.TypeOf(l) && - reflect.TypeOf(&v3.Operation{}) == 
reflect.TypeOf(r) { - - lOperation := l.(*v3.Operation) - rOperation := r.(*v3.Operation) - - // perform hash check to avoid further processing - if low.AreEqual(lOperation, rOperation) { - return nil - } - - props = append(props, addSharedOperationProperties(lOperation, rOperation, &changes)...) - compareSharedOperationObjects(lOperation, rOperation, &changes, oc) - - // parameters - lParamsUntyped := lOperation.GetParameters() - rParamsUntyped := rOperation.GetParameters() - if !lParamsUntyped.IsEmpty() && !rParamsUntyped.IsEmpty() { - lParams := lParamsUntyped.Value.([]low.ValueReference[*v3.Parameter]) - rParams := rParamsUntyped.Value.([]low.ValueReference[*v3.Parameter]) - - lv := make(map[string]*v3.Parameter, len(lParams)) - rv := make(map[string]*v3.Parameter, len(rParams)) - lRefs := make(map[string]*low.ValueReference[*v3.Parameter], len(lParams)) - rRefs := make(map[string]*low.ValueReference[*v3.Parameter], len(rParams)) - - for i := range lParams { - s := lParams[i].Value.Name.Value - lv[s] = lParams[i].Value - lRefs[s] = &lParams[i] // Keep the reference wrapper - } - for i := range rParams { - s := rParams[i].Value.Name.Value - rv[s] = rParams[i].Value - rRefs[s] = &rParams[i] // Keep the reference wrapper - } - - var paramChanges []*ParameterChanges - for n := range lv { - if _, ok := rv[n]; ok { - if !low.AreEqual(lv[n], rv[n]) { - ch := CompareParameters(lv[n], rv[n]) - if ch != nil { - // Preserve reference information if this parameter is a $ref - PreserveParameterReference(lRefs, rRefs, n, ch) - paramChanges = append(paramChanges, ch) - } - } - continue - } - CreateChange(&changes, ObjectRemoved, v3.ParametersLabel, - lv[n].Name.ValueNode, nil, BreakingRemoved(CompOperation, PropParameters), lv[n], - nil) - - } - for n := range rv { - if _, ok := lv[n]; !ok { - // Check configurable breaking rules first - breaking := BreakingAdded(CompOperation, PropParameters) - // If config doesn't say breaking, fall back to semantic check (required 
parameter) - if !breaking { - breaking = rv[n].Required.Value - } - CreateChange(&changes, ObjectAdded, v3.ParametersLabel, - nil, rv[n].Name.ValueNode, breaking, nil, - rv[n]) - } - } - oc.ParameterChanges = paramChanges - } - if !lParamsUntyped.IsEmpty() && rParamsUntyped.IsEmpty() { - CreateChange(&changes, PropertyRemoved, v3.ParametersLabel, - lParamsUntyped.ValueNode, nil, BreakingRemoved(CompOperation, PropParameters), lParamsUntyped.Value, - nil) - } - if lParamsUntyped.IsEmpty() && !rParamsUntyped.IsEmpty() { - rParams := rParamsUntyped.Value.([]low.ValueReference[*v3.Parameter]) - // Check configurable breaking rules first - breaking := BreakingAdded(CompOperation, PropParameters) - // If config doesn't say breaking, fall back to semantic check (required parameter) - if !breaking { - for i := range rParams { - if rParams[i].Value.Required.Value { - breaking = true - break - } - } - } - CreateChange(&changes, PropertyAdded, v3.ParametersLabel, - nil, rParamsUntyped.ValueNode, breaking, nil, - rParamsUntyped.Value) - } - - // security - if !lOperation.Security.IsEmpty() || !rOperation.Security.IsEmpty() { - checkSecurity(lOperation.Security, rOperation.Security, &changes, oc) - } - - // request body - if !lOperation.RequestBody.IsEmpty() && !rOperation.RequestBody.IsEmpty() { - if !low.AreEqual(lOperation.RequestBody.Value, rOperation.RequestBody.Value) { - oc.RequestBodyChanges = CompareRequestBodies(lOperation.RequestBody.Value, rOperation.RequestBody.Value) - } - } - if !lOperation.RequestBody.IsEmpty() && rOperation.RequestBody.IsEmpty() { - CreateChange(&changes, PropertyRemoved, v3.RequestBodyLabel, - lOperation.RequestBody.ValueNode, nil, BreakingRemoved(CompOperation, PropRequestBody), lOperation.RequestBody.Value, - nil) - } - if lOperation.RequestBody.IsEmpty() && !rOperation.RequestBody.IsEmpty() { - CreateChange(&changes, PropertyAdded, v3.RequestBodyLabel, - nil, rOperation.RequestBody.ValueNode, BreakingAdded(CompOperation, PropRequestBody), 
nil, - rOperation.RequestBody.Value) - } - - // callbacks - use CheckMapForChangesWithNilSupport to properly populate CallbackChanges - // for added/removed callbacks, enabling proper tree hierarchy rendering - oc.CallbackChanges = CheckMapForChangesWithNilSupport(lOperation.Callbacks.Value, rOperation.Callbacks.Value, - &changes, v3.CallbacksLabel, CompareCallback) - - // servers - oc.ServerChanges = checkServers(lOperation.Servers, rOperation.Servers, CompOperation, PropServers) - oc.ExtensionChanges = CompareExtensions(lOperation.Extensions, rOperation.Extensions) - - } - CheckProperties(props) - oc.PropertyChanges = NewPropertyChanges(changes) - return oc -} - -// check servers property -// component and property are used for breaking rules lookup (e.g., CompOperation/PropServers or CompServers/"") -func checkServers(lServers, rServers low.NodeReference[[]low.ValueReference[*v3.Server]], component, property string) []*ServerChanges { - var serverChanges []*ServerChanges - - if !lServers.IsEmpty() && !rServers.IsEmpty() { - - lv := make(map[string]low.ValueReference[*v3.Server], len(lServers.Value)) - rv := make(map[string]low.ValueReference[*v3.Server], len(rServers.Value)) - - for i := range lServers.Value { - var s string - if !lServers.Value[i].Value.URL.IsEmpty() { - s = lServers.Value[i].Value.URL.Value - } else { - s = low.GenerateHashString(lServers.Value[i].Value) - } - lv[s] = lServers.Value[i] - } - for i := range rServers.Value { - var s string - if !rServers.Value[i].Value.URL.IsEmpty() { - s = rServers.Value[i].Value.URL.Value - } else { - s = low.GenerateHashString(rServers.Value[i].Value) - } - rv[s] = rServers.Value[i] - } - - for k := range lv { - - var changes []*Change - - if _, ok := rv[k]; ok { - if !low.AreEqual(lv[k].Value, rv[k].Value) { - serverChanges = append(serverChanges, CompareServers(lv[k].Value, rv[k].Value)) - } - continue - } - lv[k].ValueNode.Value = lv[k].Value.URL.Value - CreateChange(&changes, ObjectRemoved, 
v3.ServersLabel, - lv[k].ValueNode, nil, BreakingRemoved(component, property), lv[k].Value, - nil) - sc := new(ServerChanges) - sc.PropertyChanges = NewPropertyChanges(changes) - serverChanges = append(serverChanges, sc) - - } - - for k := range rv { - if _, ok := lv[k]; !ok { - - var changes []*Change - rv[k].ValueNode.Value = rv[k].Value.URL.Value - CreateChange(&changes, ObjectAdded, v3.ServersLabel, - nil, rv[k].ValueNode, BreakingAdded(component, property), nil, - rv[k].Value) - - sc := new(ServerChanges) - sc.PropertyChanges = NewPropertyChanges(changes) - serverChanges = append(serverChanges, sc) - } - } - } - var changes []*Change - sc := new(ServerChanges) - if !lServers.IsEmpty() && rServers.IsEmpty() { - CreateChange(&changes, PropertyRemoved, v3.ServersLabel, - lServers.ValueNode, nil, BreakingRemoved(component, property), lServers.Value, - nil) - } - if lServers.IsEmpty() && !rServers.IsEmpty() { - CreateChange(&changes, PropertyAdded, v3.ServersLabel, - nil, rServers.ValueNode, BreakingAdded(component, property), nil, - rServers.Value) - } - sc.PropertyChanges = NewPropertyChanges(changes) - if len(changes) > 0 { - serverChanges = append(serverChanges, sc) - } - if len(serverChanges) <= 0 { - return nil - } - return serverChanges -} - -// check security property. 
-func checkSecurity(lSecurity, rSecurity low.NodeReference[[]low.ValueReference[*base.SecurityRequirement]], - changes *[]*Change, oc any, -) { - lv := make(map[string]*base.SecurityRequirement, len(lSecurity.Value)) - rv := make(map[string]*base.SecurityRequirement, len(rSecurity.Value)) - lvn := make(map[string]*yaml.Node, len(lSecurity.Value)) - rvn := make(map[string]*yaml.Node, len(rSecurity.Value)) - - for i := range lSecurity.Value { - keys := lSecurity.Value[i].Value.GetKeys() - sort.Strings(keys) - s := strings.Join(keys, "|") - lv[s] = lSecurity.Value[i].Value - lvn[s] = lSecurity.Value[i].ValueNode - - } - for i := range rSecurity.Value { - keys := rSecurity.Value[i].Value.GetKeys() - sort.Strings(keys) - s := strings.Join(keys, "|") - rv[s] = rSecurity.Value[i].Value - rvn[s] = rSecurity.Value[i].ValueNode - } - - // Determine breaking rules based on type (zero allocations using type switch) - var addedBreaking, removedBreaking bool - switch oc.(type) { - case *DocumentChanges: - addedBreaking = BreakingAdded(CompSecurity, "") - removedBreaking = BreakingRemoved(CompSecurity, "") - case *OperationChanges: - addedBreaking = BreakingAdded(CompOperation, PropSecurity) - removedBreaking = BreakingRemoved(CompOperation, PropSecurity) - } - - var secChanges []*SecurityRequirementChanges - for n := range lv { - if _, ok := rv[n]; ok { - if !low.AreEqual(lv[n], rv[n]) { - ch := CompareSecurityRequirement(lv[n], rv[n]) - if ch != nil { - secChanges = append(secChanges, ch) - } - } - continue - } - // Whole security requirement was removed - create SecurityRequirementChanges - // so it appears under "Security Requirements" section - schemeNames := strings.Join(lv[n].GetKeys(), ", ") - - var reqChanges []*Change - CreateChange(&reqChanges, ObjectRemoved, schemeNames, - lvn[n], nil, removedBreaking, lv[n], nil) - secChanges = append(secChanges, &SecurityRequirementChanges{ - PropertyChanges: NewPropertyChanges(reqChanges), - }) - } - for n := range rv { - if _, ok 
:= lv[n]; !ok { - // Whole security requirement was added - create SecurityRequirementChanges - // so it appears under "Security Requirements" section - schemeNames := strings.Join(rv[n].GetKeys(), ", ") - - var reqChanges []*Change - CreateChange(&reqChanges, ObjectAdded, schemeNames, - nil, rvn[n], addedBreaking, nil, rv[n]) - secChanges = append(secChanges, &SecurityRequirementChanges{ - PropertyChanges: NewPropertyChanges(reqChanges), - }) - } - } - - // Assign to correct type using type switch (zero allocations) - switch v := oc.(type) { - case *OperationChanges: - v.SecurityRequirementChanges = secChanges - case *DocumentChanges: - v.SecurityRequirementChanges = secChanges - } -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/parameter.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/parameter.go deleted file mode 100644 index a533bfe383a..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/parameter.go +++ /dev/null @@ -1,366 +0,0 @@ -// Copyright 2022-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - "reflect" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/datamodel/low/base" - v2 "github.com/pb33f/libopenapi/datamodel/low/v2" - v3 "github.com/pb33f/libopenapi/datamodel/low/v3" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// ParameterChanges represents changes found between Swagger or OpenAPI Parameter objects. -type ParameterChanges struct { - *PropertyChanges - Name string `json:"name,omitempty" yaml:"name,omitempty"` - SchemaChanges *SchemaChanges `json:"schemas,omitempty" yaml:"schemas,omitempty"` - ExtensionChanges *ExtensionChanges `json:"extensions,omitempty" yaml:"extensions,omitempty"` - - // Swagger supports Items. - ItemsChanges *ItemsChanges `json:"items,omitempty" yaml:"items,omitempty"` - - // OpenAPI supports examples and content types. 
- ExamplesChanges map[string]*ExampleChanges `json:"examples,omitempty" yaml:"examples,omitempty"` - ContentChanges map[string]*MediaTypeChanges `json:"content,omitempty" yaml:"content,omitempty"` -} - -// GetAllChanges returns a slice of all changes made between Parameter objects -func (p *ParameterChanges) GetAllChanges() []*Change { - if p == nil { - return nil - } - var changes []*Change - changes = append(changes, p.Changes...) - if p.SchemaChanges != nil { - changes = append(changes, p.SchemaChanges.GetAllChanges()...) - } - for i := range p.ExamplesChanges { - changes = append(changes, p.ExamplesChanges[i].GetAllChanges()...) - } - if p.ItemsChanges != nil { - changes = append(changes, p.ItemsChanges.GetAllChanges()...) - } - if p.ExtensionChanges != nil { - changes = append(changes, p.ExtensionChanges.GetAllChanges()...) - } - for i := range p.ContentChanges { - changes = append(changes, p.ContentChanges[i].GetAllChanges()...) - } - return changes -} - -// TotalChanges returns a count of everything that changed -func (p *ParameterChanges) TotalChanges() int { - if p == nil { - return 0 - } - c := p.PropertyChanges.TotalChanges() - if p.SchemaChanges != nil { - c += p.SchemaChanges.TotalChanges() - } - for i := range p.ExamplesChanges { - c += p.ExamplesChanges[i].TotalChanges() - } - if p.ItemsChanges != nil { - c += p.ItemsChanges.TotalChanges() - } - if p.ExtensionChanges != nil { - c += p.ExtensionChanges.TotalChanges() - } - for i := range p.ContentChanges { - c += p.ContentChanges[i].TotalChanges() - } - return c -} - -// TotalBreakingChanges always returns 0 for ExternalDoc objects, they are non-binding. 
-func (p *ParameterChanges) TotalBreakingChanges() int { - c := p.PropertyChanges.TotalBreakingChanges() - if p.SchemaChanges != nil { - c += p.SchemaChanges.TotalBreakingChanges() - } - if p.ItemsChanges != nil { - c += p.ItemsChanges.TotalBreakingChanges() - } - for i := range p.ContentChanges { - c += p.ContentChanges[i].TotalBreakingChanges() - } - return c -} - -func addPropertyCheck(props *[]*PropertyCheck, - lvn, rvn *yaml.Node, lv, rv any, changes *[]*Change, label string, breaking bool, - component, property string, -) { - *props = append(*props, &PropertyCheck{ - LeftNode: lvn, - RightNode: rvn, - Label: label, - Changes: changes, - Breaking: breaking, - Component: component, - Property: property, - Original: lv, - New: rv, - }) -} - -func addOpenAPIParameterProperties(left, right low.OpenAPIParameter, changes *[]*Change) []*PropertyCheck { - var props []*PropertyCheck - - // style - addPropertyCheck(&props, left.GetStyle().ValueNode, right.GetStyle().ValueNode, - left.GetStyle(), right.GetStyle(), changes, v3.StyleLabel, - BreakingModified(CompParameter, PropStyle), CompParameter, PropStyle) - - // allow reserved - addPropertyCheck(&props, left.GetAllowReserved().ValueNode, right.GetAllowReserved().ValueNode, - left.GetAllowReserved(), right.GetAllowReserved(), changes, v3.AllowReservedLabel, - BreakingModified(CompParameter, PropAllowReserved), CompParameter, PropAllowReserved) - - // explode - addPropertyCheck(&props, left.GetExplode().ValueNode, right.GetExplode().ValueNode, - left.GetExplode(), right.GetExplode(), changes, v3.ExplodeLabel, - BreakingModified(CompParameter, PropExplode), CompParameter, PropExplode) - - // deprecated - addPropertyCheck(&props, left.GetDeprecated().ValueNode, right.GetDeprecated().ValueNode, - left.GetDeprecated(), right.GetDeprecated(), changes, v3.DeprecatedLabel, - BreakingModified(CompParameter, PropDeprecated), CompParameter, PropDeprecated) - - // example - addPropertyCheck(&props, left.GetExample().ValueNode, 
right.GetExample().ValueNode, - left.GetExample(), right.GetExample(), changes, v3.ExampleLabel, - BreakingModified(CompParameter, PropExample), CompParameter, PropExample) - - return props -} - -func addSwaggerParameterProperties(left, right low.SwaggerParameter, changes *[]*Change) []*PropertyCheck { - var props []*PropertyCheck - - // type - addPropertyCheck(&props, left.GetType().ValueNode, right.GetType().ValueNode, - left.GetType(), right.GetType(), changes, v3.TypeLabel, true, CompParameter, PropType) - - // format - addPropertyCheck(&props, left.GetFormat().ValueNode, right.GetFormat().ValueNode, - left.GetFormat(), right.GetFormat(), changes, v3.FormatLabel, true, CompParameter, PropFormat) - - // collection format - addPropertyCheck(&props, left.GetCollectionFormat().ValueNode, right.GetCollectionFormat().ValueNode, - left.GetCollectionFormat(), right.GetCollectionFormat(), changes, v3.CollectionFormatLabel, true, CompParameter, PropCollectionFormat) - - // maximum - addPropertyCheck(&props, left.GetMaximum().ValueNode, right.GetMaximum().ValueNode, - left.GetMaximum(), right.GetMaximum(), changes, v3.MaximumLabel, true, CompParameter, PropMaximum) - - // minimum - addPropertyCheck(&props, left.GetMinimum().ValueNode, right.GetMinimum().ValueNode, - left.GetMinimum(), right.GetMinimum(), changes, v3.MinimumLabel, true, CompParameter, PropMinimum) - - // exclusive maximum - addPropertyCheck(&props, left.GetExclusiveMaximum().ValueNode, right.GetExclusiveMaximum().ValueNode, - left.GetExclusiveMaximum(), right.GetExclusiveMaximum(), changes, v3.ExclusiveMaximumLabel, true, CompParameter, PropExclusiveMaximum) - - // exclusive minimum - addPropertyCheck(&props, left.GetExclusiveMinimum().ValueNode, right.GetExclusiveMinimum().ValueNode, - left.GetExclusiveMinimum(), right.GetExclusiveMinimum(), changes, v3.ExclusiveMinimumLabel, true, CompParameter, PropExclusiveMinimum) - - // max length - addPropertyCheck(&props, left.GetMaxLength().ValueNode, 
right.GetMaxLength().ValueNode, - left.GetMaxLength(), right.GetMaxLength(), changes, v3.MaxLengthLabel, true, CompParameter, PropMaxLength) - - // min length - addPropertyCheck(&props, left.GetMinLength().ValueNode, right.GetMinLength().ValueNode, - left.GetMinLength(), right.GetMinLength(), changes, v3.MinLengthLabel, true, CompParameter, PropMinLength) - - // pattern - addPropertyCheck(&props, left.GetPattern().ValueNode, right.GetPattern().ValueNode, - left.GetPattern(), right.GetPattern(), changes, v3.PatternLabel, true, CompParameter, PropPattern) - - // max items - addPropertyCheck(&props, left.GetMaxItems().ValueNode, right.GetMaxItems().ValueNode, - left.GetMaxItems(), right.GetMaxItems(), changes, v3.MaxItemsLabel, true, CompParameter, PropMaxItems) - - // min items - addPropertyCheck(&props, left.GetMinItems().ValueNode, right.GetMinItems().ValueNode, - left.GetMinItems(), right.GetMinItems(), changes, v3.MinItemsLabel, true, CompParameter, PropMinItems) - - // unique items - addPropertyCheck(&props, left.GetUniqueItems().ValueNode, right.GetUniqueItems().ValueNode, - left.GetUniqueItems(), right.GetUniqueItems(), changes, v3.UniqueItemsLabel, true, CompParameter, PropUniqueItems) - - // default - addPropertyCheck(&props, left.GetDefault().ValueNode, right.GetDefault().ValueNode, - left.GetDefault(), right.GetDefault(), changes, v3.DefaultLabel, true, CompParameter, PropDefault) - - // multiple of - addPropertyCheck(&props, left.GetMultipleOf().ValueNode, right.GetMultipleOf().ValueNode, - left.GetMultipleOf(), right.GetMultipleOf(), changes, v3.MultipleOfLabel, true, CompParameter, PropMultipleOf) - - return props -} - -func addCommonParameterProperties(left, right low.SharedParameters, changes *[]*Change) []*PropertyCheck { - var props []*PropertyCheck - - addPropertyCheck(&props, left.GetName().ValueNode, right.GetName().ValueNode, - left.GetName(), right.GetName(), changes, v3.NameLabel, - BreakingModified(CompParameter, PropName), CompParameter, 
PropName) - - // in - addPropertyCheck(&props, left.GetIn().ValueNode, right.GetIn().ValueNode, - left.GetIn(), right.GetIn(), changes, v3.InLabel, - BreakingModified(CompParameter, PropIn), CompParameter, PropIn) - - // description - addPropertyCheck(&props, left.GetDescription().ValueNode, right.GetDescription().ValueNode, - left.GetDescription(), right.GetDescription(), changes, v3.DescriptionLabel, - BreakingModified(CompParameter, PropDescription), CompParameter, PropDescription) - - // required - addPropertyCheck(&props, left.GetRequired().ValueNode, right.GetRequired().ValueNode, - left.GetRequired(), right.GetRequired(), changes, v3.RequiredLabel, - BreakingModified(CompParameter, PropRequired), CompParameter, PropRequired) - - // allow empty value - addPropertyCheck(&props, left.GetAllowEmptyValue().ValueNode, right.GetAllowEmptyValue().ValueNode, - left.GetAllowEmptyValue(), right.GetAllowEmptyValue(), changes, v3.AllowEmptyValueLabel, - BreakingModified(CompParameter, PropAllowEmptyValue), CompParameter, PropAllowEmptyValue) - - return props -} - -// CompareParametersV3 is an OpenAPI type safe proxy for CompareParameters -func CompareParametersV3(l, r *v3.Parameter) *ParameterChanges { - return CompareParameters(l, r) -} - -// CompareParameters compares a left and right Swagger or OpenAPI Parameter object for any changes. If found returns -// a pointer to ParameterChanges. If nothing is found, returns nil. 
-func CompareParameters(l, r any) *ParameterChanges { - var changes []*Change - var props []*PropertyCheck - - pc := new(ParameterChanges) - var lSchema *base.SchemaProxy - var rSchema *base.SchemaProxy - var lext, rext *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] - - if reflect.TypeOf(&v2.Parameter{}) == reflect.TypeOf(l) && reflect.TypeOf(&v2.Parameter{}) == reflect.TypeOf(r) { - lParam := l.(*v2.Parameter) - rParam := r.(*v2.Parameter) - pc.Name = lParam.Name.Value - - // perform hash check to avoid further processing - if low.AreEqual(lParam, rParam) { - return nil - } - - props = append(props, addSwaggerParameterProperties(lParam, rParam, &changes)...) - props = append(props, addCommonParameterProperties(lParam, rParam, &changes)...) - - // extract schema - if lParam != nil { - lSchema = lParam.Schema.Value - lext = lParam.Extensions - } - if rParam != nil { - rext = rParam.Extensions - rSchema = rParam.Schema.Value - } - - // items - if !lParam.Items.IsEmpty() && !rParam.Items.IsEmpty() { - if lParam.Items.Value.Hash() != rParam.Items.Value.Hash() { - pc.ItemsChanges = CompareItems(lParam.Items.Value, rParam.Items.Value) - } - } - if lParam.Items.IsEmpty() && !rParam.Items.IsEmpty() { - CreateChange(&changes, ObjectAdded, v3.ItemsLabel, - nil, rParam.Items.ValueNode, BreakingAdded(CompParameter, PropItems), nil, - rParam.Items.Value) - } - if !lParam.Items.IsEmpty() && rParam.Items.IsEmpty() { - CreateChange(&changes, ObjectRemoved, v3.ItemsLabel, - lParam.Items.ValueNode, nil, BreakingRemoved(CompParameter, PropItems), lParam.Items.Value, - nil) - } - - // enum - if len(lParam.Enum.Value) > 0 || len(rParam.Enum.Value) > 0 { - ExtractRawValueSliceChanges(lParam.Enum.Value, rParam.Enum.Value, &changes, v3.EnumLabel, true) - } - } - - // OpenAPI - if reflect.TypeOf(&v3.Parameter{}) == reflect.TypeOf(l) && reflect.TypeOf(&v3.Parameter{}) == reflect.TypeOf(r) { - - lParam := l.(*v3.Parameter) - rParam := r.(*v3.Parameter) - pc.Name = 
lParam.Name.Value - - // perform hash check to avoid further processing - if low.AreEqual(lParam, rParam) { - return nil - } - - props = append(props, addOpenAPIParameterProperties(lParam, rParam, &changes)...) - props = append(props, addCommonParameterProperties(lParam, rParam, &changes)...) - if lParam != nil { - lext = lParam.Extensions - lSchema = lParam.Schema.Value - } - if rParam != nil { - rext = rParam.Extensions - rSchema = rParam.Schema.Value - } - - // example - checkParameterExample(lParam.Example, rParam.Example, changes) - - // examples - pc.ExamplesChanges = CheckMapForChanges(lParam.Examples.Value, rParam.Examples.Value, - &changes, v3.ExamplesLabel, CompareExamples) - - // content - pc.ContentChanges = CheckMapForChanges(lParam.Content.Value, rParam.Content.Value, - &changes, v3.ContentLabel, CompareMediaTypes) - } - CheckProperties(props) - - if lSchema != nil && rSchema != nil { - pc.SchemaChanges = CompareSchemas(lSchema, rSchema) - } - if lSchema != nil && rSchema == nil { - CreateChange(&changes, ObjectRemoved, v3.SchemaLabel, - lSchema.GetValueNode(), nil, BreakingRemoved(CompParameter, PropSchema), lSchema, - nil) - } - - if lSchema == nil && rSchema != nil { - CreateChange(&changes, ObjectAdded, v3.SchemaLabel, - nil, rSchema.GetValueNode(), BreakingAdded(CompParameter, PropSchema), nil, - rSchema) - } - - pc.PropertyChanges = NewPropertyChanges(changes) - pc.ExtensionChanges = CompareExtensions(lext, rext) - return pc -} - -func checkParameterExample(expLeft, expRight low.NodeReference[*yaml.Node], changes []*Change) { - CheckPropertyAdditionOrRemovalWithEncoding(expLeft.ValueNode, expRight.ValueNode, - v3.ExampleLabel, &changes, - BreakingAdded(CompParameter, PropExample) || BreakingRemoved(CompParameter, PropExample), - expLeft.Value, expRight.Value) - CheckForModificationWithEncoding(expLeft.ValueNode, expRight.ValueNode, - v3.ExampleLabel, &changes, BreakingModified(CompParameter, PropExample), - expLeft.Value, expRight.Value) -} diff 
--git a/vendor/github.com/pb33f/libopenapi/what-changed/model/path_item.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/path_item.go deleted file mode 100644 index 803be6f6c7e..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/path_item.go +++ /dev/null @@ -1,746 +0,0 @@ -// Copyright 2022-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - "reflect" - - "github.com/pb33f/libopenapi/datamodel/low" - v2 "github.com/pb33f/libopenapi/datamodel/low/v2" - v3 "github.com/pb33f/libopenapi/datamodel/low/v3" -) - -// PathItemChanges represents changes found between to Swagger or OpenAPI PathItem object. -type PathItemChanges struct { - *PropertyChanges - GetChanges *OperationChanges `json:"get,omitempty" yaml:"get,omitempty"` - PutChanges *OperationChanges `json:"put,omitempty" yaml:"put,omitempty"` - PostChanges *OperationChanges `json:"post,omitempty" yaml:"post,omitempty"` - DeleteChanges *OperationChanges `json:"delete,omitempty" yaml:"delete,omitempty"` - OptionsChanges *OperationChanges `json:"options,omitempty" yaml:"options,omitempty"` - HeadChanges *OperationChanges `json:"head,omitempty" yaml:"head,omitempty"` - PatchChanges *OperationChanges `json:"patch,omitempty" yaml:"patch,omitempty"` - TraceChanges *OperationChanges `json:"trace,omitempty" yaml:"trace,omitempty"` - QueryChanges *OperationChanges `json:"query,omitempty" yaml:"query,omitempty"` - AdditionalOperationChanges map[string]*OperationChanges `json:"additionalOperations,omitempty" yaml:"additionalOperations,omitempty"` // OpenAPI 3.2+ additional operations - ServerChanges []*ServerChanges `json:"servers,omitempty" yaml:"servers,omitempty"` - ParameterChanges []*ParameterChanges `json:"parameters,omitempty" yaml:"parameters,omitempty"` - ExtensionChanges *ExtensionChanges `json:"extensions,omitempty" yaml:"extensions,omitempty"` -} - -// GetAllChanges returns a slice of all changes made between PathItem 
objects -func (p *PathItemChanges) GetAllChanges() []*Change { - if p == nil { - return nil - } - var changes []*Change - changes = append(changes, p.Changes...) - if p.GetChanges != nil { - changes = append(changes, p.GetChanges.GetAllChanges()...) - } - if p.PutChanges != nil { - changes = append(changes, p.PutChanges.GetAllChanges()...) - } - if p.PostChanges != nil { - changes = append(changes, p.PostChanges.GetAllChanges()...) - } - if p.DeleteChanges != nil { - changes = append(changes, p.DeleteChanges.GetAllChanges()...) - } - if p.OptionsChanges != nil { - changes = append(changes, p.OptionsChanges.GetAllChanges()...) - } - if p.HeadChanges != nil { - changes = append(changes, p.HeadChanges.GetAllChanges()...) - } - if p.PatchChanges != nil { - changes = append(changes, p.PatchChanges.GetAllChanges()...) - } - if p.TraceChanges != nil { - changes = append(changes, p.TraceChanges.GetAllChanges()...) - } - if p.QueryChanges != nil { - changes = append(changes, p.QueryChanges.GetAllChanges()...) - } - for k := range p.AdditionalOperationChanges { - changes = append(changes, p.AdditionalOperationChanges[k].GetAllChanges()...) - } - for i := range p.ServerChanges { - changes = append(changes, p.ServerChanges[i].GetAllChanges()...) - } - for i := range p.ParameterChanges { - changes = append(changes, p.ParameterChanges[i].GetAllChanges()...) - } - if p.ExtensionChanges != nil { - changes = append(changes, p.ExtensionChanges.GetAllChanges()...) 
- } - return changes -} - -// TotalChanges returns the total number of changes found between two Swagger or OpenAPI PathItems -func (p *PathItemChanges) TotalChanges() int { - if p == nil { - return 0 - } - c := p.PropertyChanges.TotalChanges() - if p.GetChanges != nil { - c += p.GetChanges.TotalChanges() - } - if p.PutChanges != nil { - c += p.PutChanges.TotalChanges() - } - if p.PostChanges != nil { - c += p.PostChanges.TotalChanges() - } - if p.DeleteChanges != nil { - c += p.DeleteChanges.TotalChanges() - } - if p.OptionsChanges != nil { - c += p.OptionsChanges.TotalChanges() - } - if p.HeadChanges != nil { - c += p.HeadChanges.TotalChanges() - } - if p.PatchChanges != nil { - c += p.PatchChanges.TotalChanges() - } - if p.TraceChanges != nil { - c += p.TraceChanges.TotalChanges() - } - if p.QueryChanges != nil { - c += p.QueryChanges.TotalChanges() - } - for k := range p.AdditionalOperationChanges { - c += p.AdditionalOperationChanges[k].TotalChanges() - } - for i := range p.ServerChanges { - c += p.ServerChanges[i].TotalChanges() - } - for i := range p.ParameterChanges { - c += p.ParameterChanges[i].TotalChanges() - } - if p.ExtensionChanges != nil { - c += p.ExtensionChanges.TotalChanges() - } - return c -} - -// TotalBreakingChanges returns the total number of breaking changes found between two Swagger or OpenAPI PathItems -func (p *PathItemChanges) TotalBreakingChanges() int { - c := p.PropertyChanges.TotalBreakingChanges() - if p.GetChanges != nil { - c += p.GetChanges.TotalBreakingChanges() - } - if p.PutChanges != nil { - c += p.PutChanges.TotalBreakingChanges() - } - if p.PostChanges != nil { - c += p.PostChanges.TotalBreakingChanges() - } - if p.DeleteChanges != nil { - c += p.DeleteChanges.TotalBreakingChanges() - } - if p.OptionsChanges != nil { - c += p.OptionsChanges.TotalBreakingChanges() - } - if p.HeadChanges != nil { - c += p.HeadChanges.TotalBreakingChanges() - } - if p.PatchChanges != nil { - c += p.PatchChanges.TotalBreakingChanges() - } - 
if p.TraceChanges != nil { - c += p.TraceChanges.TotalBreakingChanges() - } - if p.QueryChanges != nil { - c += p.QueryChanges.TotalBreakingChanges() - } - for k := range p.AdditionalOperationChanges { - c += p.AdditionalOperationChanges[k].TotalBreakingChanges() - } - for i := range p.ServerChanges { - c += p.ServerChanges[i].TotalBreakingChanges() - } - for i := range p.ParameterChanges { - c += p.ParameterChanges[i].TotalBreakingChanges() - } - return c -} - -type opCheck struct { - label string - changes *OperationChanges -} - -// ComparePathItemsV3 is an OpenAPI typesafe proxy method for ComparePathItems -func ComparePathItemsV3(l, r *v3.PathItem) *PathItemChanges { - return ComparePathItems(l, r) -} - -// ComparePathItems compare a left and right Swagger or OpenAPI PathItem object for changes. If found, returns -// a pointer to PathItemChanges, or returns nil if nothing is found. -func ComparePathItems(l, r any) *PathItemChanges { - var changes []*Change - var props []*PropertyCheck - - pc := new(PathItemChanges) - - // Swagger - if reflect.TypeOf(&v2.PathItem{}) == reflect.TypeOf(l) && - reflect.TypeOf(&v2.PathItem{}) == reflect.TypeOf(r) { - - lPath := l.(*v2.PathItem) - rPath := r.(*v2.PathItem) - - // perform hash check to avoid further processing - if low.AreEqual(lPath, rPath) { - return nil - } - - props = append(props, compareSwaggerPathItem(lPath, rPath, &changes, pc)...) 
- } - - // OpenAPI - if reflect.TypeOf(&v3.PathItem{}) == reflect.TypeOf(l) && - reflect.TypeOf(&v3.PathItem{}) == reflect.TypeOf(r) { - - lPath := l.(*v3.PathItem) - rPath := r.(*v3.PathItem) - - // perform hash check to avoid further processing - if low.AreEqual(lPath, rPath) { - return nil - } - - // description - props = append(props, NewPropertyCheck(CompPathItem, PropDescription, - lPath.Description.ValueNode, rPath.Description.ValueNode, - v3.DescriptionLabel, &changes, lPath, rPath)) - - // summary - props = append(props, NewPropertyCheck(CompPathItem, PropSummary, - lPath.Summary.ValueNode, rPath.Summary.ValueNode, - v3.SummaryLabel, &changes, lPath, rPath)) - - compareOpenAPIPathItem(lPath, rPath, &changes, pc) - } - - CheckProperties(props) - pc.PropertyChanges = NewPropertyChanges(changes) - return pc -} - -func compareSwaggerPathItem(lPath, rPath *v2.PathItem, changes *[]*Change, pc *PathItemChanges) []*PropertyCheck { - var props []*PropertyCheck - - totalOps := 0 - opChan := make(chan opCheck) - // get - if !lPath.Get.IsEmpty() && !rPath.Get.IsEmpty() { - totalOps++ - go checkOperation(lPath.Get.Value, rPath.Get.Value, opChan, v3.GetLabel) - } - if !lPath.Get.IsEmpty() && rPath.Get.IsEmpty() { - CreateChange(changes, PropertyRemoved, v3.GetLabel, - lPath.Get.ValueNode, nil, BreakingRemoved(CompPathItem, PropGet), lPath.Get.Value, nil) - } - if lPath.Get.IsEmpty() && !rPath.Get.IsEmpty() { - CreateChange(changes, PropertyAdded, v3.GetLabel, - nil, rPath.Get.ValueNode, BreakingAdded(CompPathItem, PropGet), nil, rPath.Get.Value) - } - - // put - if !lPath.Put.IsEmpty() && !rPath.Put.IsEmpty() { - totalOps++ - go checkOperation(lPath.Put.Value, rPath.Put.Value, opChan, v3.PutLabel) - } - if !lPath.Put.IsEmpty() && rPath.Put.IsEmpty() { - CreateChange(changes, PropertyRemoved, v3.PutLabel, - lPath.Put.ValueNode, nil, BreakingRemoved(CompPathItem, PropPut), lPath.Put.Value, nil) - } - if lPath.Put.IsEmpty() && !rPath.Put.IsEmpty() { - CreateChange(changes, 
PropertyAdded, v3.PutLabel, - nil, rPath.Put.ValueNode, BreakingAdded(CompPathItem, PropPut), nil, lPath.Put.Value) - } - - // post - if !lPath.Post.IsEmpty() && !rPath.Post.IsEmpty() { - totalOps++ - go checkOperation(lPath.Post.Value, rPath.Post.Value, opChan, v3.PostLabel) - } - if !lPath.Post.IsEmpty() && rPath.Post.IsEmpty() { - CreateChange(changes, PropertyRemoved, v3.PostLabel, - lPath.Post.ValueNode, nil, BreakingRemoved(CompPathItem, PropPost), lPath.Post.Value, nil) - } - if lPath.Post.IsEmpty() && !rPath.Post.IsEmpty() { - CreateChange(changes, PropertyAdded, v3.PostLabel, - nil, rPath.Post.ValueNode, BreakingAdded(CompPathItem, PropPost), nil, lPath.Post.Value) - } - - // delete - if !lPath.Delete.IsEmpty() && !rPath.Delete.IsEmpty() { - totalOps++ - go checkOperation(lPath.Delete.Value, rPath.Delete.Value, opChan, v3.DeleteLabel) - } - if !lPath.Delete.IsEmpty() && rPath.Delete.IsEmpty() { - CreateChange(changes, PropertyRemoved, v3.DeleteLabel, - lPath.Delete.ValueNode, nil, BreakingRemoved(CompPathItem, PropDelete), lPath.Delete.Value, nil) - } - if lPath.Delete.IsEmpty() && !rPath.Delete.IsEmpty() { - CreateChange(changes, PropertyAdded, v3.DeleteLabel, - nil, rPath.Delete.ValueNode, BreakingAdded(CompPathItem, PropDelete), nil, lPath.Delete.Value) - } - - // options - if !lPath.Options.IsEmpty() && !rPath.Options.IsEmpty() { - totalOps++ - go checkOperation(lPath.Options.Value, rPath.Options.Value, opChan, v3.OptionsLabel) - } - if !lPath.Options.IsEmpty() && rPath.Options.IsEmpty() { - CreateChange(changes, PropertyRemoved, v3.OptionsLabel, - lPath.Options.ValueNode, nil, BreakingRemoved(CompPathItem, PropOptions), lPath.Options.Value, nil) - } - if lPath.Options.IsEmpty() && !rPath.Options.IsEmpty() { - CreateChange(changes, PropertyAdded, v3.OptionsLabel, - nil, rPath.Options.ValueNode, BreakingAdded(CompPathItem, PropOptions), nil, lPath.Options.Value) - } - - // head - if !lPath.Head.IsEmpty() && !rPath.Head.IsEmpty() { - totalOps++ - go 
checkOperation(lPath.Head.Value, rPath.Head.Value, opChan, v3.HeadLabel) - } - if !lPath.Head.IsEmpty() && rPath.Head.IsEmpty() { - CreateChange(changes, PropertyRemoved, v3.HeadLabel, - lPath.Head.ValueNode, nil, BreakingRemoved(CompPathItem, PropHead), lPath.Head.Value, nil) - } - if lPath.Head.IsEmpty() && !rPath.Head.IsEmpty() { - CreateChange(changes, PropertyAdded, v3.HeadLabel, - nil, rPath.Head.ValueNode, BreakingAdded(CompPathItem, PropHead), nil, lPath.Head.Value) - } - - // patch - if !lPath.Patch.IsEmpty() && !rPath.Patch.IsEmpty() { - totalOps++ - go checkOperation(lPath.Patch.Value, rPath.Patch.Value, opChan, v3.PatchLabel) - } - if !lPath.Patch.IsEmpty() && rPath.Patch.IsEmpty() { - CreateChange(changes, PropertyRemoved, v3.PatchLabel, - lPath.Patch.ValueNode, nil, BreakingRemoved(CompPathItem, PropPatch), lPath.Patch.Value, nil) - } - if lPath.Patch.IsEmpty() && !rPath.Patch.IsEmpty() { - CreateChange(changes, PropertyAdded, v3.PatchLabel, - nil, rPath.Patch.ValueNode, BreakingAdded(CompPathItem, PropPatch), nil, lPath.Patch.Value) - } - - // parameters - if !lPath.Parameters.IsEmpty() && !rPath.Parameters.IsEmpty() { - lParams := lPath.Parameters.Value - rParams := rPath.Parameters.Value - lp, rp := extractV2ParametersIntoInterface(lParams, rParams) - checkParameters(lp, rp, changes, pc) - } - if !lPath.Parameters.IsEmpty() && rPath.Parameters.IsEmpty() { - CreateChange(changes, PropertyRemoved, v3.ParametersLabel, - lPath.Parameters.ValueNode, nil, BreakingRemoved(CompPathItem, PropParameters), lPath.Parameters.Value, - nil) - } - if lPath.Parameters.IsEmpty() && !rPath.Parameters.IsEmpty() { - // Check configurable breaking rules first - breaking := BreakingAdded(CompPathItem, PropParameters) - // If config says not breaking, fall back to semantic check (required params are breaking) - if !breaking { - for i := range rPath.Parameters.Value { - param := rPath.Parameters.Value[i].Value - if param.Required.Value { - breaking = true - break - } - } - 
} - CreateChange(changes, PropertyAdded, v3.ParametersLabel, - nil, rPath.Parameters.ValueNode, breaking, nil, - rPath.Parameters.Value) - } - - // collect up operations changes. - completedOperations := 0 - for completedOperations < totalOps { - n := <-opChan - switch n.label { - case v3.GetLabel: - pc.GetChanges = n.changes - case v3.PutLabel: - pc.PutChanges = n.changes - case v3.PostLabel: - pc.PostChanges = n.changes - case v3.DeleteLabel: - pc.DeleteChanges = n.changes - case v3.OptionsLabel: - pc.OptionsChanges = n.changes - case v2.HeadLabel: - pc.HeadChanges = n.changes - case v2.PatchLabel: - pc.PatchChanges = n.changes - } - completedOperations++ - - } - pc.ExtensionChanges = CompareExtensions(lPath.Extensions, rPath.Extensions) - return props -} - -func extractV2ParametersIntoInterface(l, r []low.ValueReference[*v2.Parameter]) ([]low.ValueReference[low.SharedParameters], - []low.ValueReference[low.SharedParameters], -) { - lp := make([]low.ValueReference[low.SharedParameters], len(l)) - rp := make([]low.ValueReference[low.SharedParameters], len(r)) - for i := range l { - lp[i] = low.ValueReference[low.SharedParameters]{ - Value: l[i].Value, - ValueNode: l[i].ValueNode, - } - } - for i := range r { - rp[i] = low.ValueReference[low.SharedParameters]{ - Value: r[i].Value, - ValueNode: r[i].ValueNode, - } - } - return lp, rp -} - -func extractV3ParametersIntoInterface(l, r []low.ValueReference[*v3.Parameter]) ([]low.ValueReference[low.SharedParameters], - []low.ValueReference[low.SharedParameters], -) { - lp := make([]low.ValueReference[low.SharedParameters], len(l)) - rp := make([]low.ValueReference[low.SharedParameters], len(r)) - for i := range l { - lp[i] = low.ValueReference[low.SharedParameters]{ - Value: l[i].Value, - ValueNode: l[i].ValueNode, - } - } - for i := range r { - rp[i] = low.ValueReference[low.SharedParameters]{ - Value: r[i].Value, - ValueNode: r[i].ValueNode, - } - } - return lp, rp -} - -func checkParameters(lParams, rParams 
[]low.ValueReference[low.SharedParameters], changes *[]*Change, pc *PathItemChanges) { - lv := make(map[string]low.SharedParameters, len(lParams)) - rv := make(map[string]low.SharedParameters, len(rParams)) - lRefs := make(map[string]*low.ValueReference[low.SharedParameters], len(lParams)) - rRefs := make(map[string]*low.ValueReference[low.SharedParameters], len(rParams)) - - for i := range lParams { - s := lParams[i].Value.GetName().Value - lv[s] = lParams[i].Value - lRefs[s] = &lParams[i] // Keep the reference wrapper - } - for i := range rParams { - s := rParams[i].Value.GetName().Value - rv[s] = rParams[i].Value - rRefs[s] = &rParams[i] // Keep the reference wrapper - } - - var paramChanges []*ParameterChanges - for n := range lv { - if _, ok := rv[n]; ok { - if !low.AreEqual(lv[n], rv[n]) { - ch := CompareParameters(lv[n], rv[n]) - if ch != nil { - // Preserve reference information if this parameter is a $ref - PreserveParameterReference(lRefs, rRefs, n, ch) - paramChanges = append(paramChanges, ch) - } - } - continue - } - CreateChange(changes, ObjectRemoved, v3.ParametersLabel, - lv[n].GetName().ValueNode, nil, true, lv[n], - nil) - - } - for n := range rv { - if _, ok := lv[n]; !ok { - CreateChange(changes, ObjectAdded, v3.ParametersLabel, - nil, rv[n].GetName().ValueNode, rv[n].GetRequired().Value, nil, - rv[n]) - } - } - pc.ParameterChanges = paramChanges -} - -func compareOpenAPIPathItem(lPath, rPath *v3.PathItem, changes *[]*Change, pc *PathItemChanges) { - // var props []*PropertyCheck - - totalOps := 0 - opChan := make(chan opCheck) - - // get - if !lPath.Get.IsEmpty() && !rPath.Get.IsEmpty() { - totalOps++ - go checkOperation(lPath.Get.Value, rPath.Get.Value, opChan, v3.GetLabel) - } - if !lPath.Get.IsEmpty() && rPath.Get.IsEmpty() { - CreateChange(changes, PropertyRemoved, v3.GetLabel, - lPath.Get.ValueNode, nil, BreakingRemoved(CompPathItem, PropGet), lPath.Get.Value, nil) - } - if lPath.Get.IsEmpty() && !rPath.Get.IsEmpty() { - 
CreateChange(changes, PropertyAdded, v3.GetLabel, - nil, rPath.Get.ValueNode, BreakingAdded(CompPathItem, PropGet), nil, lPath.Get.Value) - } - - // put - if !lPath.Put.IsEmpty() && !rPath.Put.IsEmpty() { - totalOps++ - go checkOperation(lPath.Put.Value, rPath.Put.Value, opChan, v3.PutLabel) - } - if !lPath.Put.IsEmpty() && rPath.Put.IsEmpty() { - CreateChange(changes, PropertyRemoved, v3.PutLabel, - lPath.Put.ValueNode, nil, BreakingRemoved(CompPathItem, PropPut), lPath.Put.Value, nil) - } - if lPath.Put.IsEmpty() && !rPath.Put.IsEmpty() { - CreateChange(changes, PropertyAdded, v3.PutLabel, - nil, rPath.Put.ValueNode, BreakingAdded(CompPathItem, PropPut), nil, lPath.Put.Value) - } - - // post - if !lPath.Post.IsEmpty() && !rPath.Post.IsEmpty() { - totalOps++ - go checkOperation(lPath.Post.Value, rPath.Post.Value, opChan, v3.PostLabel) - } - if !lPath.Post.IsEmpty() && rPath.Post.IsEmpty() { - CreateChange(changes, PropertyRemoved, v3.PostLabel, - lPath.Post.ValueNode, nil, BreakingRemoved(CompPathItem, PropPost), lPath.Post.Value, nil) - } - if lPath.Post.IsEmpty() && !rPath.Post.IsEmpty() { - CreateChange(changes, PropertyAdded, v3.PostLabel, - nil, rPath.Post.ValueNode, BreakingAdded(CompPathItem, PropPost), nil, lPath.Post.Value) - } - - // delete - if !lPath.Delete.IsEmpty() && !rPath.Delete.IsEmpty() { - totalOps++ - go checkOperation(lPath.Delete.Value, rPath.Delete.Value, opChan, v3.DeleteLabel) - } - if !lPath.Delete.IsEmpty() && rPath.Delete.IsEmpty() { - CreateChange(changes, PropertyRemoved, v3.DeleteLabel, - lPath.Delete.ValueNode, nil, BreakingRemoved(CompPathItem, PropDelete), lPath.Delete.Value, nil) - } - if lPath.Delete.IsEmpty() && !rPath.Delete.IsEmpty() { - CreateChange(changes, PropertyAdded, v3.DeleteLabel, - nil, rPath.Delete.ValueNode, BreakingAdded(CompPathItem, PropDelete), nil, lPath.Delete.Value) - } - - // options - if !lPath.Options.IsEmpty() && !rPath.Options.IsEmpty() { - totalOps++ - go checkOperation(lPath.Options.Value, 
rPath.Options.Value, opChan, v3.OptionsLabel) - } - if !lPath.Options.IsEmpty() && rPath.Options.IsEmpty() { - CreateChange(changes, PropertyRemoved, v3.OptionsLabel, - lPath.Options.ValueNode, nil, BreakingRemoved(CompPathItem, PropOptions), lPath.Options.Value, nil) - } - if lPath.Options.IsEmpty() && !rPath.Options.IsEmpty() { - CreateChange(changes, PropertyAdded, v3.OptionsLabel, - nil, rPath.Options.ValueNode, BreakingAdded(CompPathItem, PropOptions), nil, lPath.Options.Value) - } - - // head - if !lPath.Head.IsEmpty() && !rPath.Head.IsEmpty() { - totalOps++ - go checkOperation(lPath.Head.Value, rPath.Head.Value, opChan, v3.HeadLabel) - } - if !lPath.Head.IsEmpty() && rPath.Head.IsEmpty() { - CreateChange(changes, PropertyRemoved, v3.HeadLabel, - lPath.Head.ValueNode, nil, BreakingRemoved(CompPathItem, PropHead), lPath.Head.Value, nil) - } - if lPath.Head.IsEmpty() && !rPath.Head.IsEmpty() { - CreateChange(changes, PropertyAdded, v3.HeadLabel, - nil, rPath.Head.ValueNode, BreakingAdded(CompPathItem, PropHead), nil, lPath.Head.Value) - } - - // patch - if !lPath.Patch.IsEmpty() && !rPath.Patch.IsEmpty() { - totalOps++ - go checkOperation(lPath.Patch.Value, rPath.Patch.Value, opChan, v3.PatchLabel) - } - if !lPath.Patch.IsEmpty() && rPath.Patch.IsEmpty() { - CreateChange(changes, PropertyRemoved, v3.PatchLabel, - lPath.Patch.ValueNode, nil, BreakingRemoved(CompPathItem, PropPatch), lPath.Patch.Value, nil) - } - if lPath.Patch.IsEmpty() && !rPath.Patch.IsEmpty() { - CreateChange(changes, PropertyAdded, v3.PatchLabel, - nil, rPath.Patch.ValueNode, BreakingAdded(CompPathItem, PropPatch), nil, lPath.Patch.Value) - } - - // trace - if !lPath.Trace.IsEmpty() && !rPath.Trace.IsEmpty() { - totalOps++ - go checkOperation(lPath.Trace.Value, rPath.Trace.Value, opChan, v3.TraceLabel) - } - if !lPath.Trace.IsEmpty() && rPath.Trace.IsEmpty() { - CreateChange(changes, PropertyRemoved, v3.TraceLabel, - lPath.Trace.ValueNode, nil, BreakingRemoved(CompPathItem, PropTrace), 
lPath.Trace.Value, nil) - } - if lPath.Trace.IsEmpty() && !rPath.Trace.IsEmpty() { - CreateChange(changes, PropertyAdded, v3.TraceLabel, - nil, rPath.Trace.ValueNode, BreakingAdded(CompPathItem, PropTrace), nil, lPath.Trace.Value) - } - - // query - if !lPath.Query.IsEmpty() && !rPath.Query.IsEmpty() { - totalOps++ - go checkOperation(lPath.Query.Value, rPath.Query.Value, opChan, v3.QueryLabel) - } - if !lPath.Query.IsEmpty() && rPath.Query.IsEmpty() { - CreateChange(changes, PropertyRemoved, v3.QueryLabel, - lPath.Query.ValueNode, nil, BreakingRemoved(CompPathItem, PropQuery), lPath.Query.Value, nil) - } - if lPath.Query.IsEmpty() && !rPath.Query.IsEmpty() { - CreateChange(changes, PropertyAdded, v3.QueryLabel, - nil, rPath.Query.ValueNode, BreakingAdded(CompPathItem, PropQuery), nil, rPath.Query.Value) - } - - // additionalOperations (OpenAPI 3.2+) - if lPath.AdditionalOperations.Value != nil && rPath.AdditionalOperations.Value == nil { - CreateChange(changes, PropertyRemoved, v3.AdditionalOperationsLabel, - lPath.AdditionalOperations.ValueNode, nil, BreakingRemoved(CompPathItem, PropAdditionalOperations), lPath.AdditionalOperations.Value, nil) - } - if lPath.AdditionalOperations.Value == nil && rPath.AdditionalOperations.Value != nil { - CreateChange(changes, PropertyAdded, v3.AdditionalOperationsLabel, - nil, rPath.AdditionalOperations.ValueNode, BreakingAdded(CompPathItem, PropAdditionalOperations), nil, rPath.AdditionalOperations.Value) - } - if lPath.AdditionalOperations.Value != nil && rPath.AdditionalOperations.Value != nil { - - lKeys := make([]low.KeyReference[string], 0, lPath.AdditionalOperations.Value.Len()) - for lk := range lPath.AdditionalOperations.Value.FromOldest() { - lKeys = append(lKeys, lk) - } - rKeys := make([]low.KeyReference[string], 0, rPath.AdditionalOperations.Value.Len()) - for rk := range rPath.AdditionalOperations.Value.FromOldest() { - rKeys = append(rKeys, rk) - } - - for i := range lKeys { - // check right keys for match - found 
:= false - for j := range rKeys { - if lKeys[i].Value == rKeys[j].Value { - found = true - // compare the two operations - totalOps++ - go checkOperation(lPath.AdditionalOperations.Value.GetOrZero(lKeys[j]).Value, - rPath.AdditionalOperations.Value.GetOrZero(rKeys[j]).Value, opChan, lKeys[i].Value) - break - } - } - // not found, was removed - if !found { - CreateChange(changes, PropertyRemoved, v3.AdditionalOperationsLabel, - lPath.AdditionalOperations.Value.GetOrZero(lKeys[i]).ValueNode, nil, BreakingRemoved(CompPathItem, PropAdditionalOperations), - lPath.AdditionalOperations.Value.GetOrZero(lKeys[i]).Value, nil) - } - } - - // check for added operations - for i := range rKeys { - // check left keys for match - found := false - for j := range lKeys { - if rKeys[i].Value == lKeys[j].Value { - found = true - break - } - } - // not found, was added - if !found { - CreateChange(changes, PropertyAdded, v3.AdditionalOperationsLabel, - nil, rPath.AdditionalOperations.Value.GetOrZero(rKeys[i]).ValueNode, BreakingAdded(CompPathItem, PropAdditionalOperations), - nil, rPath.AdditionalOperations.Value.GetOrZero(rKeys[i]).Value) - } - } - } - - // servers - pc.ServerChanges = checkServers(lPath.Servers, rPath.Servers, CompPathItem, PropServers) - - // parameters - if !lPath.Parameters.IsEmpty() && !rPath.Parameters.IsEmpty() { - lParams := lPath.Parameters.Value - rParams := rPath.Parameters.Value - lp, rp := extractV3ParametersIntoInterface(lParams, rParams) - checkParameters(lp, rp, changes, pc) - } - - if !lPath.Parameters.IsEmpty() && rPath.Parameters.IsEmpty() { - CreateChange(changes, PropertyRemoved, v3.ParametersLabel, - lPath.Parameters.ValueNode, nil, BreakingRemoved(CompPathItem, PropParameters), lPath.Parameters.Value, - nil) - } - if lPath.Parameters.IsEmpty() && !rPath.Parameters.IsEmpty() { - // Check configurable breaking rules first - breaking := BreakingAdded(CompPathItem, PropParameters) - // If config says not breaking, fall back to semantic check 
(required params are breaking) - if !breaking { - for i := range rPath.Parameters.Value { - param := rPath.Parameters.Value[i].Value - if param.Required.Value { - breaking = true - break - } - } - } - CreateChange(changes, PropertyAdded, v3.ParametersLabel, - nil, rPath.Parameters.ValueNode, breaking, nil, - rPath.Parameters.Value) - } - - // collect up operations changes. - completedOperations := 0 - for completedOperations < totalOps { - n := <-opChan - switch n.label { - case v3.GetLabel: - pc.GetChanges = n.changes - case v3.PutLabel: - pc.PutChanges = n.changes - case v3.PostLabel: - pc.PostChanges = n.changes - case v3.DeleteLabel: - pc.DeleteChanges = n.changes - case v3.OptionsLabel: - pc.OptionsChanges = n.changes - case v3.HeadLabel: - pc.HeadChanges = n.changes - case v3.PatchLabel: - pc.PatchChanges = n.changes - case v3.TraceLabel: - pc.TraceChanges = n.changes - case v3.QueryLabel: - pc.QueryChanges = n.changes - default: - if pc.AdditionalOperationChanges == nil { - pc.AdditionalOperationChanges = make(map[string]*OperationChanges) - } - if n.changes != nil { - pc.AdditionalOperationChanges[n.label] = n.changes - } - } - completedOperations++ - } - pc.ExtensionChanges = CompareExtensions(lPath.Extensions, rPath.Extensions) -} - -func checkOperation(l, r any, done chan opCheck, method string) { - done <- opCheck{ - label: method, - changes: CompareOperations(l, r), - } -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/paths.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/paths.go deleted file mode 100644 index 2b35d36e4c5..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/paths.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2022-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - "reflect" - "sync" - - "github.com/pb33f/libopenapi/datamodel/low" - v2 "github.com/pb33f/libopenapi/datamodel/low/v2" - v3 
"github.com/pb33f/libopenapi/datamodel/low/v3" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// PathsChanges represents changes found between two Swagger or OpenAPI Paths Objects. -type PathsChanges struct { - *PropertyChanges - PathItemsChanges map[string]*PathItemChanges `json:"pathItems,omitempty" yaml:"pathItems,omitempty"` - ExtensionChanges *ExtensionChanges `json:"extensions,omitempty" yaml:"extensions,omitempty"` -} - -// GetAllChanges returns a slice of all changes made between Paths objects -func (p *PathsChanges) GetAllChanges() []*Change { - if p == nil { - return nil - } - var changes []*Change - changes = append(changes, p.Changes...) - for k := range p.PathItemsChanges { - if p.PathItemsChanges[k] != nil { - changes = append(changes, p.PathItemsChanges[k].GetAllChanges()...) - } - } - if p.ExtensionChanges != nil { - changes = append(changes, p.ExtensionChanges.GetAllChanges()...) - } - return changes -} - -// TotalChanges returns the total number of changes between two Swagger or OpenAPI Paths Objects -func (p *PathsChanges) TotalChanges() int { - if p == nil { - return 0 - } - c := p.PropertyChanges.TotalChanges() - for k := range p.PathItemsChanges { - if p.PathItemsChanges[k] != nil { - c += p.PathItemsChanges[k].TotalChanges() - } - } - if p.ExtensionChanges != nil { - c += p.ExtensionChanges.TotalChanges() - } - return c -} - -// TotalBreakingChanges returns tht total number of changes found between two Swagger or OpenAPI Path Objects -func (p *PathsChanges) TotalBreakingChanges() int { - c := p.PropertyChanges.TotalBreakingChanges() - for k := range p.PathItemsChanges { - if p.PathItemsChanges[k] != nil { - c += p.PathItemsChanges[k].TotalBreakingChanges() - } - } - return c -} - -// ComparePaths compares a left and right Swagger or OpenAPI Paths Object for changes. If found, returns a pointer -// to a PathsChanges instance. Returns nil if nothing is found. 
-func ComparePaths(l, r any) *PathsChanges { - var changes []*Change - - pc := new(PathsChanges) - pathChanges := make(map[string]*PathItemChanges) - - // Swagger - if reflect.TypeOf(&v2.Paths{}) == reflect.TypeOf(l) && - reflect.TypeOf(&v2.Paths{}) == reflect.TypeOf(r) { - - lPath := l.(*v2.Paths) - rPath := r.(*v2.Paths) - - // perform hash check to avoid further processing - if low.AreEqual(lPath, rPath) { - return nil - } - - lKeys := make(map[string]low.ValueReference[*v2.PathItem]) - rKeys := make(map[string]low.ValueReference[*v2.PathItem]) - for k, v := range lPath.PathItems.FromOldest() { - lKeys[k.Value] = v - } - for k, v := range rPath.PathItems.FromOldest() { - rKeys[k.Value] = v - } - - // run every comparison in a thread. - var mLock sync.Mutex - compare := func(path string, _ map[string]*PathItemChanges, l, r *v2.PathItem, doneChan chan struct{}) { - if !low.AreEqual(l, r) { - mLock.Lock() - pathChanges[path] = ComparePathItems(l, r) - mLock.Unlock() - } - doneChan <- struct{}{} - } - - doneChan := make(chan struct{}) - pathsChecked := 0 - - for k := range lKeys { - if _, ok := rKeys[k]; ok { - go compare(k, pathChanges, lKeys[k].Value, rKeys[k].Value, doneChan) - pathsChecked++ - continue - } - g, p := lPath.FindPathAndKey(k) - CreateChange(&changes, ObjectRemoved, k, - g.KeyNode, nil, BreakingRemoved(CompPaths, PropPath), - p.Value, nil) - } - - for k := range rKeys { - if _, ok := lKeys[k]; !ok { - g, p := rPath.FindPathAndKey(k) - CreateChange(&changes, ObjectAdded, k, - nil, g.KeyNode, BreakingAdded(CompPaths, PropPath), - nil, p.Value) - } - } - - // wait for the things to be done. 
- completedChecks := 0 - for completedChecks < pathsChecked { - <-doneChan - completedChecks++ - } - if len(pathChanges) > 0 { - pc.PathItemsChanges = pathChanges - } - - pc.ExtensionChanges = CompareExtensions(lPath.Extensions, rPath.Extensions) - } - - // OpenAPI - if reflect.TypeOf(&v3.Paths{}) == reflect.TypeOf(l) && - reflect.TypeOf(&v3.Paths{}) == reflect.TypeOf(r) { - - lPath := l.(*v3.Paths) - rPath := r.(*v3.Paths) - - // perform hash check to avoid further processing - if low.AreEqual(lPath, rPath) { - return nil - } - - lKeys := make(map[string]low.ValueReference[*v3.PathItem]) - rKeys := make(map[string]low.ValueReference[*v3.PathItem]) - - if lPath != nil && lPath.PathItems != nil { - for k, v := range lPath.PathItems.FromOldest() { - lKeys[k.Value] = v - } - } - if rPath != nil && rPath.PathItems != nil { - for k, v := range rPath.PathItems.FromOldest() { - rKeys[k.Value] = v - } - } - - // run every comparison in a thread. - var mLock sync.Mutex - compare := func(path string, _ map[string]*PathItemChanges, l, r *v3.PathItem, doneChan chan struct{}) { - if !low.AreEqual(l, r) { - mLock.Lock() - pathChanges[path] = ComparePathItems(l, r) - mLock.Unlock() - } - doneChan <- struct{}{} - } - - doneChan := make(chan struct{}) - pathsChecked := 0 - - for k := range lKeys { - if _, ok := rKeys[k]; ok { - go compare(k, pathChanges, lKeys[k].Value, rKeys[k].Value, doneChan) - pathsChecked++ - continue - } - g, p := lPath.FindPathAndKey(k) - CreateChange(&changes, ObjectRemoved, k, - g.KeyNode, nil, BreakingRemoved(CompPaths, PropPath), - p.Value, nil) - } - - for k := range rKeys { - if _, ok := lKeys[k]; !ok { - g, p := rPath.FindPathAndKey(k) - CreateChange(&changes, ObjectAdded, k, - nil, g.KeyNode, BreakingAdded(CompPaths, PropPath), - nil, p.Value) - } - } - // wait for the things to be done. 
- completedChecks := 0 - for completedChecks < pathsChecked { - <-doneChan - completedChecks++ - } - if len(pathChanges) > 0 { - pc.PathItemsChanges = pathChanges - } - - var lExt, rExt *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] - if lPath != nil { - lExt = lPath.Extensions - } - if rPath != nil { - rExt = rPath.Extensions - } - - pc.ExtensionChanges = CompareExtensions(lExt, rExt) - } - pc.PropertyChanges = NewPropertyChanges(changes) - return pc -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/request_body.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/request_body.go deleted file mode 100644 index 95afa638454..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/request_body.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/datamodel/low/v3" -) - -// RequestBodyChanges represents changes made between two OpenAPI RequestBody Objects -type RequestBodyChanges struct { - *PropertyChanges - ContentChanges map[string]*MediaTypeChanges `json:"content,omitempty" yaml:"content,omitempty"` - ExtensionChanges *ExtensionChanges `json:"extensions,omitempty" yaml:"extensions,omitempty"` -} - -// GetAllChanges returns a slice of all changes made between RequestBody objects -func (rb *RequestBodyChanges) GetAllChanges() []*Change { - if rb == nil { - return nil - } - var changes []*Change - changes = append(changes, rb.Changes...) - for k := range rb.ContentChanges { - changes = append(changes, rb.ContentChanges[k].GetAllChanges()...) - } - if rb.ExtensionChanges != nil { - changes = append(changes, rb.ExtensionChanges.GetAllChanges()...) 
- } - return changes -} - -// TotalChanges returns the total number of changes found between two OpenAPI RequestBody objects -func (rb *RequestBodyChanges) TotalChanges() int { - if rb == nil { - return 0 - } - c := rb.PropertyChanges.TotalChanges() - for k := range rb.ContentChanges { - c += rb.ContentChanges[k].TotalChanges() - } - if rb.ExtensionChanges != nil { - c += rb.ExtensionChanges.TotalChanges() - } - return c -} - -// TotalBreakingChanges returns the total number of breaking changes found between OpenAPI RequestBody objects -func (rb *RequestBodyChanges) TotalBreakingChanges() int { - c := rb.PropertyChanges.TotalBreakingChanges() - for k := range rb.ContentChanges { - c += rb.ContentChanges[k].TotalBreakingChanges() - } - return c -} - -// CompareRequestBodies compares a left and right OpenAPI RequestBody object for changes. If found returns a pointer -// to a RequestBodyChanges instance. Returns nil if nothing was found. -func CompareRequestBodies(l, r *v3.RequestBody) *RequestBodyChanges { - if low.AreEqual(l, r) { - return nil - } - - var changes []*Change - props := make([]*PropertyCheck, 0, 2) - - props = append(props, - NewPropertyCheck(CompRequestBody, PropDescription, - l.Description.ValueNode, r.Description.ValueNode, - v3.DescriptionLabel, &changes, l, r), - NewPropertyCheck(CompRequestBody, PropRequired, - l.Required.ValueNode, r.Required.ValueNode, - v3.RequiredLabel, &changes, l, r), - ) - - CheckProperties(props) - - rbc := new(RequestBodyChanges) - rbc.ContentChanges = CheckMapForChanges(l.Content.Value, r.Content.Value, - &changes, v3.ContentLabel, CompareMediaTypes) - rbc.ExtensionChanges = CompareExtensions(l.Extensions, r.Extensions) - rbc.PropertyChanges = NewPropertyChanges(changes) - return rbc -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/response.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/response.go deleted file mode 100644 index 69225839b62..00000000000 --- 
a/vendor/github.com/pb33f/libopenapi/what-changed/model/response.go +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright 2022-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - "reflect" - - "github.com/pb33f/libopenapi/datamodel/low" - v3 "github.com/pb33f/libopenapi/datamodel/low/v3" -) - -import ( - "github.com/pb33f/libopenapi/datamodel/low/v2" -) - -// ResponseChanges represents changes found between two Swagger or OpenAPI Response objects. -type ResponseChanges struct { - *PropertyChanges - ExtensionChanges *ExtensionChanges `json:"extensions,omitempty" yaml:"extensions,omitempty"` - HeadersChanges map[string]*HeaderChanges `json:"headers,omitempty" yaml:"headers,omitempty"` - - // Swagger Response Properties. - SchemaChanges *SchemaChanges `json:"schemas,omitempty" yaml:"schemas,omitempty"` - ExamplesChanges *ExamplesChanges `json:"examples,omitempty" yaml:"examples,omitempty"` - - // OpenAPI Response Properties. - ContentChanges map[string]*MediaTypeChanges `json:"content,omitempty" yaml:"content,omitempty"` - LinkChanges map[string]*LinkChanges `json:"links,omitempty" yaml:"links,omitempty"` -} - -// GetAllChanges returns a slice of all changes made between RequestBody objects -func (r *ResponseChanges) GetAllChanges() []*Change { - if r == nil { - return nil - } - var changes []*Change - changes = append(changes, r.Changes...) - if r.ExtensionChanges != nil { - changes = append(changes, r.ExtensionChanges.GetAllChanges()...) - } - if r.SchemaChanges != nil { - changes = append(changes, r.SchemaChanges.GetAllChanges()...) - } - if r.ExamplesChanges != nil { - changes = append(changes, r.ExamplesChanges.GetAllChanges()...) - } - for k := range r.HeadersChanges { - changes = append(changes, r.HeadersChanges[k].GetAllChanges()...) - } - for k := range r.ContentChanges { - changes = append(changes, r.ContentChanges[k].GetAllChanges()...) 
- } - for k := range r.LinkChanges { - changes = append(changes, r.LinkChanges[k].GetAllChanges()...) - } - return changes -} - -// TotalChanges returns the total number of changes found between two Swagger or OpenAPI Response Objects -func (r *ResponseChanges) TotalChanges() int { - if r == nil { - return 0 - } - c := r.PropertyChanges.TotalChanges() - if r.ExtensionChanges != nil { - c += r.ExtensionChanges.TotalChanges() - } - if r.SchemaChanges != nil { - c += r.SchemaChanges.TotalChanges() - } - if r.ExamplesChanges != nil { - c += r.ExamplesChanges.TotalChanges() - } - for k := range r.HeadersChanges { - c += r.HeadersChanges[k].TotalChanges() - } - for k := range r.ContentChanges { - c += r.ContentChanges[k].TotalChanges() - } - for k := range r.LinkChanges { - c += r.LinkChanges[k].TotalChanges() - } - return c -} - -// TotalBreakingChanges returns the total number of breaking changes found between two swagger or OpenAPI -// Response Objects -func (r *ResponseChanges) TotalBreakingChanges() int { - c := r.PropertyChanges.TotalBreakingChanges() - if r.SchemaChanges != nil { - c += r.SchemaChanges.TotalBreakingChanges() - } - for k := range r.HeadersChanges { - c += r.HeadersChanges[k].TotalBreakingChanges() - } - for k := range r.ContentChanges { - c += r.ContentChanges[k].TotalBreakingChanges() - } - for k := range r.LinkChanges { - c += r.LinkChanges[k].TotalBreakingChanges() - } - return c -} - -// CompareResponseV2 is a Swagger type safe proxy for CompareResponse -func CompareResponseV2(l, r *v2.Response) *ResponseChanges { - return CompareResponse(l, r) -} - -// CompareResponseV3 is an OpenAPI type safe proxy for CompareResponse -func CompareResponseV3(l, r *v3.Response) *ResponseChanges { - return CompareResponse(l, r) -} - -// CompareResponse compares a left and right Swagger or OpenAPI Response object. If anything is found -// a pointer to a ResponseChanges is returned, otherwise it returns nil. 
-func CompareResponse(l, r any) *ResponseChanges { - var changes []*Change - var props []*PropertyCheck - - rc := new(ResponseChanges) - - if reflect.TypeOf(&v2.Response{}) == reflect.TypeOf(l) && reflect.TypeOf(&v2.Response{}) == reflect.TypeOf(r) { - - lResponse := l.(*v2.Response) - rResponse := r.(*v2.Response) - - // perform hash check to avoid further processing - if low.AreEqual(lResponse, rResponse) { - return nil - } - - // description - addPropertyCheck(&props, lResponse.Description.ValueNode, rResponse.Description.ValueNode, - lResponse.Description.Value, rResponse.Description.Value, &changes, v3.DescriptionLabel, false, CompResponse, PropDescription) - - if !lResponse.Schema.IsEmpty() && !rResponse.Schema.IsEmpty() { - rc.SchemaChanges = CompareSchemas(lResponse.Schema.Value, rResponse.Schema.Value) - } - if !lResponse.Schema.IsEmpty() && rResponse.Schema.IsEmpty() { - CreateChange(&changes, ObjectRemoved, v3.SchemaLabel, - lResponse.Schema.ValueNode, nil, BreakingRemoved(CompResponse, PropSchema), - lResponse.Schema.Value, nil) - } - if lResponse.Schema.IsEmpty() && !rResponse.Schema.IsEmpty() { - CreateChange(&changes, ObjectAdded, v3.SchemaLabel, - nil, rResponse.Schema.ValueNode, BreakingAdded(CompResponse, PropSchema), - nil, rResponse.Schema.Value) - } - - rc.HeadersChanges = CheckMapForChanges(lResponse.Headers.Value, rResponse.Headers.Value, - &changes, v3.HeadersLabel, CompareHeadersV2) - - if !lResponse.Examples.IsEmpty() && !rResponse.Examples.IsEmpty() { - rc.ExamplesChanges = CompareExamplesV2(lResponse.Examples.Value, rResponse.Examples.Value) - } - if !lResponse.Examples.IsEmpty() && rResponse.Examples.IsEmpty() { - CreateChange(&changes, PropertyRemoved, v3.ExamplesLabel, - lResponse.Schema.ValueNode, nil, BreakingRemoved(CompResponse, PropExamples), - lResponse.Schema.Value, nil) - } - if lResponse.Examples.IsEmpty() && !rResponse.Examples.IsEmpty() { - CreateChange(&changes, ObjectAdded, v3.ExamplesLabel, - nil, 
rResponse.Schema.ValueNode, BreakingAdded(CompResponse, PropExamples), - nil, lResponse.Schema.Value) - } - - rc.ExtensionChanges = CompareExtensions(lResponse.Extensions, rResponse.Extensions) - } - - if reflect.TypeOf(&v3.Response{}) == reflect.TypeOf(l) && reflect.TypeOf(&v3.Response{}) == reflect.TypeOf(r) { - - lResponse := l.(*v3.Response) - rResponse := r.(*v3.Response) - - // perform hash check to avoid further processing - if low.AreEqual(lResponse, rResponse) { - return nil - } - - // summary (OpenAPI 3.2+) - addPropertyCheck(&props, lResponse.Summary.ValueNode, rResponse.Summary.ValueNode, - lResponse.Summary.Value, rResponse.Summary.Value, &changes, v3.SummaryLabel, - BreakingModified(CompResponse, PropSummary), CompResponse, PropSummary) - - // description - addPropertyCheck(&props, lResponse.Description.ValueNode, rResponse.Description.ValueNode, - lResponse.Description.Value, rResponse.Description.Value, &changes, v3.DescriptionLabel, - BreakingModified(CompResponse, PropDescription), CompResponse, PropDescription) - - rc.HeadersChanges = CheckMapForChanges(lResponse.Headers.Value, rResponse.Headers.Value, - &changes, v3.HeadersLabel, CompareHeadersV3) - - rc.ContentChanges = CheckMapForChanges(lResponse.Content.Value, rResponse.Content.Value, - &changes, v3.ContentLabel, CompareMediaTypes) - - rc.LinkChanges = CheckMapForChanges(lResponse.Links.Value, rResponse.Links.Value, - &changes, v3.LinksLabel, CompareLinks) - - rc.ExtensionChanges = CompareExtensions(lResponse.Extensions, rResponse.Extensions) - } - - CheckProperties(props) - rc.PropertyChanges = NewPropertyChanges(changes) - return rc -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/responses.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/responses.go deleted file mode 100644 index 7a6ecc7679b..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/responses.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2022-2025 Princess Beef Heavy 
Industries, LLC / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - "reflect" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/datamodel/low/v2" - "github.com/pb33f/libopenapi/datamodel/low/v3" -) - -// ResponsesChanges represents changes made between two Swagger or OpenAPI Responses objects. -type ResponsesChanges struct { - *PropertyChanges - ResponseChanges map[string]*ResponseChanges `json:"response,omitempty" yaml:"response,omitempty"` - DefaultChanges *ResponseChanges `json:"default,omitempty" yaml:"default,omitempty"` - ExtensionChanges *ExtensionChanges `json:"extensions,omitempty" yaml:"extensions,omitempty"` -} - -// GetAllChanges returns a slice of all changes made between Responses objects -func (r *ResponsesChanges) GetAllChanges() []*Change { - if r == nil { - return nil - } - var changes []*Change - changes = append(changes, r.Changes...) - for k := range r.ResponseChanges { - changes = append(changes, r.ResponseChanges[k].GetAllChanges()...) - } - if r.DefaultChanges != nil { - changes = append(changes, r.DefaultChanges.GetAllChanges()...) - } - if r.ExtensionChanges != nil { - changes = append(changes, r.ExtensionChanges.GetAllChanges()...) 
- } - return changes -} - -// TotalChanges returns the total number of changes found between two Swagger or OpenAPI Responses objects -func (r *ResponsesChanges) TotalChanges() int { - if r == nil { - return 0 - } - c := r.PropertyChanges.TotalChanges() - for k := range r.ResponseChanges { - c += r.ResponseChanges[k].TotalChanges() - } - if r.DefaultChanges != nil { - c += r.DefaultChanges.TotalChanges() - } - if r.ExtensionChanges != nil { - c += r.ExtensionChanges.TotalChanges() - } - return c -} - -// TotalBreakingChanges returns the total number of changes found between two Swagger or OpenAPI -// Responses Objects -func (r *ResponsesChanges) TotalBreakingChanges() int { - c := r.PropertyChanges.TotalBreakingChanges() - for k := range r.ResponseChanges { - c += r.ResponseChanges[k].TotalBreakingChanges() - } - if r.DefaultChanges != nil { - c += r.DefaultChanges.TotalBreakingChanges() - } - return c -} - -// CompareResponses compares a left and right Swagger or OpenAPI Responses object for any changes. If found -// returns a pointer to ResponsesChanges, or returns nil. 
-func CompareResponses(l, r any) *ResponsesChanges { - var changes []*Change - - rc := new(ResponsesChanges) - - // swagger - if reflect.TypeOf(&v2.Responses{}) == reflect.TypeOf(l) && - reflect.TypeOf(&v2.Responses{}) == reflect.TypeOf(r) { - - lResponses := l.(*v2.Responses) - rResponses := r.(*v2.Responses) - - // perform hash check to avoid further processing - if low.AreEqual(lResponses, rResponses) { - return nil - } - - if !lResponses.Default.IsEmpty() && !rResponses.Default.IsEmpty() { - rc.DefaultChanges = CompareResponse(lResponses.Default.Value, rResponses.Default.Value) - } - if !lResponses.Default.IsEmpty() && rResponses.Default.IsEmpty() { - CreateChange(&changes, ObjectRemoved, v3.DefaultLabel, - lResponses.Default.ValueNode, nil, BreakingRemoved(CompResponses, PropDefault), - lResponses.Default.Value, nil) - } - if lResponses.Default.IsEmpty() && !rResponses.Default.IsEmpty() { - CreateChange(&changes, ObjectAdded, v3.DefaultLabel, - nil, rResponses.Default.ValueNode, BreakingAdded(CompResponses, PropDefault), - nil, lResponses.Default.Value) - } - - rc.ResponseChanges = CheckMapForChangesWithRules(lResponses.Codes, rResponses.Codes, - &changes, v3.CodesLabel, CompareResponseV2, CompResponses, PropCodes) - - rc.ExtensionChanges = CompareExtensions(lResponses.Extensions, rResponses.Extensions) - } - - // openapi - if reflect.TypeOf(&v3.Responses{}) == reflect.TypeOf(l) && - reflect.TypeOf(&v3.Responses{}) == reflect.TypeOf(r) { - - lResponses := l.(*v3.Responses) - rResponses := r.(*v3.Responses) - - // perform hash check to avoid further processing - if low.AreEqual(lResponses, rResponses) { - return nil - } - - if !lResponses.Default.IsEmpty() && !rResponses.Default.IsEmpty() { - rc.DefaultChanges = CompareResponse(lResponses.Default.Value, rResponses.Default.Value) - } - if !lResponses.Default.IsEmpty() && rResponses.Default.IsEmpty() { - CreateChange(&changes, ObjectRemoved, v3.DefaultLabel, - lResponses.Default.ValueNode, nil, 
BreakingRemoved(CompResponses, PropDefault), - lResponses.Default.Value, nil) - } - if lResponses.Default.IsEmpty() && !rResponses.Default.IsEmpty() { - CreateChange(&changes, ObjectAdded, v3.DefaultLabel, - nil, rResponses.Default.ValueNode, BreakingAdded(CompResponses, PropDefault), - nil, lResponses.Default.Value) - } - - rc.ResponseChanges = CheckMapForChangesWithRules(lResponses.Codes, rResponses.Codes, - &changes, v3.CodesLabel, CompareResponseV3, CompResponses, PropCodes) - - rc.ExtensionChanges = CompareExtensions(lResponses.Extensions, rResponses.Extensions) - - } - - rc.PropertyChanges = NewPropertyChanges(changes) - return rc -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/schema.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/schema.go deleted file mode 100644 index 96b07314718..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/schema.go +++ /dev/null @@ -1,2104 +0,0 @@ -// Copyright 2022-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - "fmt" - "slices" - "sort" - "sync" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/datamodel/low/base" - v3 "github.com/pb33f/libopenapi/datamodel/low/v3" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// SchemaChanges represent all changes to a base.Schema OpenAPI object. These changes are represented -// by all versions of OpenAPI. -// -// Any additions or removals to slice based results will be recorded in the PropertyChanges of the parent -// changes, and not the child for example, adding a new schema to `anyOf` will create a new change result in -// PropertyChanges.Changes, and not in the AnyOfChanges property. 
-type SchemaChanges struct { - *PropertyChanges - DiscriminatorChanges *DiscriminatorChanges `json:"discriminator,omitempty" yaml:"discriminator,omitempty"` - AllOfChanges []*SchemaChanges `json:"allOf,omitempty" yaml:"allOf,omitempty"` - AnyOfChanges []*SchemaChanges `json:"anyOf,omitempty" yaml:"anyOf,omitempty"` - OneOfChanges []*SchemaChanges `json:"oneOf,omitempty" yaml:"oneOf,omitempty"` - PrefixItemsChanges []*SchemaChanges `json:"prefixItems,omitempty" yaml:"prefixItems,omitempty"` - NotChanges *SchemaChanges `json:"not,omitempty" yaml:"not,omitempty"` - ItemsChanges *SchemaChanges `json:"items,omitempty" yaml:"items,omitempty"` - SchemaPropertyChanges map[string]*SchemaChanges `json:"properties,omitempty" yaml:"properties,omitempty"` - ExternalDocChanges *ExternalDocChanges `json:"externalDoc,omitempty" yaml:"externalDoc,omitempty"` - XMLChanges *XMLChanges `json:"xml,omitempty" yaml:"xml,omitempty"` - ExtensionChanges *ExtensionChanges `json:"extensions,omitempty" yaml:"extensions,omitempty"` - AdditionalPropertiesChanges *SchemaChanges `json:"additionalProperties,omitempty" yaml:"additionalProperties,omitempty"` - - // 3.1 specifics - IfChanges *SchemaChanges `json:"if,omitempty" yaml:"if,omitempty"` - ElseChanges *SchemaChanges `json:"else,omitempty" yaml:"else,omitempty"` - ThenChanges *SchemaChanges `json:"then,omitempty" yaml:"then,omitempty"` - PropertyNamesChanges *SchemaChanges `json:"propertyNames,omitempty" yaml:"propertyNames,omitempty"` - ContainsChanges *SchemaChanges `json:"contains,omitempty" yaml:"contains,omitempty"` - UnevaluatedItemsChanges *SchemaChanges `json:"unevaluatedItems,omitempty" yaml:"unevaluatedItems,omitempty"` - UnevaluatedPropertiesChanges *SchemaChanges `json:"unevaluatedProperties,omitempty" yaml:"unevaluatedProperties,omitempty"` - DependentSchemasChanges map[string]*SchemaChanges `json:"dependentSchemas,omitempty" yaml:"dependentSchemas,omitempty"` - DependentRequiredChanges []*Change 
`json:"dependentRequired,omitempty" yaml:"dependentRequired,omitempty"` - PatternPropertiesChanges map[string]*SchemaChanges `json:"patternProperties,omitempty" yaml:"patternProperties,omitempty"` - ContentSchemaChanges *SchemaChanges `json:"contentSchema,omitempty" yaml:"contentSchema,omitempty"` - VocabularyChanges []*Change `json:"$vocabulary,omitempty" yaml:"$vocabulary,omitempty"` -} - -func (s *SchemaChanges) GetPropertyChanges() []*Change { - if s == nil { - return nil - } - changes := s.Changes - if s.SchemaPropertyChanges != nil { - for n := range s.SchemaPropertyChanges { - if s.SchemaPropertyChanges[n] != nil { - changes = append(changes, s.SchemaPropertyChanges[n].GetAllChanges()...) - } - } - } - if s.DependentSchemasChanges != nil { - for n := range s.DependentSchemasChanges { - if s.DependentSchemasChanges[n] != nil { - changes = append(changes, s.DependentSchemasChanges[n].GetAllChanges()...) - } - } - } - if len(s.DependentRequiredChanges) > 0 { - changes = append(changes, s.DependentRequiredChanges...) - } - if s.PatternPropertiesChanges != nil { - for n := range s.PatternPropertiesChanges { - if s.PatternPropertiesChanges[n] != nil { - changes = append(changes, s.PatternPropertiesChanges[n].GetAllChanges()...) - } - } - } - if s.XMLChanges != nil { - changes = append(changes, s.XMLChanges.GetAllChanges()...) - } - return changes -} - -// GetAllChanges returns a slice of all changes made between Responses objects -func (s *SchemaChanges) GetAllChanges() []*Change { - if s == nil { - return nil - } - var changes []*Change - changes = append(changes, s.Changes...) - if s.DiscriminatorChanges != nil { - changes = append(changes, s.DiscriminatorChanges.GetAllChanges()...) - } - if len(s.AllOfChanges) > 0 { - for n := range s.AllOfChanges { - if s.AllOfChanges[n] != nil { - changes = append(changes, s.AllOfChanges[n].GetAllChanges()...) 
- } - } - } - if len(s.AnyOfChanges) > 0 { - for n := range s.AnyOfChanges { - if s.AnyOfChanges[n] != nil { - changes = append(changes, s.AnyOfChanges[n].GetAllChanges()...) - } - } - } - if len(s.OneOfChanges) > 0 { - for n := range s.OneOfChanges { - if s.OneOfChanges[n] != nil { - changes = append(changes, s.OneOfChanges[n].GetAllChanges()...) - } - } - } - if len(s.PrefixItemsChanges) > 0 { - for n := range s.PrefixItemsChanges { - if s.PrefixItemsChanges[n] != nil { - changes = append(changes, s.PrefixItemsChanges[n].GetAllChanges()...) - } - } - } - if s.NotChanges != nil { - changes = append(changes, s.NotChanges.GetAllChanges()...) - } - if s.ItemsChanges != nil { - changes = append(changes, s.ItemsChanges.GetAllChanges()...) - } - if s.IfChanges != nil { - changes = append(changes, s.IfChanges.GetAllChanges()...) - } - if s.ElseChanges != nil { - changes = append(changes, s.ElseChanges.GetAllChanges()...) - } - if s.ThenChanges != nil { - changes = append(changes, s.ThenChanges.GetAllChanges()...) - } - if s.PropertyNamesChanges != nil { - changes = append(changes, s.PropertyNamesChanges.GetAllChanges()...) - } - if s.ContainsChanges != nil { - changes = append(changes, s.ContainsChanges.GetAllChanges()...) - } - if s.UnevaluatedItemsChanges != nil { - changes = append(changes, s.UnevaluatedItemsChanges.GetAllChanges()...) - } - if s.UnevaluatedPropertiesChanges != nil { - changes = append(changes, s.UnevaluatedPropertiesChanges.GetAllChanges()...) - } - if s.AdditionalPropertiesChanges != nil { - changes = append(changes, s.AdditionalPropertiesChanges.GetAllChanges()...) - } - if s.SchemaPropertyChanges != nil { - for n := range s.SchemaPropertyChanges { - if s.SchemaPropertyChanges[n] != nil { - changes = append(changes, s.SchemaPropertyChanges[n].GetAllChanges()...) 
- } - } - } - if s.DependentSchemasChanges != nil { - for n := range s.DependentSchemasChanges { - if s.DependentSchemasChanges[n] != nil { - changes = append(changes, s.DependentSchemasChanges[n].GetAllChanges()...) - } - } - } - if len(s.DependentRequiredChanges) > 0 { - changes = append(changes, s.DependentRequiredChanges...) - } - if s.PatternPropertiesChanges != nil { - for n := range s.PatternPropertiesChanges { - if s.PatternPropertiesChanges[n] != nil { - changes = append(changes, s.PatternPropertiesChanges[n].GetAllChanges()...) - } - } - } - if s.ExternalDocChanges != nil { - changes = append(changes, s.ExternalDocChanges.GetAllChanges()...) - } - if s.XMLChanges != nil { - changes = append(changes, s.XMLChanges.GetAllChanges()...) - } - if s.ExtensionChanges != nil { - changes = append(changes, s.ExtensionChanges.GetAllChanges()...) - } - return changes -} - -// TotalChanges returns a count of the total number of changes made to this schema and all sub-schemas -func (s *SchemaChanges) TotalChanges() int { - if s == nil { - return 0 - } - t := s.PropertyChanges.TotalChanges() - if s.DiscriminatorChanges != nil { - t += s.DiscriminatorChanges.TotalChanges() - } - if len(s.AllOfChanges) > 0 { - for n := range s.AllOfChanges { - t += s.AllOfChanges[n].TotalChanges() - } - } - if len(s.AnyOfChanges) > 0 { - for n := range s.AnyOfChanges { - if s.AnyOfChanges[n] != nil { - t += s.AnyOfChanges[n].TotalChanges() - } - } - } - if len(s.OneOfChanges) > 0 { - for n := range s.OneOfChanges { - t += s.OneOfChanges[n].TotalChanges() - } - } - if len(s.PrefixItemsChanges) > 0 { - for n := range s.PrefixItemsChanges { - t += s.PrefixItemsChanges[n].TotalChanges() - } - } - - if s.NotChanges != nil { - t += s.NotChanges.TotalChanges() - } - if s.ItemsChanges != nil { - t += s.ItemsChanges.TotalChanges() - } - if s.IfChanges != nil { - t += s.IfChanges.TotalChanges() - } - if s.ElseChanges != nil { - t += s.ElseChanges.TotalChanges() - } - if s.ThenChanges != nil { - t += 
s.ThenChanges.TotalChanges() - } - if s.PropertyNamesChanges != nil { - t += s.PropertyNamesChanges.TotalChanges() - } - if s.ContainsChanges != nil { - t += s.ContainsChanges.TotalChanges() - } - if s.UnevaluatedItemsChanges != nil { - t += s.UnevaluatedItemsChanges.TotalChanges() - } - if s.UnevaluatedPropertiesChanges != nil { - t += s.UnevaluatedPropertiesChanges.TotalChanges() - } - if s.AdditionalPropertiesChanges != nil { - t += s.AdditionalPropertiesChanges.TotalChanges() - } - if s.SchemaPropertyChanges != nil { - for n := range s.SchemaPropertyChanges { - if s.SchemaPropertyChanges[n] != nil { - t += s.SchemaPropertyChanges[n].TotalChanges() - } - } - } - if s.DependentSchemasChanges != nil { - for n := range s.DependentSchemasChanges { - t += s.DependentSchemasChanges[n].TotalChanges() - } - } - if len(s.DependentRequiredChanges) > 0 { - t += len(s.DependentRequiredChanges) - } - if s.PatternPropertiesChanges != nil { - for n := range s.PatternPropertiesChanges { - t += s.PatternPropertiesChanges[n].TotalChanges() - } - } - if s.ContentSchemaChanges != nil { - t += s.ContentSchemaChanges.TotalChanges() - } - if len(s.VocabularyChanges) > 0 { - t += len(s.VocabularyChanges) - } - if s.ExternalDocChanges != nil { - t += s.ExternalDocChanges.TotalChanges() - } - if s.XMLChanges != nil { - t += s.XMLChanges.TotalChanges() - } - if s.ExtensionChanges != nil { - t += s.ExtensionChanges.TotalChanges() - } - return t -} - -// TotalBreakingChanges returns the total number of breaking changes made to this schema and all sub-schemas. 
-func (s *SchemaChanges) TotalBreakingChanges() int { - if s == nil { - return 0 - } - t := s.PropertyChanges.TotalBreakingChanges() - if s.DiscriminatorChanges != nil { - t += s.DiscriminatorChanges.TotalBreakingChanges() - } - if len(s.AllOfChanges) > 0 { - for n := range s.AllOfChanges { - t += s.AllOfChanges[n].TotalBreakingChanges() - } - } - if len(s.AllOfChanges) > 0 { - for n := range s.AllOfChanges { - t += s.AllOfChanges[n].TotalBreakingChanges() - } - } - if len(s.AnyOfChanges) > 0 { - for n := range s.AnyOfChanges { - t += s.AnyOfChanges[n].TotalBreakingChanges() - } - } - if len(s.OneOfChanges) > 0 { - for n := range s.OneOfChanges { - t += s.OneOfChanges[n].TotalBreakingChanges() - } - } - if len(s.PrefixItemsChanges) > 0 { - for n := range s.PrefixItemsChanges { - t += s.PrefixItemsChanges[n].TotalBreakingChanges() - } - } - if s.NotChanges != nil { - t += s.NotChanges.TotalBreakingChanges() - } - if s.ItemsChanges != nil { - t += s.ItemsChanges.TotalBreakingChanges() - } - if s.IfChanges != nil { - t += s.IfChanges.TotalBreakingChanges() - } - if s.ElseChanges != nil { - t += s.ElseChanges.TotalBreakingChanges() - } - if s.ThenChanges != nil { - t += s.ThenChanges.TotalBreakingChanges() - } - if s.PropertyNamesChanges != nil { - t += s.PropertyNamesChanges.TotalBreakingChanges() - } - if s.ContainsChanges != nil { - t += s.ContainsChanges.TotalBreakingChanges() - } - if s.UnevaluatedItemsChanges != nil { - t += s.UnevaluatedItemsChanges.TotalBreakingChanges() - } - if s.UnevaluatedPropertiesChanges != nil { - t += s.UnevaluatedPropertiesChanges.TotalBreakingChanges() - } - if s.AdditionalPropertiesChanges != nil { - t += s.AdditionalPropertiesChanges.TotalBreakingChanges() - } - if s.DependentSchemasChanges != nil { - for n := range s.DependentSchemasChanges { - t += s.DependentSchemasChanges[n].TotalBreakingChanges() - } - } - if len(s.DependentRequiredChanges) > 0 { - // Count breaking changes in dependent required changes - for _, change := range 
s.DependentRequiredChanges { - if change.Breaking { - t++ - } - } - } - if s.PatternPropertiesChanges != nil { - for n := range s.PatternPropertiesChanges { - t += s.PatternPropertiesChanges[n].TotalBreakingChanges() - } - } - if s.ContentSchemaChanges != nil { - t += s.ContentSchemaChanges.TotalBreakingChanges() - } - if len(s.VocabularyChanges) > 0 { - for _, change := range s.VocabularyChanges { - if change.Breaking { - t++ - } - } - } - if s.XMLChanges != nil { - t += s.XMLChanges.TotalBreakingChanges() - } - if s.SchemaPropertyChanges != nil { - for n := range s.SchemaPropertyChanges { - t += s.SchemaPropertyChanges[n].TotalBreakingChanges() - } - } - return t -} - -// CompareSchemas accepts a left and right SchemaProxy and checks for changes. If anything is found, returns -// a pointer to SchemaChanges, otherwise returns nil -func CompareSchemas(l, r *base.SchemaProxy) *SchemaChanges { - sc := new(SchemaChanges) - var changes []*Change - - // Added - if l == nil && r != nil { - CreateChange(&changes, ObjectAdded, v3.SchemaLabel, - nil, nil, BreakingAdded(CompSchemas, ""), nil, r) - sc.PropertyChanges = NewPropertyChanges(changes) - } - - // Removed - if l != nil && r == nil { - CreateChange(&changes, ObjectRemoved, v3.SchemaLabel, - nil, nil, BreakingRemoved(CompSchemas, ""), l, nil) - sc.PropertyChanges = NewPropertyChanges(changes) - } - - if l != nil && r != nil { - - // if left proxy is a reference and right is a reference (we won't recurse into circular references here) - if l.IsReference() && r.IsReference() { - - // points to the same schema - if l.GetReference() == r.GetReference() { - - // check if this is a circular ref. - if base.CheckSchemaProxyForCircularRefs(l) || base.CheckSchemaProxyForCircularRefs(r) { - // if we have a circular reference, we can't do any more work here. 
- return nil - } - - if r.GetIndex() != nil && r.GetIndex().GetSpecAbsolutePath() == "" || - r.GetIndex().GetSpecAbsolutePath() == "root.yaml" { - // local reference doesn't need following - return nil - } - - // continue on because the external references are the same and we need to check things going forward. - - } else { - // references are different, that's all we care to know. - CreateChange(&changes, Modified, v3.RefLabel, - l.GetValueNode().Content[1], r.GetValueNode().Content[1], BreakingModified(CompSchema, PropRef), l.GetReference(), - r.GetReference()) - sc.PropertyChanges = NewPropertyChanges(changes) - - // check if this is a circular ref. - if base.CheckSchemaProxyForCircularRefs(l) || base.CheckSchemaProxyForCircularRefs(r) { - // if we have a circular reference, we can't do any more work here. - return nil - } - return sc - } - } - - // changed from inline to ref - if !l.IsReference() && r.IsReference() { - // check if the referenced schema matches or not - // https://github.com/pb33f/libopenapi/issues/218 - lHash := l.Schema().Hash() - rHash := r.Schema().Hash() - if lHash != rHash { - CreateChange(&changes, Modified, v3.RefLabel, - l.GetValueNode(), r.GetValueNode().Content[1], BreakingModified(CompSchema, PropRef), l, r.GetReference()) - sc.PropertyChanges = NewPropertyChanges(changes) - - // check if this is a circular ref. - if base.CheckSchemaProxyForCircularRefs(r) { - // if we have a circular reference, we can't do any more work here. 
- return nil - } - return sc - } - } - - // changed from ref to inline - if l.IsReference() && !r.IsReference() { - // check if the referenced schema matches or not - // https://github.com/pb33f/libopenapi/issues/218 - lHash := l.Schema().Hash() - rHash := r.Schema().Hash() - if lHash != rHash { - CreateChange(&changes, Modified, v3.RefLabel, - l.GetValueNode().Content[1], r.GetValueNode(), BreakingModified(CompSchema, PropRef), l.GetReference(), r) - sc.PropertyChanges = NewPropertyChanges(changes) - - // check if this is a circular ref. - if base.CheckSchemaProxyForCircularRefs(l) { - // if we have a circular reference, we can't do any more work here. - return nil - } - return sc - } - } - - lSchema := l.Schema() - rSchema := r.Schema() - - if low.AreEqual(lSchema, rSchema) { - // there is no point going on, we know nothing changed! - return nil - } - - // check XML - checkSchemaXML(lSchema, rSchema, &changes, sc) - - // check examples - checkExamples(lSchema, rSchema, &changes) - - // check schema core properties for changes. - checkSchemaPropertyChanges(lSchema, rSchema, l, r, &changes, sc) - - // now for the confusing part, there is also a schema's 'properties' property to parse. - // inception, eat your heart out. 
- var lProperties, rProperties, lDepSchemas, rDepSchemas, lPattProp, rPattProp *orderedmap.Map[low.KeyReference[string], low.ValueReference[*base.SchemaProxy]] - var loneOf, lallOf, lanyOf, roneOf, rallOf, ranyOf, lprefix, rprefix []low.ValueReference[*base.SchemaProxy] - if lSchema != nil { - lProperties = lSchema.Properties.Value - lDepSchemas = lSchema.DependentSchemas.Value - lPattProp = lSchema.PatternProperties.Value - loneOf = lSchema.OneOf.Value - lallOf = lSchema.AllOf.Value - lanyOf = lSchema.AnyOf.Value - lprefix = lSchema.PrefixItems.Value - } - if rSchema != nil { - rProperties = rSchema.Properties.Value - rDepSchemas = rSchema.DependentSchemas.Value - rPattProp = rSchema.PatternProperties.Value - roneOf = rSchema.OneOf.Value - rallOf = rSchema.AllOf.Value - ranyOf = rSchema.AnyOf.Value - rprefix = rSchema.PrefixItems.Value - } - - props := checkMappedSchemaOfASchema(lProperties, rProperties, &changes) - sc.SchemaPropertyChanges = props - - deps := checkMappedSchemaOfASchema(lDepSchemas, rDepSchemas, &changes) - sc.DependentSchemasChanges = deps - - // Check dependent required changes - var lDepRequired, rDepRequired *orderedmap.Map[low.KeyReference[string], low.ValueReference[[]string]] - if lSchema != nil { - lDepRequired = lSchema.DependentRequired.Value - } - if rSchema != nil { - rDepRequired = rSchema.DependentRequired.Value - } - - depRequiredChanges := checkDependentRequiredChanges(lDepRequired, rDepRequired) - if len(depRequiredChanges) > 0 { - sc.DependentRequiredChanges = depRequiredChanges - } - - patterns := checkMappedSchemaOfASchema(lPattProp, rPattProp, &changes) - sc.PatternPropertiesChanges = patterns - - var wg sync.WaitGroup - wg.Add(4) - go func() { - extractSchemaChanges(loneOf, roneOf, v3.OneOfLabel, - &sc.OneOfChanges, &changes) - wg.Done() - }() - go func() { - extractSchemaChanges(lallOf, rallOf, v3.AllOfLabel, - &sc.AllOfChanges, &changes) - wg.Done() - }() - go func() { - extractSchemaChanges(lanyOf, ranyOf, v3.AnyOfLabel, - 
&sc.AnyOfChanges, &changes) - wg.Done() - }() - go func() { - extractSchemaChanges(lprefix, rprefix, v3.PrefixItemsLabel, - &sc.PrefixItemsChanges, &changes) - wg.Done() - }() - wg.Wait() - - } - // done - if changes != nil { - sc.PropertyChanges = NewPropertyChanges(changes) - } else { - sc.PropertyChanges = NewPropertyChanges(nil) - } - if sc.TotalChanges() > 0 { - return sc - } - return nil -} - -func checkSchemaXML(lSchema *base.Schema, rSchema *base.Schema, changes *[]*Change, sc *SchemaChanges) { - // XML removed - if lSchema == nil || rSchema == nil { - return - } - if lSchema.XML.Value != nil && rSchema.XML.Value == nil { - CreateChange(changes, ObjectRemoved, v3.XMLLabel, - lSchema.XML.GetValueNode(), nil, BreakingRemoved(CompSchema, PropXML), lSchema.XML.GetValue(), nil) - } - // XML added - if lSchema.XML.Value == nil && rSchema.XML.Value != nil { - CreateChange(changes, ObjectAdded, v3.XMLLabel, - nil, rSchema.XML.GetValueNode(), BreakingAdded(CompSchema, PropXML), nil, rSchema.XML.GetValue()) - } - - // compare XML - if lSchema.XML.Value != nil && rSchema.XML.Value != nil { - if !low.AreEqual(lSchema.XML.Value, rSchema.XML.Value) { - sc.XMLChanges = CompareXML(lSchema.XML.Value, rSchema.XML.Value) - } - } -} - -func checkMappedSchemaOfASchema( - lSchema, - rSchema *orderedmap.Map[low.KeyReference[string], low.ValueReference[*base.SchemaProxy]], - changes *[]*Change, -) map[string]*SchemaChanges { - var syncPropChanges sync.Map // concurrent-safe map - var lProps []string - lEntities := make(map[string]*base.SchemaProxy) - lKeyNodes := make(map[string]*yaml.Node) - var rProps []string - rEntities := make(map[string]*base.SchemaProxy) - rKeyNodes := make(map[string]*yaml.Node) - - for k, v := range lSchema.FromOldest() { - lProps = append(lProps, k.Value) - lEntities[k.Value] = v.Value - lKeyNodes[k.Value] = k.KeyNode - } - for k, v := range rSchema.FromOldest() { - rProps = append(rProps, k.Value) - rEntities[k.Value] = v.Value - rKeyNodes[k.Value] = 
k.KeyNode - } - sort.Strings(lProps) - sort.Strings(rProps) - buildProperty(lProps, rProps, lEntities, rEntities, &syncPropChanges, changes, rKeyNodes, lKeyNodes) - - // Convert the sync.Map into a regular map[string]*SchemaChanges. - propChanges := make(map[string]*SchemaChanges) - syncPropChanges.Range(func(key, value interface{}) bool { - propChanges[key.(string)] = value.(*SchemaChanges) - return true - }) - return propChanges -} - -func buildProperty(lProps, rProps []string, lEntities, rEntities map[string]*base.SchemaProxy, - propChanges *sync.Map, changes *[]*Change, rKeyNodes, lKeyNodes map[string]*yaml.Node, -) { - var wg sync.WaitGroup - checkProperty := func(key string, lp, rp *base.SchemaProxy) { - defer wg.Done() - if low.AreEqual(lp, rp) { - return - } - s := CompareSchemas(lp, rp) - propChanges.Store(key, s) - } - - // left and right equal. - if len(lProps) == len(rProps) { - for w := range lProps { - lp := lEntities[lProps[w]] - rp := rEntities[rProps[w]] - if lProps[w] == rProps[w] && lp != nil && rp != nil { - wg.Add(1) - go checkProperty(lProps[w], lp, rp) - } - // Handle keys that do not match. - if lProps[w] != rProps[w] { - if !slices.Contains(lProps, rProps[w]) { - // new property added. 
- CreateChange(changes, ObjectAdded, v3.PropertiesLabel, - nil, rKeyNodes[rProps[w]], BreakingAdded(CompSchema, PropProperties), nil, rEntities[rProps[w]]) - } - if !slices.Contains(rProps, lProps[w]) { - CreateChange(changes, ObjectRemoved, v3.PropertiesLabel, - lKeyNodes[lProps[w]], nil, BreakingRemoved(CompSchema, PropProperties), lEntities[lProps[w]], nil) - } - if slices.Contains(lProps, rProps[w]) { - h := slices.Index(lProps, rProps[w]) - lp = lEntities[lProps[h]] - rp = rEntities[rProps[w]] - wg.Add(1) - go checkProperty(lProps[h], lp, rp) - } - } - } - } - - // things removed - if len(lProps) > len(rProps) { - for w := range lProps { - if rEntities[lProps[w]] != nil { - wg.Add(1) - go checkProperty(lProps[w], lEntities[lProps[w]], rEntities[lProps[w]]) - } else { - CreateChange(changes, ObjectRemoved, v3.PropertiesLabel, - lKeyNodes[lProps[w]], nil, BreakingRemoved(CompSchema, PropProperties), lEntities[lProps[w]], nil) - } - } - for w := range rProps { - if lEntities[rProps[w]] != nil { - wg.Add(1) - go checkProperty(rProps[w], lEntities[rProps[w]], rEntities[rProps[w]]) - } else { - CreateChange(changes, ObjectAdded, v3.PropertiesLabel, - nil, rKeyNodes[rProps[w]], BreakingAdded(CompSchema, PropProperties), nil, rEntities[rProps[w]]) - } - } - } - - // stuff added - if len(rProps) > len(lProps) { - for _, propName := range rProps { - if lEntities[propName] != nil { - wg.Add(1) - go checkProperty(propName, lEntities[propName], rEntities[propName]) - } else { - CreateChange(changes, ObjectAdded, v3.PropertiesLabel, - nil, rKeyNodes[propName], BreakingAdded(CompSchema, PropProperties), nil, rEntities[propName]) - } - } - for _, propName := range lProps { - if rEntities[propName] != nil { - wg.Add(1) - go checkProperty(propName, lEntities[propName], rEntities[propName]) - } else { - CreateChange(changes, ObjectRemoved, v3.PropertiesLabel, - nil, lKeyNodes[propName], BreakingRemoved(CompSchema, PropProperties), lEntities[propName], nil) - } - } - } - - // 
Wait for all property comparisons to finish. - wg.Wait() -} - -func checkSchemaPropertyChanges( - lSchema *base.Schema, - rSchema *base.Schema, - lProxy *base.SchemaProxy, - rProxy *base.SchemaProxy, - changes *[]*Change, sc *SchemaChanges, -) { - var props []*PropertyCheck - - // $schema (breaking change) - var lnv, rnv *yaml.Node - if lSchema != nil && lSchema.SchemaTypeRef.ValueNode != nil { - lnv = lSchema.SchemaTypeRef.ValueNode - } - if rSchema != nil && rSchema.SchemaTypeRef.ValueNode != nil { - rnv = rSchema.SchemaTypeRef.ValueNode - } - - props = append(props, &PropertyCheck{ - LeftNode: lnv, - RightNode: rnv, - Label: v3.SchemaDialectLabel, - Changes: changes, - Breaking: BreakingModified(CompSchema, PropSchemaDialect), - Component: CompSchema, - Property: PropSchemaDialect, - Original: lSchema, - New: rSchema, - }) - lnv = nil - rnv = nil - - if lSchema != nil && lSchema.ExclusiveMaximum.ValueNode != nil { - lnv = lSchema.ExclusiveMaximum.ValueNode - } - if rSchema != nil && rSchema.ExclusiveMaximum.ValueNode != nil { - rnv = rSchema.ExclusiveMaximum.ValueNode - } - // ExclusiveMaximum - props = append(props, &PropertyCheck{ - LeftNode: lnv, - RightNode: rnv, - Label: v3.ExclusiveMaximumLabel, - Changes: changes, - Breaking: BreakingModified(CompSchema, PropExclusiveMaximum), - Component: CompSchema, - Property: PropExclusiveMaximum, - Original: lSchema, - New: rSchema, - }) - lnv = nil - rnv = nil - - if lSchema != nil && lSchema.ExclusiveMinimum.ValueNode != nil { - lnv = lSchema.ExclusiveMinimum.ValueNode - } - if rSchema != nil && rSchema.ExclusiveMinimum.ValueNode != nil { - rnv = rSchema.ExclusiveMinimum.ValueNode - } - - // ExclusiveMinimum - props = append(props, &PropertyCheck{ - LeftNode: lnv, - RightNode: rnv, - Label: v3.ExclusiveMinimumLabel, - Changes: changes, - Breaking: BreakingModified(CompSchema, PropExclusiveMinimum), - Component: CompSchema, - Property: PropExclusiveMinimum, - Original: lSchema, - New: rSchema, - }) - lnv = nil - rnv 
= nil - - if lSchema != nil && lSchema.Type.ValueNode != nil { - lnv = lSchema.Type.ValueNode - } - if rSchema != nil && rSchema.Type.ValueNode != nil { - rnv = rSchema.Type.ValueNode - } - // Type - props = append(props, &PropertyCheck{ - LeftNode: lnv, - RightNode: rnv, - Label: v3.TypeLabel, - Changes: changes, - Breaking: BreakingModified(CompSchema, PropType), - Component: CompSchema, - Property: PropType, - Original: lSchema, - New: rSchema, - }) - lnv = nil - rnv = nil - - if lSchema != nil && lSchema.Title.ValueNode != nil { - lnv = lSchema.Title.ValueNode - } - if rSchema != nil && rSchema.Title.ValueNode != nil { - rnv = rSchema.Title.ValueNode - } - // Title - props = append(props, &PropertyCheck{ - LeftNode: lnv, - RightNode: rnv, - Label: v3.TitleLabel, - Changes: changes, - Breaking: BreakingModified(CompSchema, PropTitle), - Component: CompSchema, - Property: PropTitle, - Original: lSchema, - New: rSchema, - }) - lnv = nil - rnv = nil - - if lSchema != nil && lSchema.MultipleOf.ValueNode != nil { - lnv = lSchema.MultipleOf.ValueNode - } - if rSchema != nil && rSchema.MultipleOf.ValueNode != nil { - rnv = rSchema.MultipleOf.ValueNode - } - - // MultipleOf - props = append(props, &PropertyCheck{ - LeftNode: lnv, - RightNode: rnv, - Label: v3.MultipleOfLabel, - Changes: changes, - Breaking: BreakingModified(CompSchema, PropMultipleOf), - Component: CompSchema, - Property: PropMultipleOf, - Original: lSchema, - New: rSchema, - }) - lnv = nil - rnv = nil - - if lSchema != nil && lSchema.Maximum.ValueNode != nil { - lnv = lSchema.Maximum.ValueNode - } - if rSchema != nil && rSchema.Maximum.ValueNode != nil { - rnv = rSchema.Maximum.ValueNode - } - // Maximum - props = append(props, &PropertyCheck{ - LeftNode: lnv, - RightNode: rnv, - Label: v3.MaximumLabel, - Changes: changes, - Breaking: BreakingModified(CompSchema, PropMaximum), - Component: CompSchema, - Property: PropMaximum, - Original: lSchema, - New: rSchema, - }) - lnv = nil - rnv = nil - - if 
lSchema != nil && lSchema.Minimum.ValueNode != nil { - lnv = lSchema.Minimum.ValueNode - } - if rSchema != nil && rSchema.Minimum.ValueNode != nil { - rnv = rSchema.Minimum.ValueNode - } - // Minimum - props = append(props, &PropertyCheck{ - LeftNode: lnv, - RightNode: rnv, - Label: v3.MinimumLabel, - Changes: changes, - Breaking: BreakingModified(CompSchema, PropMinimum), - Component: CompSchema, - Property: PropMinimum, - Original: lSchema, - New: rSchema, - }) - lnv = nil - rnv = nil - - if lSchema != nil && lSchema.MaxLength.ValueNode != nil { - lnv = lSchema.MaxLength.ValueNode - } - if rSchema != nil && rSchema.MaxLength.ValueNode != nil { - rnv = rSchema.MaxLength.ValueNode - } - // MaxLength - props = append(props, &PropertyCheck{ - LeftNode: lnv, - RightNode: rnv, - Label: v3.MaxLengthLabel, - Changes: changes, - Breaking: BreakingModified(CompSchema, PropMaxLength), - Component: CompSchema, - Property: PropMaxLength, - Original: lSchema, - New: rSchema, - }) - lnv = nil - rnv = nil - - if lSchema != nil && lSchema.MinLength.ValueNode != nil { - lnv = lSchema.MinLength.ValueNode - } - if rSchema != nil && rSchema.MinLength.ValueNode != nil { - rnv = rSchema.MinLength.ValueNode - } - // MinLength - props = append(props, &PropertyCheck{ - LeftNode: lnv, - RightNode: rnv, - Label: v3.MinLengthLabel, - Changes: changes, - Breaking: BreakingModified(CompSchema, PropMinLength), - Component: CompSchema, - Property: PropMinLength, - Original: lSchema, - New: rSchema, - }) - lnv = nil - rnv = nil - - if lSchema != nil && lSchema.Pattern.ValueNode != nil { - lnv = lSchema.Pattern.ValueNode - } - if rSchema != nil && rSchema.Pattern.ValueNode != nil { - rnv = rSchema.Pattern.ValueNode - } - // Pattern - props = append(props, &PropertyCheck{ - LeftNode: lnv, - RightNode: rnv, - Label: v3.PatternLabel, - Changes: changes, - Breaking: BreakingModified(CompSchema, PropPattern), - Component: CompSchema, - Property: PropPattern, - Original: lSchema, - New: rSchema, - }) - 
lnv = nil - rnv = nil - - if lSchema != nil && lSchema.Format.ValueNode != nil { - lnv = lSchema.Format.ValueNode - } - if rSchema != nil && rSchema.Format.ValueNode != nil { - rnv = rSchema.Format.ValueNode - } - // Format - props = append(props, &PropertyCheck{ - LeftNode: lnv, - RightNode: rnv, - Label: v3.FormatLabel, - Changes: changes, - Breaking: BreakingModified(CompSchema, PropFormat), - Component: CompSchema, - Property: PropFormat, - Original: lSchema, - New: rSchema, - }) - lnv = nil - rnv = nil - - if lSchema != nil && lSchema.MaxItems.ValueNode != nil { - lnv = lSchema.MaxItems.ValueNode - } - if rSchema != nil && rSchema.MaxItems.ValueNode != nil { - rnv = rSchema.MaxItems.ValueNode - } - // MaxItems - props = append(props, &PropertyCheck{ - LeftNode: lnv, - RightNode: rnv, - Label: v3.MaxItemsLabel, - Changes: changes, - Breaking: BreakingModified(CompSchema, PropMaxItems), - Component: CompSchema, - Property: PropMaxItems, - Original: lSchema, - New: rSchema, - }) - lnv = nil - rnv = nil - - if lSchema != nil && lSchema.MinItems.ValueNode != nil { - lnv = lSchema.MinItems.ValueNode - } - if rSchema != nil && rSchema.MinItems.ValueNode != nil { - rnv = rSchema.MinItems.ValueNode - } - // MinItems - props = append(props, &PropertyCheck{ - LeftNode: lnv, - RightNode: rnv, - Label: v3.MinItemsLabel, - Changes: changes, - Breaking: BreakingModified(CompSchema, PropMinItems), - Component: CompSchema, - Property: PropMinItems, - Original: lSchema, - New: rSchema, - }) - lnv = nil - rnv = nil - - if lSchema != nil && lSchema.MaxProperties.ValueNode != nil { - lnv = lSchema.MaxProperties.ValueNode - } - if rSchema != nil && rSchema.MaxProperties.ValueNode != nil { - rnv = rSchema.MaxProperties.ValueNode - } - // MaxProperties - props = append(props, &PropertyCheck{ - LeftNode: lnv, - RightNode: rnv, - Label: v3.MaxPropertiesLabel, - Changes: changes, - Breaking: BreakingModified(CompSchema, PropMaxProperties), - Component: CompSchema, - Property: 
PropMaxProperties, - Original: lSchema, - New: rSchema, - }) - lnv = nil - rnv = nil - - if lSchema != nil && lSchema.MinProperties.ValueNode != nil { - lnv = lSchema.MinProperties.ValueNode - } - if rSchema != nil && rSchema.MinProperties.ValueNode != nil { - rnv = rSchema.MinProperties.ValueNode - } - - // MinProperties - props = append(props, &PropertyCheck{ - LeftNode: lnv, - RightNode: rnv, - Label: v3.MinPropertiesLabel, - Changes: changes, - Breaking: BreakingModified(CompSchema, PropMinProperties), - Component: CompSchema, - Property: PropMinProperties, - Original: lSchema, - New: rSchema, - }) - lnv = nil - rnv = nil - - if lSchema != nil && lSchema.UniqueItems.ValueNode != nil { - lnv = lSchema.UniqueItems.ValueNode - } - if rSchema != nil && rSchema.UniqueItems.ValueNode != nil { - rnv = rSchema.UniqueItems.ValueNode - } - // UniqueItems - props = append(props, &PropertyCheck{ - LeftNode: lnv, - RightNode: rnv, - Label: v3.UniqueItemsLabel, - Changes: changes, - Breaking: BreakingModified(CompSchema, PropUniqueItems), - Component: CompSchema, - Property: PropUniqueItems, - Original: lSchema, - New: rSchema, - }) - - lnv = nil - rnv = nil - - // AdditionalProperties - if lSchema != nil && lSchema.AdditionalProperties.Value != nil && rSchema != nil && rSchema.AdditionalProperties.Value != nil { - if lSchema.AdditionalProperties.Value.IsA() && rSchema.AdditionalProperties.Value.IsA() { - if !low.AreEqual(lSchema.AdditionalProperties.Value.A, rSchema.AdditionalProperties.Value.A) { - sc.AdditionalPropertiesChanges = CompareSchemas(lSchema.AdditionalProperties.Value.A, rSchema.AdditionalProperties.Value.A) - } - } else { - if lSchema.AdditionalProperties.Value.IsB() && rSchema.AdditionalProperties.Value.IsB() { - if lSchema.AdditionalProperties.Value.B != rSchema.AdditionalProperties.Value.B { - CreateChange(changes, Modified, v3.AdditionalPropertiesLabel, - lSchema.AdditionalProperties.ValueNode, rSchema.AdditionalProperties.ValueNode, 
BreakingModified(CompSchema, PropAdditionalProperties), - lSchema.AdditionalProperties.Value.B, rSchema.AdditionalProperties.Value.B) - } - } else { - CreateChange(changes, Modified, v3.AdditionalPropertiesLabel, - lSchema.AdditionalProperties.ValueNode, rSchema.AdditionalProperties.ValueNode, BreakingModified(CompSchema, PropAdditionalProperties), - lSchema.AdditionalProperties.Value.B, rSchema.AdditionalProperties.Value.B) - } - } - } - - // added AdditionalProperties - if (lSchema == nil || lSchema.AdditionalProperties.Value == nil) && (rSchema != nil && rSchema.AdditionalProperties.Value != nil) { - CreateChange(changes, ObjectAdded, v3.AdditionalPropertiesLabel, - nil, rSchema.AdditionalProperties.ValueNode, BreakingAdded(CompSchema, PropAdditionalProperties), nil, rSchema.AdditionalProperties.Value) - } - // removed AdditionalProperties - if (lSchema != nil && lSchema.AdditionalProperties.Value != nil) && (rSchema == nil || rSchema.AdditionalProperties.Value == nil) { - CreateChange(changes, ObjectRemoved, v3.AdditionalPropertiesLabel, - lSchema.AdditionalProperties.ValueNode, nil, BreakingRemoved(CompSchema, PropAdditionalProperties), lSchema.AdditionalProperties.Value, nil) - } - - if lSchema != nil && lSchema.Description.ValueNode != nil { - lnv = lSchema.Description.ValueNode - } - if rSchema != nil && rSchema.Description.ValueNode != nil { - rnv = rSchema.Description.ValueNode - } - // Description - props = append(props, &PropertyCheck{ - LeftNode: lnv, - RightNode: rnv, - Label: v3.DescriptionLabel, - Changes: changes, - Breaking: BreakingModified(CompSchema, PropDescription), - Component: CompSchema, - Property: PropDescription, - Original: lSchema, - New: rSchema, - }) - lnv = nil - rnv = nil - - if lSchema != nil && lSchema.ContentEncoding.ValueNode != nil { - lnv = lSchema.ContentEncoding.ValueNode - } - if rSchema != nil && rSchema.ContentEncoding.ValueNode != nil { - rnv = rSchema.ContentEncoding.ValueNode - } - // ContentEncoding - props = 
append(props, &PropertyCheck{ - LeftNode: lnv, - RightNode: rnv, - Label: v3.ContentEncodingLabel, - Changes: changes, - Breaking: BreakingModified(CompSchema, PropContentEncoding), - Component: CompSchema, - Property: PropContentEncoding, - Original: lSchema, - New: rSchema, - }) - lnv = nil - rnv = nil - - if lSchema != nil && lSchema.ContentMediaType.ValueNode != nil { - lnv = lSchema.ContentMediaType.ValueNode - } - if rSchema != nil && rSchema.ContentMediaType.ValueNode != nil { - rnv = rSchema.ContentMediaType.ValueNode - } - // ContentMediaType - props = append(props, &PropertyCheck{ - LeftNode: lnv, - RightNode: rnv, - Label: v3.ContentMediaType, - Changes: changes, - Breaking: BreakingModified(CompSchema, PropContentMediaType), - Component: CompSchema, - Property: PropContentMediaType, - Original: lSchema, - New: rSchema, - }) - lnv = nil - rnv = nil - - if lSchema != nil && lSchema.Default.ValueNode != nil { - lnv = lSchema.Default.ValueNode - } - if rSchema != nil && rSchema.Default.ValueNode != nil { - rnv = rSchema.Default.ValueNode - } - // Default - props = append(props, &PropertyCheck{ - LeftNode: lnv, - RightNode: rnv, - Label: v3.DefaultLabel, - Changes: changes, - Breaking: BreakingModified(CompSchema, PropDefault), - Component: CompSchema, - Property: PropDefault, - Original: lSchema, - New: rSchema, - }) - lnv = nil - rnv = nil - - if lSchema != nil && lSchema.Const.ValueNode != nil { - lnv = lSchema.Const.ValueNode - } - if rSchema != nil && rSchema.Const.ValueNode != nil { - rnv = rSchema.Const.ValueNode - } - // Const - props = append(props, &PropertyCheck{ - LeftNode: lnv, - RightNode: rnv, - Label: v3.ConstLabel, - Changes: changes, - Breaking: BreakingModified(CompSchema, PropConst), - Component: CompSchema, - Property: PropConst, - Original: lSchema, - New: rSchema, - }) - lnv = nil - rnv = nil - - if lSchema != nil && lSchema.Nullable.ValueNode != nil { - lnv = lSchema.Nullable.ValueNode - } - if rSchema != nil && 
rSchema.Nullable.ValueNode != nil { - rnv = rSchema.Nullable.ValueNode - } - // Nullable - props = append(props, &PropertyCheck{ - LeftNode: lnv, - RightNode: rnv, - Label: v3.NullableLabel, - Changes: changes, - Breaking: BreakingModified(CompSchema, PropNullable), - Component: CompSchema, - Property: PropNullable, - Original: lSchema, - New: rSchema, - }) - lnv = nil - rnv = nil - - if lSchema != nil && lSchema.ReadOnly.ValueNode != nil { - lnv = lSchema.ReadOnly.ValueNode - } - if rSchema != nil && rSchema.ReadOnly.ValueNode != nil { - rnv = rSchema.ReadOnly.ValueNode - } - // ReadOnly - props = append(props, &PropertyCheck{ - LeftNode: lnv, - RightNode: rnv, - Label: v3.ReadOnlyLabel, - Changes: changes, - Breaking: BreakingModified(CompSchema, PropReadOnly), - Component: CompSchema, - Property: PropReadOnly, - Original: lSchema, - New: rSchema, - }) - lnv = nil - rnv = nil - - if lSchema != nil && lSchema.WriteOnly.ValueNode != nil { - lnv = lSchema.WriteOnly.ValueNode - } - if rSchema != nil && rSchema.WriteOnly.ValueNode != nil { - rnv = rSchema.WriteOnly.ValueNode - } - // WriteOnly - props = append(props, &PropertyCheck{ - LeftNode: lnv, - RightNode: rnv, - Label: v3.WriteOnlyLabel, - Changes: changes, - Breaking: BreakingModified(CompSchema, PropWriteOnly), - Component: CompSchema, - Property: PropWriteOnly, - Original: lSchema, - New: rSchema, - }) - lnv = nil - rnv = nil - - if lSchema != nil && lSchema.Example.ValueNode != nil { - lnv = lSchema.Example.ValueNode - } - if rSchema != nil && rSchema.Example.ValueNode != nil { - rnv = rSchema.Example.ValueNode - } - // Example - CheckPropertyAdditionOrRemovalWithEncoding(lnv, rnv, - v3.ExampleLabel, changes, false, lSchema, rSchema) - CheckForModificationWithEncoding(lnv, rnv, - v3.ExampleLabel, changes, false, lSchema, rSchema) - lnv = nil - rnv = nil - - if lSchema != nil && lSchema.Deprecated.ValueNode != nil { - lnv = lSchema.Deprecated.ValueNode - } - if rSchema != nil && rSchema.Deprecated.ValueNode 
!= nil { - rnv = rSchema.Deprecated.ValueNode - } - // Deprecated - props = append(props, &PropertyCheck{ - LeftNode: lnv, - RightNode: rnv, - Label: v3.DeprecatedLabel, - Changes: changes, - Breaking: BreakingModified(CompSchema, PropDeprecated), - Component: CompSchema, - Property: PropDeprecated, - Original: lSchema, - New: rSchema, - }) - - // Required - j := make(map[string]int) - k := make(map[string]int) - if lSchema != nil { - for i := range lSchema.Required.Value { - j[lSchema.Required.Value[i].Value] = i - } - } - if rSchema != nil { - for i := range rSchema.Required.Value { - k[rSchema.Required.Value[i].Value] = i - } - } - for g := range k { - if _, ok := j[g]; !ok { - CreateChange(changes, PropertyAdded, v3.RequiredLabel, - nil, rSchema.Required.Value[k[g]].GetValueNode(), BreakingAdded(CompSchema, PropRequired), nil, - rSchema.Required.Value[k[g]].GetValue) - } - } - for g := range j { - if _, ok := k[g]; !ok { - CreateChange(changes, PropertyRemoved, v3.RequiredLabel, - lSchema.Required.Value[j[g]].GetValueNode(), nil, BreakingRemoved(CompSchema, PropRequired), lSchema.Required.Value[j[g]].GetValue, - nil) - } - } - - // Enums - j = make(map[string]int) - k = make(map[string]int) - if lSchema != nil { - for i := range lSchema.Enum.Value { - j[toString(lSchema.Enum.Value[i].Value.Value)] = i - } - } - if rSchema != nil { - for i := range rSchema.Enum.Value { - k[toString(rSchema.Enum.Value[i].Value.Value)] = i - } - } - for g := range k { - if _, ok := j[g]; !ok { - CreateChange(changes, PropertyAdded, v3.EnumLabel, - nil, rSchema.Enum.Value[k[g]].GetValueNode(), BreakingAdded(CompSchema, PropEnum), nil, - rSchema.Enum.Value[k[g]].GetValue) - } - } - for g := range j { - if _, ok := k[g]; !ok { - CreateChange(changes, PropertyRemoved, v3.EnumLabel, - lSchema.Enum.Value[j[g]].GetValueNode(), nil, BreakingRemoved(CompSchema, PropEnum), lSchema.Enum.Value[j[g]].GetValue, - nil) - } - } - - // Discriminator - if (lSchema != nil && 
lSchema.Discriminator.Value != nil) && (rSchema != nil && rSchema.Discriminator.Value != nil) { - // check if hash matches, if not then compare. - if lSchema.Discriminator.Value.Hash() != rSchema.Discriminator.Value.Hash() { - sc.DiscriminatorChanges = CompareDiscriminator(lSchema.Discriminator.Value, rSchema.Discriminator.Value) - } - } - // added Discriminator - if (lSchema == nil || lSchema.Discriminator.Value == nil) && (rSchema != nil && rSchema.Discriminator.Value != nil) { - CreateChange(changes, ObjectAdded, v3.DiscriminatorLabel, - nil, rSchema.Discriminator.ValueNode, BreakingAdded(CompSchema, PropDiscriminator), nil, rSchema.Discriminator.Value) - } - // removed Discriminator - if (lSchema != nil && lSchema.Discriminator.Value != nil) && (rSchema == nil || rSchema.Discriminator.Value == nil) { - CreateChange(changes, ObjectRemoved, v3.DiscriminatorLabel, - lSchema.Discriminator.ValueNode, nil, BreakingRemoved(CompSchema, PropDiscriminator), lSchema.Discriminator.Value, nil) - } - - // ExternalDocs - if (lSchema != nil && lSchema.ExternalDocs.Value != nil) && (rSchema != nil && rSchema.ExternalDocs.Value != nil) { - // check if hash matches, if not then compare. 
- if lSchema.ExternalDocs.Value.Hash() != rSchema.ExternalDocs.Value.Hash() { - sc.ExternalDocChanges = CompareExternalDocs(lSchema.ExternalDocs.Value, rSchema.ExternalDocs.Value) - } - } - // added ExternalDocs - if (lSchema == nil || lSchema.ExternalDocs.Value == nil) && (rSchema != nil && rSchema.ExternalDocs.Value != nil) { - CreateChange(changes, ObjectAdded, v3.ExternalDocsLabel, - nil, rSchema.ExternalDocs.ValueNode, BreakingAdded(CompSchema, PropExternalDocs), nil, rSchema.ExternalDocs.Value) - } - // removed ExternalDocs - if (lSchema != nil && lSchema.ExternalDocs.Value != nil) && (rSchema == nil || rSchema.ExternalDocs.Value == nil) { - CreateChange(changes, ObjectRemoved, v3.ExternalDocsLabel, - lSchema.ExternalDocs.ValueNode, nil, BreakingRemoved(CompSchema, PropExternalDocs), lSchema.ExternalDocs.Value, nil) - } - - // 3.1 properties - // If - if (lSchema != nil && lSchema.If.Value != nil) && (rSchema != nil && rSchema.If.Value != nil) { - if !low.AreEqual(lSchema.If.Value, rSchema.If.Value) { - sc.IfChanges = CompareSchemas(lSchema.If.Value, rSchema.If.Value) - } - } - // added If - if (lSchema == nil || lSchema.If.Value == nil) && (rSchema != nil && rSchema.If.Value != nil) { - CreateChange(changes, ObjectAdded, v3.IfLabel, - nil, rSchema.If.ValueNode, BreakingAdded(CompSchema, PropIf), nil, rSchema.If.Value) - } - // removed If - if (lSchema != nil && lSchema.If.Value != nil) && (rSchema == nil || rSchema.If.Value == nil) { - CreateChange(changes, ObjectRemoved, v3.IfLabel, - lSchema.If.ValueNode, nil, BreakingRemoved(CompSchema, PropIf), lSchema.If.Value, nil) - } - // Else - if (lSchema != nil && lSchema.Else.Value != nil) && (rSchema == nil || rSchema.Else.Value != nil) { - if !low.AreEqual(lSchema.Else.Value, rSchema.Else.Value) { - sc.ElseChanges = CompareSchemas(lSchema.Else.Value, rSchema.Else.Value) - } - } - // added Else - if (lSchema == nil || lSchema.Else.Value == nil) && (rSchema != nil && rSchema.Else.Value != nil) { - 
CreateChange(changes, ObjectAdded, v3.ElseLabel, - nil, rSchema.Else.ValueNode, BreakingAdded(CompSchema, PropElse), nil, rSchema.Else.Value) - } - // removed Else - if (lSchema != nil && lSchema.Else.Value != nil) && (rSchema == nil || rSchema.Else.Value == nil) { - CreateChange(changes, ObjectRemoved, v3.ElseLabel, - lSchema.Else.ValueNode, nil, BreakingRemoved(CompSchema, PropElse), lSchema.Else.Value, nil) - } - // Then - if (lSchema != nil && lSchema.Then.Value != nil) && (rSchema != nil && rSchema.Then.Value != nil) { - if !low.AreEqual(lSchema.Then.Value, rSchema.Then.Value) { - sc.ThenChanges = CompareSchemas(lSchema.Then.Value, rSchema.Then.Value) - } - } - // added Then - if (lSchema == nil || lSchema.Then.Value == nil) && (rSchema != nil && rSchema.Then.Value != nil) { - CreateChange(changes, ObjectAdded, v3.ThenLabel, - nil, rSchema.Then.ValueNode, BreakingAdded(CompSchema, PropThen), nil, rSchema.Then.Value) - } - // removed Then - if (lSchema != nil && lSchema.Then.Value != nil) && (rSchema == nil || rSchema.Then.Value == nil) { - CreateChange(changes, ObjectRemoved, v3.ThenLabel, - lSchema.Then.ValueNode, nil, BreakingRemoved(CompSchema, PropThen), lSchema.Then.Value, nil) - } - // PropertyNames - if (lSchema != nil && lSchema.PropertyNames.Value != nil) && (rSchema != nil && rSchema.PropertyNames.Value != nil) { - if !low.AreEqual(lSchema.PropertyNames.Value, rSchema.PropertyNames.Value) { - sc.PropertyNamesChanges = CompareSchemas(lSchema.PropertyNames.Value, rSchema.PropertyNames.Value) - } - } - // added PropertyNames - if (lSchema == nil || lSchema.PropertyNames.Value == nil) && (rSchema != nil && rSchema.PropertyNames.Value != nil) { - CreateChange(changes, ObjectAdded, v3.PropertyNamesLabel, - nil, rSchema.PropertyNames.ValueNode, BreakingAdded(CompSchema, PropPropertyNames), nil, rSchema.PropertyNames.Value) - } - // removed PropertyNames - if (lSchema != nil && lSchema.PropertyNames.Value != nil) && (rSchema == nil || 
rSchema.PropertyNames.Value == nil) { - CreateChange(changes, ObjectRemoved, v3.PropertyNamesLabel, - lSchema.PropertyNames.ValueNode, nil, BreakingRemoved(CompSchema, PropPropertyNames), lSchema.PropertyNames.Value, nil) - } - // Contains - if (lSchema != nil && lSchema.Contains.Value != nil) && (rSchema != nil && rSchema.Contains.Value != nil) { - if !low.AreEqual(lSchema.Contains.Value, rSchema.Contains.Value) { - sc.ContainsChanges = CompareSchemas(lSchema.Contains.Value, rSchema.Contains.Value) - } - } - // added Contains - if (lSchema == nil || lSchema.Contains.Value == nil) && (rSchema != nil && rSchema.Contains.Value != nil) { - CreateChange(changes, ObjectAdded, v3.ContainsLabel, - nil, rSchema.Contains.ValueNode, BreakingAdded(CompSchema, PropContains), nil, rSchema.Contains.Value) - } - // removed Contains - if (lSchema != nil && lSchema.Contains.Value != nil) && (rSchema == nil || rSchema.Contains.Value == nil) { - CreateChange(changes, ObjectRemoved, v3.ContainsLabel, - lSchema.Contains.ValueNode, nil, BreakingRemoved(CompSchema, PropContains), lSchema.Contains.Value, nil) - } - // UnevaluatedItems - if (lSchema != nil && lSchema.UnevaluatedItems.Value != nil) && (rSchema != nil && rSchema.UnevaluatedItems.Value != nil) { - if !low.AreEqual(lSchema.UnevaluatedItems.Value, rSchema.UnevaluatedItems.Value) { - sc.UnevaluatedItemsChanges = CompareSchemas(lSchema.UnevaluatedItems.Value, rSchema.UnevaluatedItems.Value) - } - } - // added UnevaluatedItems - if (lSchema == nil || lSchema.UnevaluatedItems.Value == nil) && (rSchema != nil && rSchema.UnevaluatedItems.Value != nil) { - CreateChange(changes, ObjectAdded, v3.UnevaluatedItemsLabel, - nil, rSchema.UnevaluatedItems.ValueNode, BreakingAdded(CompSchema, PropUnevaluatedItems), nil, rSchema.UnevaluatedItems.Value) - } - // removed UnevaluatedItems - if (lSchema != nil && lSchema.UnevaluatedItems.Value != nil) && (rSchema == nil || rSchema.UnevaluatedItems.Value == nil) { - CreateChange(changes, 
ObjectRemoved, v3.UnevaluatedItemsLabel, - lSchema.UnevaluatedItems.ValueNode, nil, BreakingRemoved(CompSchema, PropUnevaluatedItems), lSchema.UnevaluatedItems.Value, nil) - } - - // UnevaluatedProperties - if (lSchema != nil && lSchema.UnevaluatedProperties.Value != nil) && (rSchema != nil && rSchema.UnevaluatedProperties.Value != nil) { - if lSchema.UnevaluatedProperties.Value.IsA() && rSchema.UnevaluatedProperties.Value.IsA() { - if !low.AreEqual(lSchema.UnevaluatedProperties.Value.A, rSchema.UnevaluatedProperties.Value.A) { - sc.UnevaluatedPropertiesChanges = CompareSchemas(lSchema.UnevaluatedProperties.Value.A, rSchema.UnevaluatedProperties.Value.A) - } - } else { - if lSchema.UnevaluatedProperties.Value.IsB() && rSchema.UnevaluatedProperties.Value.IsB() { - if lSchema.UnevaluatedProperties.Value.B != rSchema.UnevaluatedProperties.Value.B { - CreateChange(changes, Modified, v3.UnevaluatedPropertiesLabel, - lSchema.UnevaluatedProperties.ValueNode, rSchema.UnevaluatedProperties.ValueNode, BreakingModified(CompSchema, PropUnevaluatedProps), - lSchema.UnevaluatedProperties.Value.B, rSchema.UnevaluatedProperties.Value.B) - } - } else { - CreateChange(changes, Modified, v3.UnevaluatedPropertiesLabel, - lSchema.UnevaluatedProperties.ValueNode, rSchema.UnevaluatedProperties.ValueNode, BreakingModified(CompSchema, PropUnevaluatedProps), - lSchema.UnevaluatedProperties.Value.B, rSchema.UnevaluatedProperties.Value.B) - } - } - } - - // added UnevaluatedProperties - if (lSchema == nil || lSchema.UnevaluatedProperties.Value == nil) && (rSchema != nil && rSchema.UnevaluatedProperties.Value != nil) { - CreateChange(changes, ObjectAdded, v3.UnevaluatedPropertiesLabel, - nil, rSchema.UnevaluatedProperties.ValueNode, BreakingAdded(CompSchema, PropUnevaluatedProps), nil, rSchema.UnevaluatedProperties.Value) - } - // removed UnevaluatedProperties - if (lSchema != nil && lSchema.UnevaluatedProperties.Value != nil) && (rSchema == nil || rSchema.UnevaluatedProperties.Value == nil) { 
- CreateChange(changes, ObjectRemoved, v3.UnevaluatedPropertiesLabel, - lSchema.UnevaluatedProperties.ValueNode, nil, BreakingRemoved(CompSchema, PropUnevaluatedProps), lSchema.UnevaluatedProperties.Value, nil) - } - - // Not - if (lSchema != nil && lSchema.Not.Value != nil) && (rSchema != nil && rSchema.Not.Value != nil) { - if !low.AreEqual(lSchema.Not.Value, rSchema.Not.Value) { - sc.NotChanges = CompareSchemas(lSchema.Not.Value, rSchema.Not.Value) - } - } - // added Not - if (lSchema == nil || lSchema.Not.Value == nil) && (rSchema != nil && rSchema.Not.Value != nil) { - CreateChange(changes, ObjectAdded, v3.NotLabel, - nil, rSchema.Not.ValueNode, BreakingAdded(CompSchema, PropNot), nil, rSchema.Not.Value) - } - // removed not - if (lSchema != nil && lSchema.Not.Value != nil) && (rSchema == nil || rSchema.Not.Value == nil) { - CreateChange(changes, ObjectRemoved, v3.NotLabel, - lSchema.Not.ValueNode, nil, BreakingRemoved(CompSchema, PropNot), lSchema.Not.Value, nil) - } - - // items - if (lSchema != nil && lSchema.Items.Value != nil) && (rSchema != nil && rSchema.Items.Value != nil) { - if lSchema.Items.Value.IsA() && rSchema.Items.Value.IsA() { - if !low.AreEqual(lSchema.Items.Value.A, rSchema.Items.Value.A) { - sc.ItemsChanges = CompareSchemas(lSchema.Items.Value.A, rSchema.Items.Value.A) - } - } else { - CreateChange(changes, Modified, v3.ItemsLabel, - lSchema.Items.ValueNode, rSchema.Items.ValueNode, BreakingModified(CompSchema, PropItems), lSchema.Items.Value.B, rSchema.Items.Value.B) - } - } - // added Items - if (lSchema == nil || lSchema.Items.Value == nil) && (rSchema != nil && rSchema.Items.Value != nil) { - CreateChange(changes, ObjectAdded, v3.ItemsLabel, - nil, rSchema.Items.ValueNode, BreakingAdded(CompSchema, PropItems), nil, rSchema.Items.Value) - } - // removed Items - if (lSchema != nil && lSchema.Items.Value != nil) && (rSchema == nil || rSchema.Items.Value == nil) { - CreateChange(changes, ObjectRemoved, v3.ItemsLabel, - 
lSchema.Items.ValueNode, nil, BreakingRemoved(CompSchema, PropItems), lSchema.Items.Value, nil) - } - - // $dynamicAnchor (JSON Schema 2020-12) - lnv = nil - rnv = nil - if lSchema != nil && lSchema.DynamicAnchor.ValueNode != nil { - lnv = lSchema.DynamicAnchor.ValueNode - } - if rSchema != nil && rSchema.DynamicAnchor.ValueNode != nil { - rnv = rSchema.DynamicAnchor.ValueNode - } - props = append(props, &PropertyCheck{ - LeftNode: lnv, - RightNode: rnv, - Label: v3.DynamicAnchorLabel, - Changes: changes, - Breaking: BreakingModified(CompSchema, PropDynamicAnchor), - Component: CompSchema, - Property: PropDynamicAnchor, - Original: lSchema, - New: rSchema, - }) - lnv = nil - rnv = nil - - // $dynamicRef (JSON Schema 2020-12) - if lSchema != nil && lSchema.DynamicRef.ValueNode != nil { - lnv = lSchema.DynamicRef.ValueNode - } - if rSchema != nil && rSchema.DynamicRef.ValueNode != nil { - rnv = rSchema.DynamicRef.ValueNode - } - props = append(props, &PropertyCheck{ - LeftNode: lnv, - RightNode: rnv, - Label: v3.DynamicRefLabel, - Changes: changes, - Breaking: BreakingModified(CompSchema, PropDynamicRef), - Component: CompSchema, - Property: PropDynamicRef, - Original: lSchema, - New: rSchema, - }) - lnv = nil - rnv = nil - - // $id (JSON Schema 2020-12) - if lSchema != nil && lSchema.Id.ValueNode != nil { - lnv = lSchema.Id.ValueNode - } - if rSchema != nil && rSchema.Id.ValueNode != nil { - rnv = rSchema.Id.ValueNode - } - props = append(props, &PropertyCheck{ - LeftNode: lnv, - RightNode: rnv, - Label: base.IdLabel, - Changes: changes, - Breaking: BreakingModified(CompSchema, PropId), - Component: CompSchema, - Property: PropId, - Original: lSchema, - New: rSchema, - }) - lnv = nil - rnv = nil - - // $comment (JSON Schema 2020-12) - if lSchema != nil && lSchema.Comment.ValueNode != nil { - lnv = lSchema.Comment.ValueNode - } - if rSchema != nil && rSchema.Comment.ValueNode != nil { - rnv = rSchema.Comment.ValueNode - } - props = append(props, &PropertyCheck{ - 
LeftNode: lnv, - RightNode: rnv, - Label: base.CommentLabel, - Changes: changes, - Breaking: BreakingModified(CompSchema, PropComment), - Component: CompSchema, - Property: PropComment, - Original: lSchema, - New: rSchema, - }) - lnv = nil - rnv = nil - - // contentSchema (JSON Schema 2020-12) - recursive schema comparison - if lSchema != nil && !lSchema.ContentSchema.IsEmpty() && rSchema != nil && !rSchema.ContentSchema.IsEmpty() { - sc.ContentSchemaChanges = CompareSchemas(lSchema.ContentSchema.Value, rSchema.ContentSchema.Value) - } - if lSchema != nil && !lSchema.ContentSchema.IsEmpty() && (rSchema == nil || rSchema.ContentSchema.IsEmpty()) { - CreateChange(changes, PropertyRemoved, base.ContentSchemaLabel, - lSchema.ContentSchema.ValueNode, nil, - BreakingRemoved(CompSchema, PropContentSchema), - lSchema.ContentSchema.Value, nil) - } - if (lSchema == nil || lSchema.ContentSchema.IsEmpty()) && rSchema != nil && !rSchema.ContentSchema.IsEmpty() { - CreateChange(changes, PropertyAdded, base.ContentSchemaLabel, - nil, rSchema.ContentSchema.ValueNode, - BreakingAdded(CompSchema, PropContentSchema), - nil, rSchema.ContentSchema.Value) - } - - // $vocabulary (JSON Schema 2020-12) - map comparison - // note: vocabulary changes are stored in VocabularyChanges and counted separately - // in TotalChanges(), so they should NOT be appended to the main changes slice - var lVocab, rVocab *orderedmap.Map[low.KeyReference[string], low.ValueReference[bool]] - if lSchema != nil { - lVocab = lSchema.Vocabulary.Value - } - if rSchema != nil { - rVocab = rSchema.Vocabulary.Value - } - if lVocab != nil || rVocab != nil { - sc.VocabularyChanges = checkVocabularyChanges(lVocab, rVocab) - } - - // check extensions - var lext *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] - var rext *orderedmap.Map[low.KeyReference[string], low.ValueReference[*yaml.Node]] - if lSchema != nil { - lext = lSchema.Extensions - } - if rSchema != nil { - rext = rSchema.Extensions - 
} - if lext != nil && rext != nil { - sc.ExtensionChanges = CompareExtensions(lext, rext) - } - - // check core properties - CheckProperties(props) - - // Post-process: Update context line numbers for Type changes to use schema KeyNode for better context - // This provides line where "schema:" is defined, not "type: value" - if changes != nil && len(*changes) > 0 { - for _, change := range *changes { - if change.Property == v3.TypeLabel && change.Context != nil { - if lProxy != nil && lProxy.GetKeyNode() != nil { - line := lProxy.GetKeyNode().Line - col := lProxy.GetKeyNode().Column - change.Context.OriginalLine = &line - change.Context.OriginalColumn = &col - } - if rProxy != nil && rProxy.GetKeyNode() != nil { - line := rProxy.GetKeyNode().Line - col := rProxy.GetKeyNode().Column - change.Context.NewLine = &line - change.Context.NewColumn = &col - } - break // found the type change, no need to continue - } - } - } -} - -func checkExamples(lSchema *base.Schema, rSchema *base.Schema, changes *[]*Change) { - if lSchema == nil && rSchema == nil { - return - } - - // check examples (3.1+) - var lExampKey, rExampKey []string - lExampN := make(map[string]*yaml.Node) - rExampN := make(map[string]*yaml.Node) - lExampVal := make(map[string]any) - rExampVal := make(map[string]any) - - // create keys by hashing values - if lSchema != nil && lSchema.Examples.ValueNode != nil { - for i := range lSchema.Examples.ValueNode.Content { - key := low.GenerateHashString(lSchema.Examples.ValueNode.Content[i].Value) - lExampKey = append(lExampKey, key) - lExampVal[key] = lSchema.Examples.ValueNode.Content[i].Value - lExampN[key] = lSchema.Examples.ValueNode.Content[i] - - } - } - if rSchema != nil && rSchema.Examples.ValueNode != nil { - for i := range rSchema.Examples.ValueNode.Content { - key := low.GenerateHashString(rSchema.Examples.ValueNode.Content[i].Value) - rExampKey = append(rExampKey, key) - rExampVal[key] = rSchema.Examples.ValueNode.Content[i].Value - rExampN[key] = 
rSchema.Examples.ValueNode.Content[i] - } - } - - // if examples equal lengths, check for equality - if len(lExampKey) == len(rExampKey) { - for i := range lExampKey { - if lExampKey[i] != rExampKey[i] { - CreateChangeWithEncoding(changes, Modified, v3.ExamplesLabel, - lExampN[lExampKey[i]], rExampN[rExampKey[i]], BreakingModified(CompSchema, PropExamples), - lExampVal[lExampKey[i]], rExampVal[rExampKey[i]]) - } - } - } - // examples were removed. - if len(lExampKey) > len(rExampKey) { - for i := range lExampKey { - if i < len(rExampKey) && lExampKey[i] != rExampKey[i] { - CreateChangeWithEncoding(changes, Modified, v3.ExamplesLabel, - lExampN[lExampKey[i]], rExampN[rExampKey[i]], BreakingModified(CompSchema, PropExamples), - lExampVal[lExampKey[i]], rExampVal[rExampKey[i]]) - } - if i >= len(rExampKey) { - CreateChangeWithEncoding(changes, ObjectRemoved, v3.ExamplesLabel, - lExampN[lExampKey[i]], nil, BreakingRemoved(CompSchema, PropExamples), - lExampVal[lExampKey[i]], nil) - } - } - } - - // examples were added - if len(lExampKey) < len(rExampKey) { - for i := range rExampKey { - if i < len(lExampKey) && lExampKey[i] != rExampKey[i] { - CreateChangeWithEncoding(changes, Modified, v3.ExamplesLabel, - lExampN[lExampKey[i]], rExampN[rExampKey[i]], BreakingModified(CompSchema, PropExamples), - lExampVal[lExampKey[i]], rExampVal[rExampKey[i]]) - } - if i >= len(lExampKey) { - CreateChangeWithEncoding(changes, ObjectAdded, v3.ExamplesLabel, - nil, rExampN[rExampKey[i]], BreakingAdded(CompSchema, PropExamples), - nil, rExampVal[rExampKey[i]]) - } - } - } -} - -func extractSchemaChanges( - lSchema []low.ValueReference[*base.SchemaProxy], - rSchema []low.ValueReference[*base.SchemaProxy], - label string, - sc *[]*SchemaChanges, - changes *[]*Change, -) { - // if there is nothing here, there is nothing to do. 
- if lSchema == nil && rSchema == nil { - return - } - - x := "%x" - // create hash key maps to check equality - lKeys := make([]string, 0, len(lSchema)) - rKeys := make([]string, 0, len(rSchema)) - lEntities := make(map[string]*base.SchemaProxy) - rEntities := make(map[string]*base.SchemaProxy) - - for h := range lSchema { - q := lSchema[h].Value - z := fmt.Sprintf(x, q.Hash()) - lKeys = append(lKeys, z) - lEntities[z] = q - } - for h := range rSchema { - q := rSchema[h].Value - z := fmt.Sprintf(x, q.Hash()) - rKeys = append(rKeys, z) - rEntities[z] = q - } - - // check for identical lengths - if len(lKeys) == len(rKeys) { - for w := range lKeys { - // keys are different, which means there are changes. - if lKeys[w] != rKeys[w] { - *sc = append(*sc, CompareSchemas(lEntities[lKeys[w]], rEntities[rKeys[w]])) - } - } - } - - // things were removed - if len(lKeys) > len(rKeys) { - for w := range lKeys { - if w < len(rKeys) && lKeys[w] != rKeys[w] { - *sc = append(*sc, CompareSchemas(lEntities[lKeys[w]], rEntities[rKeys[w]])) - } - if w >= len(rKeys) { - // determine breaking status based on label - breaking := true - switch label { - case v3.AllOfLabel: - breaking = BreakingRemoved(CompSchema, PropAllOf) - case v3.AnyOfLabel: - breaking = BreakingRemoved(CompSchema, PropAnyOf) - case v3.OneOfLabel: - breaking = BreakingRemoved(CompSchema, PropOneOf) - case v3.PrefixItemsLabel: - breaking = BreakingRemoved(CompSchema, PropPrefixItems) - } - CreateChange(changes, ObjectRemoved, label, - lEntities[lKeys[w]].GetValueNode(), nil, breaking, lEntities[lKeys[w]], nil) - } - } - } - - // things were added - if len(rKeys) > len(lKeys) { - for w := range rKeys { - if w < len(lKeys) && rKeys[w] != lKeys[w] { - *sc = append(*sc, CompareSchemas(lEntities[lKeys[w]], rEntities[rKeys[w]])) - } - if w >= len(lKeys) { - // determine breaking status based on label - breaking := false - switch label { - case v3.AllOfLabel: - breaking = BreakingAdded(CompSchema, PropAllOf) - case 
v3.AnyOfLabel: - breaking = BreakingAdded(CompSchema, PropAnyOf) - case v3.OneOfLabel: - breaking = BreakingAdded(CompSchema, PropOneOf) - case v3.PrefixItemsLabel: - breaking = BreakingAdded(CompSchema, PropPrefixItems) - } - CreateChange(changes, ObjectAdded, label, - nil, rEntities[rKeys[w]].GetValueNode(), breaking, nil, rEntities[rKeys[w]]) - } - } - } -} - -// checkDependentRequiredChanges compares two DependentRequired maps and returns any changes found -func checkDependentRequiredChanges( - left, right *orderedmap.Map[low.KeyReference[string], low.ValueReference[[]string]], -) []*Change { - // If both are nil, no changes - if left == nil && right == nil { - return nil - } - - var changes []*Change - - leftMap := make(map[string][]string) - rightMap := make(map[string][]string) - - // Build left map - if left != nil { - for prop, reqArray := range left.FromOldest() { - leftMap[prop.Value] = reqArray.Value - } - } - - // Build right map - if right != nil { - for prop, reqArray := range right.FromOldest() { - rightMap[prop.Value] = reqArray.Value - } - } - - // Check for property additions and modifications - for prop, rightReqs := range rightMap { - if leftReqs, exists := leftMap[prop]; exists { - // Property exists in both, check if requirements changed - if !slicesEqual(leftReqs, rightReqs) { - CreateChange(&changes, Modified, prop, - getNodeForProperty(left, prop), getNodeForProperty(right, prop), - BreakingModified(CompSchema, PropDependentRequired), leftReqs, rightReqs) - } - } else { - // Property added - CreateChange(&changes, PropertyAdded, prop, - nil, getNodeForProperty(right, prop), - BreakingAdded(CompSchema, PropDependentRequired), nil, rightReqs) - } - } - - // Check for property removals - for prop, leftReqs := range leftMap { - if _, exists := rightMap[prop]; !exists { - CreateChange(&changes, PropertyRemoved, prop, - getNodeForProperty(left, prop), nil, - BreakingRemoved(CompSchema, PropDependentRequired), leftReqs, nil) - } - } - - return 
changes -} - -// slicesEqual compares two string slices for equality (order matters) -func slicesEqual(a, b []string) bool { - if len(a) != len(b) { - return false - } - for i, v := range a { - if b[i] != v { - return false - } - } - return true -} - -// getNodeForProperty gets the YAML node for a specific property in a DependentRequired map -func getNodeForProperty(depMap *orderedmap.Map[low.KeyReference[string], low.ValueReference[[]string]], prop string) *yaml.Node { - if depMap == nil { - return nil - } - for key, value := range depMap.FromOldest() { - if key.Value == prop { - return value.ValueNode - } - } - return nil -} - -// checkVocabularyChanges compares $vocabulary maps and returns a list of changes. -// the caller is responsible for appending the returned changes to their main changes slice. -func checkVocabularyChanges(lVocab, rVocab *orderedmap.Map[low.KeyReference[string], low.ValueReference[bool]]) []*Change { - if lVocab == nil && rVocab == nil { - return nil - } - - // pre-allocate maps with size hints for better memory efficiency - lSize := orderedmap.Len(lVocab) - rSize := orderedmap.Len(rVocab) - - lVocabMap := make(map[string]bool, lSize) - lVocabNodes := make(map[string]*yaml.Node, lSize) - rVocabMap := make(map[string]bool, rSize) - rVocabNodes := make(map[string]*yaml.Node, rSize) - - if lVocab != nil { - for k, v := range lVocab.FromOldest() { - lVocabMap[k.Value] = v.Value - lVocabNodes[k.Value] = v.ValueNode - } - } - if rVocab != nil { - for k, v := range rVocab.FromOldest() { - rVocabMap[k.Value] = v.Value - rVocabNodes[k.Value] = v.ValueNode - } - } - - // pre-allocate result slice with reasonable capacity - var vocabChanges []*Change - - // check for removed or modified vocabularies - for uri, lVal := range lVocabMap { - if rVal, ok := rVocabMap[uri]; ok { - // vocabulary exists in both - check if value changed - if lVal != rVal { - c := &Change{ - Property: base.VocabularyLabel, - ChangeType: Modified, - Original: 
fmt.Sprintf("%s=%v", uri, lVal), - New: fmt.Sprintf("%s=%v", uri, rVal), - Breaking: BreakingModified(CompSchema, PropVocabulary), - OriginalObject: lVocabMap, - NewObject: rVocabMap, - } - if lVocabNodes[uri] != nil { - c.Context = CreateContext(lVocabNodes[uri], rVocabNodes[uri]) - } - vocabChanges = append(vocabChanges, c) - } - } else { - // vocabulary was removed - c := &Change{ - Property: base.VocabularyLabel, - ChangeType: PropertyRemoved, - Original: uri, - Breaking: BreakingRemoved(CompSchema, PropVocabulary), - OriginalObject: lVocabMap, - } - if lVocabNodes[uri] != nil { - c.Context = CreateContext(lVocabNodes[uri], nil) - } - vocabChanges = append(vocabChanges, c) - } - } - - // check for added vocabularies - for uri := range rVocabMap { - if _, ok := lVocabMap[uri]; !ok { - // vocabulary was added - c := &Change{ - Property: base.VocabularyLabel, - ChangeType: PropertyAdded, - New: uri, - Breaking: BreakingAdded(CompSchema, PropVocabulary), - NewObject: rVocabMap, - } - if rVocabNodes[uri] != nil { - c.Context = CreateContext(nil, rVocabNodes[uri]) - } - vocabChanges = append(vocabChanges, c) - } - } - - return vocabChanges -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/scopes.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/scopes.go deleted file mode 100644 index 4e38c9814c0..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/scopes.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - "github.com/pb33f/libopenapi/datamodel/low" - v2 "github.com/pb33f/libopenapi/datamodel/low/v2" - v3 "github.com/pb33f/libopenapi/datamodel/low/v3" -) - -// ScopesChanges represents changes between two Swagger Scopes Objects -type ScopesChanges struct { - *PropertyChanges - ExtensionChanges *ExtensionChanges `json:"extensions,omitempty" yaml:"extensions,omitempty"` -} - -// GetAllChanges returns a slice of 
all changes made between Scopes objects -func (s *ScopesChanges) GetAllChanges() []*Change { - if s == nil { - return nil - } - var changes []*Change - changes = append(changes, s.Changes...) - if s.ExtensionChanges != nil { - changes = append(changes, s.ExtensionChanges.GetAllChanges()...) - } - return changes -} - -// TotalChanges returns the total changes found between two Swagger Scopes objects. -func (s *ScopesChanges) TotalChanges() int { - if s == nil { - return 0 - } - c := s.PropertyChanges.TotalChanges() - if s.ExtensionChanges != nil { - c += s.ExtensionChanges.TotalChanges() - } - return c -} - -// TotalBreakingChanges returns the total number of breaking changes between two Swagger Scopes objects. -func (s *ScopesChanges) TotalBreakingChanges() int { - return s.PropertyChanges.TotalBreakingChanges() -} - -// CompareScopes compares a left and right Swagger Scopes objects for changes. If anything is found, returns -// a pointer to ScopesChanges, or returns nil if nothing is found. 
-func CompareScopes(l, r *v2.Scopes) *ScopesChanges { - if low.AreEqual(l, r) { - return nil - } - var changes []*Change - for k, v := range l.Values.FromOldest() { - if r != nil && r.FindScope(k.Value) == nil { - CreateChange(&changes, ObjectRemoved, v3.Scopes, - v.ValueNode, nil, BreakingRemoved(CompOAuthFlow, PropScopes), - k.Value, nil) - continue - } - if r != nil && r.FindScope(k.Value) != nil { - if v.Value != r.FindScope(k.Value).Value { - CreateChange(&changes, Modified, v3.Scopes, - v.ValueNode, r.FindScope(k.Value).ValueNode, BreakingModified(CompOAuthFlow, PropScopes), - v.Value, r.FindScope(k.Value).Value) - } - } - } - for k, v := range r.Values.FromOldest() { - if l != nil && l.FindScope(k.Value) == nil { - CreateChange(&changes, ObjectAdded, v3.Scopes, - nil, v.ValueNode, BreakingAdded(CompOAuthFlow, PropScopes), - nil, k.Value) - } - } - - sc := new(ScopesChanges) - sc.PropertyChanges = NewPropertyChanges(changes) - sc.ExtensionChanges = CompareExtensions(l.Extensions, r.Extensions) - return sc -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/security_requirement.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/security_requirement.go deleted file mode 100644 index 78a5d2e7b3d..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/security_requirement.go +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2022-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/datamodel/low/base" - "github.com/pb33f/libopenapi/orderedmap" - "go.yaml.in/yaml/v4" -) - -// SecurityRequirementChanges represents changes found between two SecurityRequirement Objects. 
-type SecurityRequirementChanges struct { - *PropertyChanges -} - -// GetAllChanges returns a slice of all changes made between SecurityRequirement objects -func (s *SecurityRequirementChanges) GetAllChanges() []*Change { - if s == nil { - return nil - } - return s.Changes -} - -// TotalChanges returns the total number of changes between two SecurityRequirement Objects. -func (s *SecurityRequirementChanges) TotalChanges() int { - if s == nil { - return 0 - } - return s.PropertyChanges.TotalChanges() -} - -// TotalBreakingChanges returns the total number of breaking changes between two SecurityRequirement Objects. -func (s *SecurityRequirementChanges) TotalBreakingChanges() int { - return s.PropertyChanges.TotalBreakingChanges() -} - -// CompareSecurityRequirement compares left and right SecurityRequirement objects for changes. If anything -// is found, then a pointer to SecurityRequirementChanges is returned, otherwise nil. -func CompareSecurityRequirement(l, r *base.SecurityRequirement) *SecurityRequirementChanges { - var changes []*Change - sc := new(SecurityRequirementChanges) - - if low.AreEqual(l, r) { - return nil - } - checkSecurityRequirement(l.Requirements.Value, r.Requirements.Value, &changes) - sc.PropertyChanges = NewPropertyChanges(changes) - return sc -} - -func removedSecurityRequirement(vn *yaml.Node, schemeName, scopeName string, changes *[]*Change) { - property := schemeName - value := scopeName - var node *yaml.Node = vn - breaking := BreakingRemoved(CompSecurityRequirement, PropSchemes) - if scopeName == "" { - // entire scheme was removed, use scheme name as value - value = schemeName - // Don't use the node for entire scheme removal, as it may be an empty array [] - node = nil - } else { - // scope was removed - breaking = BreakingRemoved(CompSecurityRequirement, PropScopes) - } - CreateChange(changes, ObjectRemoved, property, - node, nil, breaking, value, nil) -} - -func addedSecurityRequirement(vn *yaml.Node, schemeName, scopeName string, 
changes *[]*Change) { - property := schemeName - value := scopeName - var node *yaml.Node = vn - breaking := BreakingAdded(CompSecurityRequirement, PropSchemes) - if scopeName == "" { - // entire scheme was added, use scheme name as value - value = schemeName - // Don't use the node for entire scheme addition, as it may be an empty array [] - node = nil - } else { - // scope was added - breaking = BreakingAdded(CompSecurityRequirement, PropScopes) - } - CreateChange(changes, ObjectAdded, property, - nil, node, breaking, nil, value) -} - -// tricky to do this correctly, this is my solution. -func checkSecurityRequirement(lSec, rSec *orderedmap.Map[low.KeyReference[string], low.ValueReference[[]low.ValueReference[string]]], - changes *[]*Change, -) { - lKeys := make([]string, orderedmap.Len(lSec)) - rKeys := make([]string, orderedmap.Len(rSec)) - lValues := make(map[string]low.ValueReference[[]low.ValueReference[string]]) - rValues := make(map[string]low.ValueReference[[]low.ValueReference[string]]) - var n, z int - for k, v := range lSec.FromOldest() { - lKeys[n] = k.Value - lValues[k.Value] = v - n++ - } - for k, v := range rSec.FromOldest() { - rKeys[z] = k.Value - rValues[k.Value] = v - z++ - } - - for z = range lKeys { - if z < len(rKeys) { - if _, ok := rValues[lKeys[z]]; !ok { - removedSecurityRequirement(lValues[lKeys[z]].ValueNode, lKeys[z], "", changes) - continue - } - - lValue := lValues[lKeys[z]].Value - rValue := rValues[lKeys[z]].Value - - // check if actual values match up - lRoleKeys := make([]string, len(lValue)) - rRoleKeys := make([]string, len(rValue)) - lRoleValues := make(map[string]low.ValueReference[string]) - rRoleValues := make(map[string]low.ValueReference[string]) - var t, k int - for i := range lValue { - if lValue[i].Value == "" { - continue // Skip empty scope values (from malformed YAML) - } - lRoleKeys[t] = lValue[i].Value - lRoleValues[lValue[i].Value] = lValue[i] - t++ - } - lRoleKeys = lRoleKeys[:t] // Trim to actual size - - for 
i := range rValue { - if rValue[i].Value == "" { - continue // Skip empty scope values (from malformed YAML) - } - rRoleKeys[k] = rValue[i].Value - rRoleValues[rValue[i].Value] = rValue[i] - k++ - } - rRoleKeys = rRoleKeys[:k] // Trim to actual size - - for t = range lRoleKeys { - if t < len(rRoleKeys) { - if _, ok := rRoleValues[lRoleKeys[t]]; !ok { - removedSecurityRequirement(lRoleValues[lRoleKeys[t]].ValueNode, lKeys[z], lRoleKeys[t], changes) - continue - } - } - if t >= len(rRoleKeys) { - if _, ok := rRoleValues[lRoleKeys[t]]; !ok { - removedSecurityRequirement(lRoleValues[lRoleKeys[t]].ValueNode, lKeys[z], lRoleKeys[t], changes) - } - } - } - for t = range rRoleKeys { - if t < len(lRoleKeys) { - if _, ok := lRoleValues[rRoleKeys[t]]; !ok { - addedSecurityRequirement(rRoleValues[rRoleKeys[t]].ValueNode, rKeys[z], rRoleKeys[t], changes) - continue - } - } - if t >= len(lRoleKeys) { - if _, ok := lRoleValues[rRoleKeys[t]]; !ok { - addedSecurityRequirement(rRoleValues[rRoleKeys[t]].ValueNode, rKeys[z], rRoleKeys[t], changes) - } - } - } - - } - if z >= len(rKeys) { - if _, ok := rValues[lKeys[z]]; !ok { - removedSecurityRequirement(lValues[lKeys[z]].ValueNode, lKeys[z], "", changes) - } - } - } - for z = range rKeys { - if z < len(lKeys) { - if _, ok := lValues[rKeys[z]]; !ok { - addedSecurityRequirement(rValues[rKeys[z]].ValueNode, rKeys[z], "", changes) - continue - } - } - if z >= len(lKeys) { - if _, ok := lValues[rKeys[z]]; !ok { - addedSecurityRequirement(rValues[rKeys[z]].ValueNode, rKeys[z], "", changes) - } - } - } -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/security_scheme.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/security_scheme.go deleted file mode 100644 index 4b6b5d47189..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/security_scheme.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2022-2025 Princess Beef Heavy Industries, LLC / Dave Shanley -// SPDX-License-Identifier: MIT - 
-package model - -import ( - "reflect" - - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/datamodel/low/v2" - "github.com/pb33f/libopenapi/datamodel/low/v3" -) - -// SecuritySchemeChanges represents changes made between Swagger or OpenAPI SecurityScheme Objects. -type SecuritySchemeChanges struct { - *PropertyChanges - ExtensionChanges *ExtensionChanges `json:"extensions,omitempty" yaml:"extensions,omitempty"` - - // OpenAPI Version - OAuthFlowChanges *OAuthFlowsChanges `json:"oAuthFlow,omitempty" yaml:"oAuthFlow,omitempty"` - - // Swagger Version - ScopesChanges *ScopesChanges `json:"scopes,omitempty" yaml:"scopes,omitempty"` -} - -// GetAllChanges returns a slice of all changes made between SecurityRequirement objects -func (ss *SecuritySchemeChanges) GetAllChanges() []*Change { - if ss == nil { - return nil - } - var changes []*Change - changes = append(changes, ss.Changes...) - if ss.OAuthFlowChanges != nil { - changes = append(changes, ss.OAuthFlowChanges.GetAllChanges()...) - } - if ss.ScopesChanges != nil { - changes = append(changes, ss.ScopesChanges.GetAllChanges()...) - } - if ss.ExtensionChanges != nil { - changes = append(changes, ss.ExtensionChanges.GetAllChanges()...) - } - return changes -} - -// TotalChanges represents total changes found between two Swagger or OpenAPI SecurityScheme instances. -func (ss *SecuritySchemeChanges) TotalChanges() int { - if ss == nil { - return 0 - } - c := ss.PropertyChanges.TotalChanges() - if ss.OAuthFlowChanges != nil { - c += ss.OAuthFlowChanges.TotalChanges() - } - if ss.ScopesChanges != nil { - c += ss.ScopesChanges.TotalChanges() - } - if ss.ExtensionChanges != nil { - c += ss.ExtensionChanges.TotalChanges() - } - return c -} - -// TotalBreakingChanges returns total number of breaking changes between two SecurityScheme Objects. 
-func (ss *SecuritySchemeChanges) TotalBreakingChanges() int { - c := ss.PropertyChanges.TotalBreakingChanges() - if ss.OAuthFlowChanges != nil { - c += ss.OAuthFlowChanges.TotalBreakingChanges() - } - if ss.ScopesChanges != nil { - c += ss.ScopesChanges.TotalBreakingChanges() - } - return c -} - -// CompareSecuritySchemesV2 is a Swagger type safe proxy for CompareSecuritySchemes -func CompareSecuritySchemesV2(l, r *v2.SecurityScheme) *SecuritySchemeChanges { - return CompareSecuritySchemes(l, r) -} - -// CompareSecuritySchemesV3 is an OpenAPI type safe proxt for CompareSecuritySchemes -func CompareSecuritySchemesV3(l, r *v3.SecurityScheme) *SecuritySchemeChanges { - return CompareSecuritySchemes(l, r) -} - -// CompareSecuritySchemes compares left and right Swagger or OpenAPI Security Scheme objects for changes. -// If anything is found, returns a pointer to *SecuritySchemeChanges or nil if nothing is found. -func CompareSecuritySchemes(l, r any) *SecuritySchemeChanges { - var props []*PropertyCheck - var changes []*Change - - sc := new(SecuritySchemeChanges) - if reflect.TypeOf(&v2.SecurityScheme{}) == reflect.TypeOf(l) && - reflect.TypeOf(&v2.SecurityScheme{}) == reflect.TypeOf(r) { - - lSS := l.(*v2.SecurityScheme) - rSS := r.(*v2.SecurityScheme) - - if low.AreEqual(lSS, rSS) { - return nil - } - addPropertyCheck(&props, lSS.Type.ValueNode, rSS.Type.ValueNode, - lSS.Type.Value, rSS.Type.Value, &changes, v3.TypeLabel, true, CompSecurityScheme, PropType) - - addPropertyCheck(&props, lSS.Description.ValueNode, rSS.Description.ValueNode, - lSS.Description.Value, rSS.Description.Value, &changes, v3.DescriptionLabel, false, CompSecurityScheme, PropDescription) - - addPropertyCheck(&props, lSS.Name.ValueNode, rSS.Name.ValueNode, - lSS.Name.Value, rSS.Name.Value, &changes, v3.NameLabel, true, CompSecurityScheme, PropName) - - addPropertyCheck(&props, lSS.In.ValueNode, rSS.In.ValueNode, - lSS.In.Value, rSS.In.Value, &changes, v3.InLabel, true, CompSecurityScheme, PropIn) 
- - addPropertyCheck(&props, lSS.Flow.ValueNode, rSS.Flow.ValueNode, - lSS.Flow.Value, rSS.Flow.Value, &changes, v3.FlowLabel, true, CompSecurityScheme, PropFlow) - - addPropertyCheck(&props, lSS.AuthorizationUrl.ValueNode, rSS.AuthorizationUrl.ValueNode, - lSS.AuthorizationUrl.Value, rSS.AuthorizationUrl.Value, &changes, v3.AuthorizationUrlLabel, true, CompSecurityScheme, PropAuthorizationURL) - - addPropertyCheck(&props, lSS.TokenUrl.ValueNode, rSS.TokenUrl.ValueNode, - lSS.TokenUrl.Value, rSS.TokenUrl.Value, &changes, v3.TokenUrlLabel, true, CompSecurityScheme, PropTokenURL) - - if !lSS.Scopes.IsEmpty() && !rSS.Scopes.IsEmpty() { - if !low.AreEqual(lSS.Scopes.Value, rSS.Scopes.Value) { - sc.ScopesChanges = CompareScopes(lSS.Scopes.Value, rSS.Scopes.Value) - } - } - if lSS.Scopes.IsEmpty() && !rSS.Scopes.IsEmpty() { - CreateChange(&changes, ObjectAdded, v3.ScopesLabel, - nil, rSS.Scopes.ValueNode, BreakingAdded(CompSecurityScheme, PropScopes), nil, rSS.Scopes.Value) - } - if !lSS.Scopes.IsEmpty() && rSS.Scopes.IsEmpty() { - CreateChange(&changes, ObjectRemoved, v3.ScopesLabel, - lSS.Scopes.ValueNode, nil, BreakingRemoved(CompSecurityScheme, PropScopes), lSS.Scopes.Value, nil) - } - - sc.ExtensionChanges = CompareExtensions(lSS.Extensions, rSS.Extensions) - } - - if reflect.TypeOf(&v3.SecurityScheme{}) == reflect.TypeOf(l) && - reflect.TypeOf(&v3.SecurityScheme{}) == reflect.TypeOf(r) { - - lSS := l.(*v3.SecurityScheme) - rSS := r.(*v3.SecurityScheme) - - if low.AreEqual(lSS, rSS) { - return nil - } - addPropertyCheck(&props, lSS.Type.ValueNode, rSS.Type.ValueNode, - lSS.Type.Value, rSS.Type.Value, &changes, v3.TypeLabel, - BreakingModified(CompSecurityScheme, PropType), CompSecurityScheme, PropType) - - addPropertyCheck(&props, lSS.Description.ValueNode, rSS.Description.ValueNode, - lSS.Description.Value, rSS.Description.Value, &changes, v3.DescriptionLabel, - BreakingModified(CompSecurityScheme, PropDescription), CompSecurityScheme, PropDescription) - - 
addPropertyCheck(&props, lSS.Name.ValueNode, rSS.Name.ValueNode, - lSS.Name.Value, rSS.Name.Value, &changes, v3.NameLabel, - BreakingModified(CompSecurityScheme, PropName), CompSecurityScheme, PropName) - - addPropertyCheck(&props, lSS.In.ValueNode, rSS.In.ValueNode, - lSS.In.Value, rSS.In.Value, &changes, v3.InLabel, - BreakingModified(CompSecurityScheme, PropIn), CompSecurityScheme, PropIn) - - addPropertyCheck(&props, lSS.Scheme.ValueNode, rSS.Scheme.ValueNode, - lSS.Scheme.Value, rSS.Scheme.Value, &changes, v3.SchemeLabel, - BreakingModified(CompSecurityScheme, PropScheme), CompSecurityScheme, PropScheme) - - addPropertyCheck(&props, lSS.BearerFormat.ValueNode, rSS.BearerFormat.ValueNode, - lSS.BearerFormat.Value, rSS.BearerFormat.Value, &changes, v3.SchemeLabel, - BreakingModified(CompSecurityScheme, PropBearerFormat), CompSecurityScheme, PropBearerFormat) - - addPropertyCheck(&props, lSS.OpenIdConnectUrl.ValueNode, rSS.OpenIdConnectUrl.ValueNode, - lSS.OpenIdConnectUrl.Value, rSS.OpenIdConnectUrl.Value, &changes, v3.OpenIdConnectUrlLabel, - BreakingModified(CompSecurityScheme, PropOpenIDConnectURL), CompSecurityScheme, PropOpenIDConnectURL) - - // OpenAPI 3.2+ fields - addPropertyCheck(&props, lSS.OAuth2MetadataUrl.ValueNode, rSS.OAuth2MetadataUrl.ValueNode, - lSS.OAuth2MetadataUrl.Value, rSS.OAuth2MetadataUrl.Value, &changes, v3.OAuth2MetadataUrlLabel, - BreakingModified(CompSecurityScheme, PropOAuth2MetadataUrl), CompSecurityScheme, PropOAuth2MetadataUrl) - - addPropertyCheck(&props, lSS.Deprecated.ValueNode, rSS.Deprecated.ValueNode, - lSS.Deprecated.Value, rSS.Deprecated.Value, &changes, v3.DeprecatedLabel, - BreakingModified(CompSecurityScheme, PropDeprecated), CompSecurityScheme, PropDeprecated) - - if !lSS.Flows.IsEmpty() && !rSS.Flows.IsEmpty() { - if !low.AreEqual(lSS.Flows.Value, rSS.Flows.Value) { - sc.OAuthFlowChanges = CompareOAuthFlows(lSS.Flows.Value, rSS.Flows.Value) - } - } - if lSS.Flows.IsEmpty() && !rSS.Flows.IsEmpty() { - 
CreateChange(&changes, ObjectAdded, v3.FlowsLabel, - nil, rSS.Flows.ValueNode, BreakingAdded(CompSecurityScheme, PropFlows), nil, rSS.Flows.Value) - } - if !lSS.Flows.IsEmpty() && rSS.Flows.IsEmpty() { - CreateChange(&changes, ObjectRemoved, v3.FlowsLabel, - lSS.Flows.ValueNode, nil, BreakingRemoved(CompSecurityScheme, PropFlows), lSS.Flows.Value, nil) - } - sc.ExtensionChanges = CompareExtensions(lSS.Extensions, rSS.Extensions) - } - CheckProperties(props) - sc.PropertyChanges = NewPropertyChanges(changes) - return sc -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/server.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/server.go deleted file mode 100644 index 86914a061f5..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/server.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/datamodel/low/v3" -) - -// ServerChanges represents changes found between two OpenAPI Server Objects -type ServerChanges struct { - *PropertyChanges - Server *v3.Server - ServerVariableChanges map[string]*ServerVariableChanges `json:"serverVariables,omitempty" yaml:"serverVariables,omitempty"` - ExtensionChanges *ExtensionChanges `json:"extensions,omitempty" yaml:"extensions,omitempty"` -} - -// GetAllChanges returns a slice of all changes made between SecurityRequirement objects -func (s *ServerChanges) GetAllChanges() []*Change { - if s == nil { - return nil - } - var changes []*Change - changes = append(changes, s.Changes...) - for k := range s.ServerVariableChanges { - changes = append(changes, s.ServerVariableChanges[k].GetAllChanges()...) - } - if s.ExtensionChanges != nil { - changes = append(changes, s.ExtensionChanges.GetAllChanges()...) 
- } - return changes -} - -// TotalChanges returns total changes found between two OpenAPI Server Objects -func (s *ServerChanges) TotalChanges() int { - if s == nil { - return 0 - } - c := s.PropertyChanges.TotalChanges() - for k := range s.ServerVariableChanges { - c += s.ServerVariableChanges[k].TotalChanges() - } - if s.ExtensionChanges != nil { - c += s.ExtensionChanges.TotalChanges() - } - return c -} - -// TotalBreakingChanges returns the total number of breaking changes found between two OpenAPI Server objects. -func (s *ServerChanges) TotalBreakingChanges() int { - c := s.PropertyChanges.TotalBreakingChanges() - for k := range s.ServerVariableChanges { - c += s.ServerVariableChanges[k].TotalBreakingChanges() - } - return c -} - -// CompareServers compares two OpenAPI Server objects for any changes. If anything is found, returns a pointer -// to a ServerChanges instance, or returns nil if nothing is found. -func CompareServers(l, r *v3.Server) *ServerChanges { - if low.AreEqual(l, r) { - return nil - } - var changes []*Change - props := make([]*PropertyCheck, 0, 3) - - props = append(props, - NewPropertyCheck(CompServer, PropName, - l.Name.ValueNode, r.Name.ValueNode, - v3.NameLabel, &changes, l, r), - NewPropertyCheck(CompServer, PropURL, - l.URL.ValueNode, r.URL.ValueNode, - v3.URLLabel, &changes, l, r), - NewPropertyCheck(CompServer, PropDescription, - l.Description.ValueNode, r.Description.ValueNode, - v3.DescriptionLabel, &changes, l, r), - ) - - CheckProperties(props) - sc := new(ServerChanges) - sc.PropertyChanges = NewPropertyChanges(changes) - sc.ServerVariableChanges = CheckMapForChanges(l.Variables.Value, r.Variables.Value, - &changes, v3.VariablesLabel, CompareServerVariables) - - sc.ExtensionChanges = CompareExtensions(l.Extensions, r.Extensions) - sc.Server = r - return sc -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/server_variable.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/server_variable.go deleted 
file mode 100644 index a7a96518f54..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/server_variable.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/datamodel/low/v3" -) - -// ServerVariableChanges represents changes found between two OpenAPI ServerVariable Objects -type ServerVariableChanges struct { - *PropertyChanges -} - -// GetAllChanges returns a slice of all changes made between SecurityRequirement objects -func (s *ServerVariableChanges) GetAllChanges() []*Change { - if s == nil { - return nil - } - return s.Changes -} - -// CompareServerVariables compares a left and right OpenAPI ServerVariable object for changes. -// If anything is found, returns a pointer to a ServerVariableChanges instance, otherwise returns nil. -func CompareServerVariables(l, r *v3.ServerVariable) *ServerVariableChanges { - if low.AreEqual(l, r) { - return nil - } - - var changes []*Change - - lValues := make(map[string]low.NodeReference[string]) - rValues := make(map[string]low.NodeReference[string]) - for i := range l.Enum { - lValues[l.Enum[i].Value] = l.Enum[i] - } - for i := range r.Enum { - rValues[r.Enum[i].Value] = r.Enum[i] - } - for k := range lValues { - if _, ok := rValues[k]; !ok { - CreateChange(&changes, ObjectRemoved, v3.EnumLabel, - lValues[k].ValueNode, nil, BreakingRemoved(CompServerVariable, PropEnum), - lValues[k].Value, nil) - continue - } - } - for k := range rValues { - if _, ok := lValues[k]; !ok { - CreateChange(&changes, ObjectAdded, v3.EnumLabel, - lValues[k].ValueNode, rValues[k].ValueNode, BreakingAdded(CompServerVariable, PropEnum), - lValues[k].Value, rValues[k].Value) - } - } - - props := make([]*PropertyCheck, 0, 2) - props = append(props, - NewPropertyCheck(CompServerVariable, PropDefault, - l.Default.ValueNode, r.Default.ValueNode, - 
v3.DefaultLabel, &changes, l, r), - NewPropertyCheck(CompServerVariable, PropDescription, - l.Description.ValueNode, r.Description.ValueNode, - v3.DescriptionLabel, &changes, l, r), - ) - - CheckProperties(props) - sc := new(ServerVariableChanges) - sc.PropertyChanges = NewPropertyChanges(changes) - return sc -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/tags.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/tags.go deleted file mode 100644 index d839c4052cb..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/tags.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - "github.com/pb33f/libopenapi/datamodel/low" - "github.com/pb33f/libopenapi/datamodel/low/base" - "github.com/pb33f/libopenapi/datamodel/low/v3" -) - -// TagChanges represents changes made to the Tags object of an OpenAPI document. -type TagChanges struct { - *PropertyChanges - ExternalDocs *ExternalDocChanges `json:"externalDocs,omitempty" yaml:"externalDocs,omitempty"` - ExtensionChanges *ExtensionChanges `json:"extensions,omitempty" yaml:"extensions,omitempty"` -} - -// GetAllChanges returns a slice of all changes made between Tag objects -func (t *TagChanges) GetAllChanges() []*Change { - if t == nil { - return nil - } - var changes []*Change - changes = append(changes, t.Changes...) - if t.ExternalDocs != nil { - changes = append(changes, t.ExternalDocs.GetAllChanges()...) - } - if t.ExtensionChanges != nil { - changes = append(changes, t.ExtensionChanges.GetAllChanges()...) - } - return changes -} - -// TotalChanges returns a count of everything that changed within tags. 
-func (t *TagChanges) TotalChanges() int { - if t == nil { - return 0 - } - c := t.PropertyChanges.TotalChanges() - if t.ExternalDocs != nil { - c += t.ExternalDocs.TotalChanges() - } - if t.ExtensionChanges != nil { - c += t.ExtensionChanges.TotalChanges() - } - return c -} - -// TotalBreakingChanges returns the number of breaking changes made by Tags -func (t *TagChanges) TotalBreakingChanges() int { - return t.PropertyChanges.TotalBreakingChanges() -} - -// CompareTags will compare a left (original) and a right (new) slice of ValueReference nodes for -// any changes between them. If there are changes, a pointer to TagChanges is returned, if not then -// nil is returned instead. -func CompareTags(l, r []low.ValueReference[*base.Tag]) []*TagChanges { - var tagResults []*TagChanges - - // look at the original and then look through the new. - seenLeft := make(map[string]*low.ValueReference[*base.Tag]) - seenRight := make(map[string]*low.ValueReference[*base.Tag]) - for i := range l { - h := l[i] - seenLeft[l[i].Value.Name.Value] = &h - } - for i := range r { - h := r[i] - seenRight[r[i].Value.Name.Value] = &h - } - - // var changes []*Change - - // check for removals, modifications and moves - for i := range seenLeft { - tc := new(TagChanges) - var changes []*Change - - CheckForObjectAdditionOrRemoval[*base.Tag](seenLeft, seenRight, i, &changes, BreakingAdded(CompTags, ""), BreakingRemoved(CompTags, "")) - - // if the existing tag exists, let's check it. 
- if seenRight[i] != nil { - lTag := seenLeft[i].Value - rTag := seenRight[i].Value - props := make([]*PropertyCheck, 0, 5) - - props = append(props, - NewPropertyCheck(CompTag, PropName, - lTag.Name.ValueNode, rTag.Name.ValueNode, - v3.NameLabel, &changes, lTag, rTag), - NewPropertyCheck(CompTag, PropSummary, - lTag.Summary.ValueNode, rTag.Summary.ValueNode, - v3.SummaryLabel, &changes, lTag, rTag), - NewPropertyCheck(CompTag, PropDescription, - lTag.Description.ValueNode, rTag.Description.ValueNode, - v3.DescriptionLabel, &changes, lTag, rTag), - NewPropertyCheck(CompTag, PropParent, - lTag.Parent.ValueNode, rTag.Parent.ValueNode, - v3.ParentLabel, &changes, lTag, rTag), - NewPropertyCheck(CompTag, PropKind, - lTag.Kind.ValueNode, rTag.Kind.ValueNode, - v3.KindLabel, &changes, lTag, rTag), - ) - - // check properties - CheckProperties(props) - - // compare external docs - if !seenLeft[i].Value.ExternalDocs.IsEmpty() && !seenRight[i].Value.ExternalDocs.IsEmpty() { - tc.ExternalDocs = CompareExternalDocs(seenLeft[i].Value.ExternalDocs.Value, - seenRight[i].Value.ExternalDocs.Value) - } - if seenLeft[i].Value.ExternalDocs.IsEmpty() && !seenRight[i].Value.ExternalDocs.IsEmpty() { - CreateChange(&changes, ObjectAdded, v3.ExternalDocsLabel, nil, seenRight[i].GetValueNode(), - BreakingAdded(CompTag, PropExternalDocs), nil, seenRight[i].Value.ExternalDocs.Value) - } - if !seenLeft[i].Value.ExternalDocs.IsEmpty() && seenRight[i].Value.ExternalDocs.IsEmpty() { - CreateChange(&changes, ObjectRemoved, v3.ExternalDocsLabel, seenLeft[i].GetValueNode(), nil, - BreakingRemoved(CompTag, PropExternalDocs), seenLeft[i].Value.ExternalDocs.Value, nil) - } - - // check extensions - tc.ExtensionChanges = CompareExtensions(seenLeft[i].Value.Extensions, seenRight[i].Value.Extensions) - tc.PropertyChanges = NewPropertyChanges(changes) - if tc.TotalChanges() > 0 { - tagResults = append(tagResults, tc) - } - continue - } - - if len(changes) > 0 { - tc.PropertyChanges = 
NewPropertyChanges(changes) - tagResults = append(tagResults, tc) - } - - } - for i := range seenRight { - if seenLeft[i] == nil { - tc := new(TagChanges) - var changes []*Change - - CreateChange(&changes, ObjectAdded, i, nil, seenRight[i].GetValueNode(), - BreakingAdded(CompTags, ""), nil, seenRight[i].GetValue()) - - tc.PropertyChanges = NewPropertyChanges(changes) - tagResults = append(tagResults, tc) - - } - } - return tagResults -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/model/xml.go b/vendor/github.com/pb33f/libopenapi/what-changed/model/xml.go deleted file mode 100644 index 5b9c71c6e4a..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/model/xml.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -package model - -import ( - "github.com/pb33f/libopenapi/datamodel/low/base" - v3 "github.com/pb33f/libopenapi/datamodel/low/v3" -) - -// XMLChanges represents changes made to the XML object of an OpenAPI document. -type XMLChanges struct { - *PropertyChanges - ExtensionChanges *ExtensionChanges `json:"extensions,omitempty" yaml:"extensions,omitempty"` -} - -// GetAllChanges returns a slice of all changes made between XML objects -func (x *XMLChanges) GetAllChanges() []*Change { - if x == nil { - return nil - } - var changes []*Change - changes = append(changes, x.Changes...) - if x.ExtensionChanges != nil { - changes = append(changes, x.ExtensionChanges.GetAllChanges()...) - } - return changes -} - -// TotalChanges returns a count of everything that was changed within an XML object. -func (x *XMLChanges) TotalChanges() int { - if x == nil { - return 0 - } - c := x.PropertyChanges.TotalChanges() - if x.ExtensionChanges != nil { - c += x.ExtensionChanges.TotalChanges() - } - return c -} - -// TotalBreakingChanges returns the number of breaking changes made by the XML object. 
-func (x *XMLChanges) TotalBreakingChanges() int { - return x.PropertyChanges.TotalBreakingChanges() -} - -// CompareXML will compare a left (original) and a right (new) XML instance, and check for -// any changes between them. If changes are found, the function returns a pointer to XMLChanges, -// otherwise, if nothing changed - it will return nil -func CompareXML(l, r *base.XML) *XMLChanges { - xc := new(XMLChanges) - var changes []*Change - props := make([]*PropertyCheck, 0, 6) - - props = append(props, - NewPropertyCheck(CompXML, PropName, - l.Name.ValueNode, r.Name.ValueNode, - v3.NameLabel, &changes, l, r), - NewPropertyCheck(CompXML, PropNamespace, - l.Namespace.ValueNode, r.Namespace.ValueNode, - v3.NamespaceLabel, &changes, l, r), - NewPropertyCheck(CompXML, PropPrefix, - l.Prefix.ValueNode, r.Prefix.ValueNode, - v3.PrefixLabel, &changes, l, r), - NewPropertyCheck(CompXML, PropAttribute, - l.Attribute.ValueNode, r.Attribute.ValueNode, - v3.AttributeLabel, &changes, l, r), - NewPropertyCheck(CompXML, PropNodeType, - l.NodeType.ValueNode, r.NodeType.ValueNode, - base.NodeTypeLabel, &changes, l, r), - NewPropertyCheck(CompXML, PropWrapped, - l.Wrapped.ValueNode, r.Wrapped.ValueNode, - v3.WrappedLabel, &changes, l, r), - ) - - CheckProperties(props) - - // check extensions - xc.ExtensionChanges = CheckExtensions(l, r) - xc.PropertyChanges = NewPropertyChanges(changes) - if xc.TotalChanges() <= 0 { - return nil - } - return xc -} diff --git a/vendor/github.com/pb33f/libopenapi/what-changed/what_changed.go b/vendor/github.com/pb33f/libopenapi/what-changed/what_changed.go deleted file mode 100644 index d076a7ca8c9..00000000000 --- a/vendor/github.com/pb33f/libopenapi/what-changed/what_changed.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2022 Princess B33f Heavy Industries / Dave Shanley -// SPDX-License-Identifier: MIT - -// Package what_changed -// -// what changed is a feature that performs an accurate and deep analysis of what has changed between two 
OpenAPI -// documents. The report generated outlines every single change made between two specifications (left and right) -// rendered in the document hierarchy, so exploring it is the same as exploring the document model. -// -// There are two main functions, one of generating a report for Swagger documents (OpenAPI 2) -// And OpenAPI 3+ documents. -// -// This package uses a combined model for OpenAPI and Swagger changes, it does not break them out into separate -// versions like the datamodel package. The reason for this is to prevent sprawl across versions and to provide -// a single API and model for any application that wants to use this feature. -package what_changed - -import ( - "github.com/pb33f/libopenapi/datamodel/low/v2" - "github.com/pb33f/libopenapi/datamodel/low/v3" - "github.com/pb33f/libopenapi/what-changed/model" -) - -// CompareOpenAPIDocuments will compare left (original) and right (updated) OpenAPI 3+ documents and extract every change -// made across the entire specification. The report outlines every property changed, everything that was added, -// or removed and which of those changes were breaking. -func CompareOpenAPIDocuments(original, updated *v3.Document) *model.DocumentChanges { - return model.CompareDocuments(original, updated) -} - -// CompareSwaggerDocuments will compare left (original) and a right (updated) Swagger documents and extract every change -// made across the entire specification. The report outlines every property changes, everything that was added, -// or removed and which of those changes were breaking. 
-func CompareSwaggerDocuments(original, updated *v2.Swagger) *model.DocumentChanges { - return model.CompareDocuments(original, updated) -} diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go index b390a4a6291..c813cd28378 100644 --- a/vendor/github.com/prometheus/prometheus/config/config.go +++ b/vendor/github.com/prometheus/prometheus/config/config.go @@ -83,6 +83,13 @@ func Load(s string, logger *slog.Logger) (*Config, error) { return nil, err } + // When the config body is empty, UnmarshalYAML is never called, so + // TSDBConfig may still be nil. + if cfg.StorageConfig.TSDBConfig == nil { + retention := DefaultTSDBRetentionConfig + cfg.StorageConfig.TSDBConfig = &TSDBConfig{Retention: &retention} + } + b := labels.NewScratchBuilder(0) cfg.GlobalConfig.ExternalLabels.Range(func(v labels.Label) { newV := os.Expand(v.Value, func(s string) string { @@ -276,6 +283,9 @@ var ( // For backwards compatibility. LabelNamePreserveMultipleUnderscores: true, } + + // DefaultTSDBRetentionConfig is the default TSDB retention configuration. + DefaultTSDBRetentionConfig TSDBRetentionConfig ) // Config is the top-level configuration for Prometheus's config files. @@ -405,6 +415,13 @@ func (c *Config) UnmarshalYAML(unmarshal func(any) error) error { c.Runtime = DefaultRuntimeConfig } + // If no storage.tsdb section is present, TSDBConfig is nil and its + // UnmarshalYAML never runs. Inject the default retention here. + if c.StorageConfig.TSDBConfig == nil { + retention := DefaultTSDBRetentionConfig + c.StorageConfig.TSDBConfig = &TSDBConfig{Retention: &retention} + } + for _, rf := range c.RuleFiles { if !patRulePath.MatchString(rf) { return fmt.Errorf("invalid rule file path %q", rf) @@ -1094,7 +1111,23 @@ type TSDBRetentionConfig struct { Size units.Base2Bytes `yaml:"size,omitempty"` // Maximum percentage of disk used for TSDB storage. 
- Percentage uint `yaml:"percentage,omitempty"` + Percentage float64 `yaml:"percentage,omitempty"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (t *TSDBRetentionConfig) UnmarshalYAML(unmarshal func(any) error) error { + *t = TSDBRetentionConfig{} + type plain TSDBRetentionConfig + if err := unmarshal((*plain)(t)); err != nil { + return err + } + if t.Size < 0 { + return fmt.Errorf("'storage.tsdb.retention.size' must be greater than or equal to 0, got %v", t.Size) + } + if t.Percentage < 0 || t.Percentage > 100 { + return fmt.Errorf("'storage.tsdb.retention.percentage' must be in the range [0, 100], got %v", t.Percentage) + } + return nil } // TSDBConfig configures runtime reloadable configuration options. @@ -1127,6 +1160,11 @@ func (t *TSDBConfig) UnmarshalYAML(unmarshal func(any) error) error { t.OutOfOrderTimeWindow = time.Duration(t.OutOfOrderTimeWindowFlag).Milliseconds() + if t.Retention == nil { + retention := DefaultTSDBRetentionConfig + t.Retention = &retention + } + return nil } diff --git a/vendor/github.com/prometheus/prometheus/discovery/digitalocean/digitalocean.go b/vendor/github.com/prometheus/prometheus/discovery/digitalocean/digitalocean.go index 0a185c29155..f80eb29901a 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/digitalocean/digitalocean.go +++ b/vendor/github.com/prometheus/prometheus/discovery/digitalocean/digitalocean.go @@ -34,29 +34,58 @@ import ( "github.com/prometheus/prometheus/discovery/targetgroup" ) +// metaLabelPrefix is the meta prefix used for all meta labels. 
const ( - doLabel = model.MetaLabelPrefix + "digitalocean_" - doLabelID = doLabel + "droplet_id" - doLabelName = doLabel + "droplet_name" - doLabelImage = doLabel + "image" - doLabelImageName = doLabel + "image_name" - doLabelPrivateIPv4 = doLabel + "private_ipv4" - doLabelPublicIPv4 = doLabel + "public_ipv4" - doLabelPublicIPv6 = doLabel + "public_ipv6" - doLabelRegion = doLabel + "region" - doLabelSize = doLabel + "size" - doLabelStatus = doLabel + "status" - doLabelFeatures = doLabel + "features" - doLabelTags = doLabel + "tags" - doLabelVPC = doLabel + "vpc" - separator = "," + metaLabelPrefix = model.MetaLabelPrefix + "digitalocean_" + separator = "," ) +const ( + doLabelID = metaLabelPrefix + "droplet_id" + doLabelName = metaLabelPrefix + "droplet_name" + doLabelImage = metaLabelPrefix + "image" + doLabelImageName = metaLabelPrefix + "image_name" + doLabelPrivateIPv4 = metaLabelPrefix + "private_ipv4" + doLabelPublicIPv4 = metaLabelPrefix + "public_ipv4" + doLabelPublicIPv6 = metaLabelPrefix + "public_ipv6" + doLabelRegion = metaLabelPrefix + "region" + doLabelSize = metaLabelPrefix + "size" + doLabelStatus = metaLabelPrefix + "status" + doLabelFeatures = metaLabelPrefix + "features" + doLabelTags = metaLabelPrefix + "tags" + doLabelVPC = metaLabelPrefix + "vpc" +) + +// Role is the role of the target within the DigitalOcean ecosystem. +type Role string + +const ( + // DropletsRole discovers targets from DigitalOcean Droplets. + DropletsRole Role = "droplets" + + // DatabasesRole discovers targets from DigitalOcean Managed Databases. + DatabasesRole Role = "databases" +) + +// UnmarshalYAML implements the yaml.Unmarshaler interface. 
+func (c *Role) UnmarshalYAML(unmarshal func(any) error) error { + if err := unmarshal((*string)(c)); err != nil { + return err + } + switch *c { + case DropletsRole, DatabasesRole: + return nil + default: + return fmt.Errorf("unknown DigitalOcean SD role %q", *c) + } +} + // DefaultSDConfig is the default DigitalOcean SD configuration. var DefaultSDConfig = SDConfig{ Port: 80, RefreshInterval: model.Duration(60 * time.Second), HTTPClientConfig: config.DefaultHTTPClientConfig, + Role: DropletsRole, } func init() { @@ -76,6 +105,10 @@ type SDConfig struct { RefreshInterval model.Duration `yaml:"refresh_interval"` Port int `yaml:"port"` + Role Role `yaml:"role"` + + // Internal field for testing. + HTTPClient *http.Client } // Name returns the name of the Config. @@ -99,58 +132,78 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { if err != nil { return err } - return c.HTTPClientConfig.Validate() -} -// Discovery periodically performs DigitalOcean requests. It implements -// the Discoverer interface. -type Discovery struct { - *refresh.Discovery - client *godo.Client - port int + if c.Role == "" { + return errors.New("role missing (one of: droplets, databases)") + } + + return c.HTTPClientConfig.Validate() } // NewDiscovery returns a new Discovery which periodically refreshes its targets. 
-func NewDiscovery(conf *SDConfig, opts discovery.DiscovererOptions) (*Discovery, error) { +func NewDiscovery(conf *SDConfig, opts discovery.DiscovererOptions) (discovery.Discoverer, error) { m, ok := opts.Metrics.(*digitaloceanMetrics) if !ok { return nil, errors.New("invalid discovery metrics type") } - d := &Discovery{ - port: conf.Port, - } - - rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "digitalocean_sd") + r, err := newRefresher(conf) if err != nil { return nil, err } - d.client, err = godo.New( - &http.Client{ - Transport: rt, - Timeout: time.Duration(conf.RefreshInterval), - }, - godo.SetUserAgent(version.PrometheusUserAgent()), - ) - if err != nil { - return nil, fmt.Errorf("error setting up digital ocean agent: %w", err) - } - - d.Discovery = refresh.NewDiscovery( + return refresh.NewDiscovery( refresh.Options{ Logger: opts.Logger, Mech: "digitalocean", SetName: opts.SetName, Interval: time.Duration(conf.RefreshInterval), - RefreshF: d.refresh, + RefreshF: r.refresh, MetricsInstantiator: m.refreshMetrics, }, + ), nil +} + +type refresher interface { + refresh(context.Context) ([]*targetgroup.Group, error) +} + +func newRefresher(conf *SDConfig) (refresher, error) { + httpClient := conf.HTTPClient + if httpClient == nil { + rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "digitalocean_sd") + if err != nil { + return nil, err + } + httpClient = &http.Client{ + Transport: rt, + Timeout: time.Duration(conf.RefreshInterval), + } + } + + client, err := godo.New( + httpClient, + godo.SetUserAgent(version.PrometheusUserAgent()), ) - return d, nil + if err != nil { + return nil, fmt.Errorf("error setting up digital ocean agent: %w", err) + } + + switch conf.Role { + case DropletsRole: + return &dropletsDiscovery{client: client, port: conf.Port}, nil + case DatabasesRole: + return &databasesDiscovery{client: client, port: conf.Port}, nil + } + return nil, fmt.Errorf("unknown DigitalOcean SD role %q", conf.Role) +} + +type 
dropletsDiscovery struct { + client *godo.Client + port int } -func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { +func (d *dropletsDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { tg := &targetgroup.Group{ Source: "DigitalOcean", } @@ -213,7 +266,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { return []*targetgroup.Group{tg}, nil } -func (d *Discovery) listDroplets(ctx context.Context) ([]godo.Droplet, error) { +func (d *dropletsDiscovery) listDroplets(ctx context.Context) ([]godo.Droplet, error) { var ( droplets []godo.Droplet opts = &godo.ListOptions{} diff --git a/vendor/github.com/prometheus/prometheus/discovery/digitalocean/digitalocean_db.go b/vendor/github.com/prometheus/prometheus/discovery/digitalocean/digitalocean_db.go new file mode 100644 index 00000000000..9be5b65fb1d --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/discovery/digitalocean/digitalocean_db.go @@ -0,0 +1,124 @@ +// Copyright The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package digitalocean + +import ( + "context" + "fmt" + "net" + "strconv" + + "github.com/digitalocean/godo" + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/discovery/targetgroup" +) + +const ( + dbLabelID = metaLabelPrefix + "db_id" + dbLabelName = metaLabelPrefix + "db_name" + dbLabelEngine = metaLabelPrefix + "db_engine" + dbLabelVersion = metaLabelPrefix + "db_version" + dbLabelStatus = metaLabelPrefix + "db_status" + dbLabelRegion = metaLabelPrefix + "db_region" + dbLabelSize = metaLabelPrefix + "db_size" + dbLabelNumNodes = metaLabelPrefix + "db_num_nodes" + dbLabelHost = metaLabelPrefix + "db_host" + dbLabelPrivateHost = metaLabelPrefix + "db_private_host" + dbLabelTagPrefix = metaLabelPrefix + "db_tag_" +) + +type databasesDiscovery struct { + client *godo.Client + port int +} + +func (d *databasesDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { + tg := &targetgroup.Group{ + Source: "DigitalOcean Databases", + } + + clusters, err := d.listClusters(ctx) + if err != nil { + return nil, err + } + for _, cluster := range clusters { + labels := model.LabelSet{ + dbLabelID: model.LabelValue(cluster.ID), + dbLabelName: model.LabelValue(cluster.Name), + dbLabelEngine: model.LabelValue(cluster.EngineSlug), + dbLabelVersion: model.LabelValue(cluster.VersionSlug), + dbLabelStatus: model.LabelValue(cluster.Status), + dbLabelRegion: model.LabelValue(cluster.RegionSlug), + dbLabelSize: model.LabelValue(cluster.SizeSlug), + dbLabelNumNodes: model.LabelValue(strconv.Itoa(cluster.NumNodes)), + } + + host := "" + if cluster.PrivateConnection != nil { + host = cluster.PrivateConnection.Host + labels[dbLabelPrivateHost] = model.LabelValue(host) + } + + if cluster.Connection != nil { + labels[dbLabelHost] = model.LabelValue(cluster.Connection.Host) + if host == "" { + host = cluster.Connection.Host + } + } + + if host != "" { + addr := net.JoinHostPort(host, strconv.FormatUint(uint64(d.port), 10)) + 
labels[model.AddressLabel] = model.LabelValue(addr) + } + + for _, tag := range cluster.Tags { + labels[dbLabelTagPrefix+model.LabelName(tag)] = "true" + } + + tg.Targets = append(tg.Targets, labels) + } + return []*targetgroup.Group{tg}, nil +} + +func (d *databasesDiscovery) listClusters(ctx context.Context) ([]godo.Database, error) { + var ( + clusters []godo.Database + opts = &godo.ListOptions{ + Page: 1, + PerPage: 100, + } + ) + for { + paginatedClusters, resp, err := d.client.Databases.List(ctx, opts) + if err != nil { + return nil, fmt.Errorf("error while listing database clusters page %d: %w", opts.Page, err) + } + if len(paginatedClusters) == 0 { + break + } + clusters = append(clusters, paginatedClusters...) + + if resp.Links != nil && !resp.Links.IsLastPage() { + page, err := resp.Links.CurrentPage() + if err == nil { + opts.Page = page + 1 + continue + } + } + + opts.Page++ + } + return clusters, nil +} diff --git a/vendor/github.com/prometheus/prometheus/discovery/discovery.go b/vendor/github.com/prometheus/prometheus/discovery/discovery.go index c4f8c8d458b..5ad51a394ce 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/discovery.go +++ b/vendor/github.com/prometheus/prometheus/discovery/discovery.go @@ -83,6 +83,13 @@ type RefreshMetricsInstantiator interface { type RefreshMetricsManager interface { DiscovererMetrics RefreshMetricsInstantiator + DeleteLabelValues(mech, config string) +} + +// SDMetrics holds all metrics for service discovery mechanisms. +type SDMetrics struct { + MechanismMetrics map[string]DiscovererMetrics + RefreshManager RefreshMetricsManager } // A Config provides the configuration and constructor for a Discoverer. 
diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/node.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/node.go index cbc69dd0ca6..08e2c604cf8 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/node.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/node.go @@ -20,6 +20,7 @@ import ( "log/slog" "net" "strconv" + "strings" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" @@ -160,6 +161,7 @@ func nodeSourceFromName(name string) string { const ( nodeProviderIDLabel = metaLabelPrefix + "node_provider_id" + nodeConditionPrefix = metaLabelPrefix + "node_condition_" nodeAddressPrefix = metaLabelPrefix + "node_address_" ) @@ -169,6 +171,13 @@ func nodeLabels(n *apiv1.Node) model.LabelSet { ls[nodeProviderIDLabel] = lv(n.Spec.ProviderID) + // Export all node conditions as individual meta labels + for _, condition := range n.Status.Conditions { + conditionType := strings.ToLower(string(condition.Type)) + labelName := nodeConditionPrefix + strutil.SanitizeLabelName(conditionType) + ls[model.LabelName(labelName)] = lv(strings.ToLower(string(condition.Status))) + } + addObjectMetaLabels(ls, n.ObjectMeta, RoleNode) return ls diff --git a/vendor/github.com/prometheus/prometheus/discovery/manager.go b/vendor/github.com/prometheus/prometheus/discovery/manager.go index 9994e0bf746..fa52e164f72 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/manager.go +++ b/vendor/github.com/prometheus/prometheus/discovery/manager.go @@ -22,6 +22,7 @@ import ( "sync" "time" + "github.com/cenkalti/backoff/v5" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/promslog" @@ -70,32 +71,38 @@ func (p *Provider) Config() any { // CreateAndRegisterSDMetrics registers the metrics needed for SD mechanisms. // Does not register the metrics for the Discovery Manager. 
-// TODO(ptodev): Add ability to unregister the metrics? -func CreateAndRegisterSDMetrics(reg prometheus.Registerer) (map[string]DiscovererMetrics, error) { +func CreateAndRegisterSDMetrics(reg prometheus.Registerer) (*SDMetrics, error) { // Some SD mechanisms use the "refresh" package, which has its own metrics. refreshSdMetrics := NewRefreshMetrics(reg) // Register the metrics specific for each SD mechanism, and the ones for the refresh package. - sdMetrics, err := RegisterSDMetrics(reg, refreshSdMetrics) + mechanismMetrics, err := RegisterSDMetrics(reg, refreshSdMetrics) if err != nil { return nil, fmt.Errorf("failed to register service discovery metrics: %w", err) } - return sdMetrics, nil + return &SDMetrics{ + MechanismMetrics: mechanismMetrics, + RefreshManager: refreshSdMetrics, + }, nil } // NewManager is the Discovery Manager constructor. -func NewManager(ctx context.Context, logger *slog.Logger, registerer prometheus.Registerer, sdMetrics map[string]DiscovererMetrics, options ...func(*Manager)) *Manager { +func NewManager(ctx context.Context, logger *slog.Logger, registerer prometheus.Registerer, sdMetrics *SDMetrics, options ...func(*Manager)) *Manager { if logger == nil { logger = promslog.NewNopLogger() } + if sdMetrics == nil || sdMetrics.RefreshManager == nil { + logger.Error("Failed to create discovery manager: sdMetrics.RefreshManager must be set") + return nil + } mgr := &Manager{ logger: logger, syncCh: make(chan map[string][]*targetgroup.Group), targets: make(map[poolKey]map[string]*targetgroup.Group), ctx: ctx, updatert: 5 * time.Second, - triggerSend: make(chan struct{}, 1), + triggerSend: make(chan struct{}, 1), // At least one element to ensure we can do a delayed read. 
registerer: registerer, sdMetrics: sdMetrics, } @@ -191,7 +198,7 @@ type Manager struct { registerer prometheus.Registerer metrics *Metrics - sdMetrics map[string]DiscovererMetrics + sdMetrics *SDMetrics // featureRegistry is used to track which service discovery providers are configured. featureRegistry features.Collector @@ -251,6 +258,19 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error { prov.cancel() prov.mu.RUnlock() + + // Clear up refresh metrics associated with this cancelled provider (sub means scrape job name). + m.targetsMtx.Lock() + for s := range prov.subs { + // Also clean up discovered targets metric. targetsMtx lock needed for safe access to m.targets. + delete(m.targets, poolKey{s, prov.name}) + m.metrics.DiscoveredTargets.DeleteLabelValues(s) + + if cfg, ok := prov.config.(Config); ok { + m.sdMetrics.RefreshManager.DeleteLabelValues(cfg.Name(), s) + } + } + m.targetsMtx.Unlock() continue } prov.mu.RUnlock() @@ -266,7 +286,13 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error { // Remove obsolete subs' targets. if _, ok := prov.newSubs[s]; !ok { delete(m.targets, poolKey{s, prov.name}) - m.metrics.DiscoveredTargets.DeleteLabelValues(m.name, s) + m.metrics.DiscoveredTargets.DeleteLabelValues(s) + + // Also clean up refresh metrics for subs that are being removed from a provider that is still running. + cfg, ok := prov.config.(Config) + if ok { + m.sdMetrics.RefreshManager.DeleteLabelValues(cfg.Name(), s) + } } } // Set metrics and targets for new subs. @@ -383,24 +409,34 @@ func (m *Manager) updater(ctx context.Context, p *Provider, updates chan []*targ } func (m *Manager) sender() { - ticker := time.NewTicker(m.updatert) defer func() { - ticker.Stop() close(m.syncCh) }() + // Some discoverers send updates too often, so we throttle these with a backoff interval that + // increases the interval up to m.updatert delay. 
+ lastSent := time.Now().Add(-1 * m.updatert) + b := &backoff.ExponentialBackOff{ + InitialInterval: 100 * time.Millisecond, + RandomizationFactor: backoff.DefaultRandomizationFactor, + Multiplier: backoff.DefaultMultiplier, + MaxInterval: m.updatert, + } + for { select { case <-m.ctx.Done(): return - case <-ticker.C: // Some discoverers send updates too often, so we throttle these with the ticker. + case <-time.After(b.NextBackOff()): select { case <-m.triggerSend: m.metrics.SentUpdates.Inc() select { case m.syncCh <- m.allGroups(): + lastSent = time.Now() default: m.metrics.DelayedUpdates.Inc() m.logger.Debug("Discovery receiver's channel was full so will retry the next cycle") + // Ensure we don't miss this update. select { case m.triggerSend <- struct{}{}: default: @@ -408,6 +444,9 @@ func (m *Manager) sender() { } default: } + if time.Since(lastSent) > m.updatert { + b.Reset() // Nothing happened for a while, start again from low interval for prompt updates. + } } } } @@ -499,7 +538,7 @@ func (m *Manager) registerProviders(cfgs Configs, setName string) int { d, err := cfg.NewDiscoverer(DiscovererOptions{ Logger: m.logger.With("discovery", typ, "config", setName), HTTPClientOptions: m.httpOpts, - Metrics: m.sdMetrics[typ], + Metrics: m.sdMetrics.MechanismMetrics[typ], SetName: setName, }) if err != nil { diff --git a/vendor/github.com/prometheus/prometheus/discovery/metrics_refresh.go b/vendor/github.com/prometheus/prometheus/discovery/metrics_refresh.go index 11092d9f96a..bb334050cd0 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/metrics_refresh.go +++ b/vendor/github.com/prometheus/prometheus/discovery/metrics_refresh.go @@ -88,3 +88,9 @@ func (m *RefreshMetricsVecs) Register() error { func (m *RefreshMetricsVecs) Unregister() { m.metricRegisterer.UnregisterMetrics() } + +// DeleteLabelValues deletes refresh metrics for a specific mechanism and config. Smart to use this when a scrape job is removed. 
+func (m *RefreshMetricsVecs) DeleteLabelValues(mech, config string) { + m.failuresVec.DeleteLabelValues(mech, config) + m.durationVec.DeleteLabelValues(mech, config) +} diff --git a/vendor/github.com/prometheus/prometheus/discovery/moby/docker.go b/vendor/github.com/prometheus/prometheus/discovery/moby/docker.go index aa1cd2eb428..46ed842fa9d 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/moby/docker.go +++ b/vendor/github.com/prometheus/prometheus/discovery/moby/docker.go @@ -24,10 +24,9 @@ import ( "strconv" "time" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/client" + "github.com/moby/moby/api/types/container" + "github.com/moby/moby/api/types/network" + "github.com/moby/moby/client" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -123,7 +122,7 @@ type DockerDiscovery struct { client *client.Client port int hostNetworkingHost string - filters filters.Args + filters client.Filters matchFirstNetwork bool } @@ -147,10 +146,9 @@ func NewDockerDiscovery(conf *DockerSDConfig, opts discovery.DiscovererOptions) clientOpts := []client.Opt{ client.WithHost(conf.Host), - client.WithAPIVersionNegotiation(), } - d.filters = filters.NewArgs() + d.filters = make(client.Filters) for _, f := range conf.Filters { for _, v := range f.Values { d.filters.Add(f.Name, v) @@ -177,7 +175,7 @@ func NewDockerDiscovery(conf *DockerSDConfig, opts discovery.DiscovererOptions) ) } - d.client, err = client.NewClientWithOpts(clientOpts...) + d.client, err = client.New(clientOpts...) 
if err != nil { return nil, fmt.Errorf("error setting up docker client: %w", err) } @@ -200,7 +198,7 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er Source: "Docker", } - containers, err := d.client.ContainerList(ctx, container.ListOptions{Filters: d.filters}) + containers, err := d.client.ContainerList(ctx, client.ContainerListOptions{Filters: d.filters}) if err != nil { return nil, fmt.Errorf("error while listing containers: %w", err) } @@ -211,11 +209,11 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er } allContainers := make(map[string]container.Summary) - for _, c := range containers { + for _, c := range containers.Items { allContainers[c.ID] = c } - for _, c := range containers { + for _, c := range containers.Items { if len(c.Names) == 0 { continue } @@ -276,14 +274,23 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er continue } + ipAddr := "" + if n.IPAddress.IsValid() { + ipAddr = n.IPAddress.String() + } + labels := model.LabelSet{ - dockerLabelNetworkIP: model.LabelValue(n.IPAddress), + dockerLabelNetworkIP: model.LabelValue(ipAddr), dockerLabelPortPrivate: model.LabelValue(strconv.FormatUint(uint64(p.PrivatePort), 10)), } if p.PublicPort > 0 { labels[dockerLabelPortPublic] = model.LabelValue(strconv.FormatUint(uint64(p.PublicPort), 10)) - labels[dockerLabelPortPublicIP] = model.LabelValue(p.IP) + publicIP := "" + if p.IP.IsValid() { + publicIP = p.IP.String() + } + labels[dockerLabelPortPublicIP] = model.LabelValue(publicIP) } for k, v := range commonLabels { @@ -294,7 +301,7 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er labels[model.LabelName(k)] = model.LabelValue(v) } - addr := net.JoinHostPort(n.IPAddress, strconv.FormatUint(uint64(p.PrivatePort), 10)) + addr := net.JoinHostPort(ipAddr, strconv.FormatUint(uint64(p.PrivatePort), 10)) labels[model.AddressLabel] = model.LabelValue(addr) tg.Targets = 
append(tg.Targets, labels) added = true @@ -302,8 +309,13 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er if !added { // Use fallback port when no exposed ports are available or if all are non-TCP + ipAddr := "" + if n.IPAddress.IsValid() { + ipAddr = n.IPAddress.String() + } + labels := model.LabelSet{ - dockerLabelNetworkIP: model.LabelValue(n.IPAddress), + dockerLabelNetworkIP: model.LabelValue(ipAddr), } for k, v := range commonLabels { @@ -318,7 +330,7 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er // so they only end up here, not in the previous loop. var addr string if c.HostConfig.NetworkMode != "host" { - addr = net.JoinHostPort(n.IPAddress, strconv.FormatUint(uint64(d.port), 10)) + addr = net.JoinHostPort(ipAddr, strconv.FormatUint(uint64(d.port), 10)) } else { addr = d.hostNetworkingHost } diff --git a/vendor/github.com/prometheus/prometheus/discovery/moby/dockerswarm.go b/vendor/github.com/prometheus/prometheus/discovery/moby/dockerswarm.go index 5cb12279d89..3605bc4900d 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/moby/dockerswarm.go +++ b/vendor/github.com/prometheus/prometheus/discovery/moby/dockerswarm.go @@ -21,8 +21,7 @@ import ( "net/url" "time" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/client" + "github.com/moby/moby/client" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -119,7 +118,7 @@ type Discovery struct { client *client.Client role string port int - filters filters.Args + filters client.Filters } // NewDiscovery returns a new Discovery which periodically refreshes its targets. 
@@ -141,10 +140,9 @@ func NewDiscovery(conf *DockerSwarmSDConfig, opts discovery.DiscovererOptions) ( clientOpts := []client.Opt{ client.WithHost(conf.Host), - client.WithAPIVersionNegotiation(), } - d.filters = filters.NewArgs() + d.filters = make(client.Filters) for _, f := range conf.Filters { for _, v := range f.Values { d.filters.Add(f.Name, v) @@ -171,7 +169,7 @@ func NewDiscovery(conf *DockerSwarmSDConfig, opts discovery.DiscovererOptions) ( ) } - d.client, err = client.NewClientWithOpts(clientOpts...) + d.client, err = client.New(clientOpts...) if err != nil { return nil, fmt.Errorf("error setting up docker swarm client: %w", err) } diff --git a/vendor/github.com/prometheus/prometheus/discovery/moby/network.go b/vendor/github.com/prometheus/prometheus/discovery/moby/network.go index 02db2b8a12b..33a83b1eef1 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/moby/network.go +++ b/vendor/github.com/prometheus/prometheus/discovery/moby/network.go @@ -17,8 +17,7 @@ import ( "context" "strconv" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/client" + "github.com/moby/moby/client" "github.com/prometheus/prometheus/util/strutil" ) @@ -33,13 +32,13 @@ const ( labelNetworkLabelPrefix = labelNetworkPrefix + "label_" ) -func getNetworksLabels(ctx context.Context, client *client.Client, labelPrefix string) (map[string]map[string]string, error) { - networks, err := client.NetworkList(ctx, network.ListOptions{}) +func getNetworksLabels(ctx context.Context, c *client.Client, labelPrefix string) (map[string]map[string]string, error) { + networks, err := c.NetworkList(ctx, client.NetworkListOptions{}) if err != nil { return nil, err } - labels := make(map[string]map[string]string, len(networks)) - for _, network := range networks { + labels := make(map[string]map[string]string, len(networks.Items)) + for _, network := range networks.Items { labels[network.ID] = map[string]string{ labelPrefix + labelNetworkID: network.ID, labelPrefix + 
labelNetworkName: network.Name, diff --git a/vendor/github.com/prometheus/prometheus/discovery/moby/nodes.go b/vendor/github.com/prometheus/prometheus/discovery/moby/nodes.go index 76e090c8032..9001e88e026 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/moby/nodes.go +++ b/vendor/github.com/prometheus/prometheus/discovery/moby/nodes.go @@ -19,7 +19,7 @@ import ( "net" "strconv" - "github.com/docker/docker/api/types/swarm" + "github.com/moby/moby/client" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -48,12 +48,12 @@ func (d *Discovery) refreshNodes(ctx context.Context) ([]*targetgroup.Group, err Source: "DockerSwarm", } - nodes, err := d.client.NodeList(ctx, swarm.NodeListOptions{Filters: d.filters}) + nodes, err := d.client.NodeList(ctx, client.NodeListOptions{Filters: d.filters}) if err != nil { return nil, fmt.Errorf("error while listing swarm nodes: %w", err) } - for _, n := range nodes { + for _, n := range nodes.Items { labels := model.LabelSet{ swarmLabelNodeID: model.LabelValue(n.ID), swarmLabelNodeRole: model.LabelValue(n.Spec.Role), @@ -85,12 +85,12 @@ func (d *Discovery) refreshNodes(ctx context.Context) ([]*targetgroup.Group, err } func (d *Discovery) getNodesLabels(ctx context.Context) (map[string]map[string]string, error) { - nodes, err := d.client.NodeList(ctx, swarm.NodeListOptions{}) + nodes, err := d.client.NodeList(ctx, client.NodeListOptions{}) if err != nil { return nil, fmt.Errorf("error while listing swarm nodes: %w", err) } - labels := make(map[string]map[string]string, len(nodes)) - for _, n := range nodes { + labels := make(map[string]map[string]string, len(nodes.Items)) + for _, n := range nodes.Items { labels[n.ID] = map[string]string{ swarmLabelNodeID: n.ID, swarmLabelNodeRole: string(n.Spec.Role), diff --git a/vendor/github.com/prometheus/prometheus/discovery/moby/services.go b/vendor/github.com/prometheus/prometheus/discovery/moby/services.go index 
558d544e256..3254ffd1726 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/moby/services.go +++ b/vendor/github.com/prometheus/prometheus/discovery/moby/services.go @@ -19,7 +19,9 @@ import ( "net" "strconv" - "github.com/docker/docker/api/types/swarm" + "github.com/moby/moby/api/types/network" + "github.com/moby/moby/api/types/swarm" + "github.com/moby/moby/client" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -45,7 +47,7 @@ func (d *Discovery) refreshServices(ctx context.Context) ([]*targetgroup.Group, Source: "DockerSwarm", } - services, err := d.client.ServiceList(ctx, swarm.ServiceListOptions{Filters: d.filters}) + services, err := d.client.ServiceList(ctx, client.ServiceListOptions{Filters: d.filters}) if err != nil { return nil, fmt.Errorf("error while listing swarm services: %w", err) } @@ -55,7 +57,7 @@ func (d *Discovery) refreshServices(ctx context.Context) ([]*targetgroup.Group, return nil, fmt.Errorf("error while computing swarm network labels: %w", err) } - for _, s := range services { + for _, s := range services.Items { commonLabels := map[string]string{ swarmLabelServiceID: s.ID, swarmLabelServiceName: s.Spec.Name, @@ -75,13 +77,13 @@ func (d *Discovery) refreshServices(ctx context.Context) ([]*targetgroup.Group, for _, p := range s.Endpoint.VirtualIPs { var added bool - ip, _, err := net.ParseCIDR(p.Addr) + ip, _, err := net.ParseCIDR(p.Addr.String()) if err != nil { return nil, fmt.Errorf("error while parsing address %s: %w", p.Addr, err) } for _, e := range s.Endpoint.Ports { - if e.Protocol != swarm.PortConfigProtocolTCP { + if e.Protocol != network.TCP { continue } labels := model.LabelSet{ @@ -126,13 +128,13 @@ func (d *Discovery) refreshServices(ctx context.Context) ([]*targetgroup.Group, } func (d *Discovery) getServicesLabelsAndPorts(ctx context.Context) (map[string]map[string]string, map[string][]swarm.PortConfig, error) { - services, err := d.client.ServiceList(ctx, 
swarm.ServiceListOptions{}) + services, err := d.client.ServiceList(ctx, client.ServiceListOptions{}) if err != nil { return nil, nil, err } - servicesLabels := make(map[string]map[string]string, len(services)) - servicesPorts := make(map[string][]swarm.PortConfig, len(services)) - for _, s := range services { + servicesLabels := make(map[string]map[string]string, len(services.Items)) + servicesPorts := make(map[string][]swarm.PortConfig, len(services.Items)) + for _, s := range services.Items { servicesLabels[s.ID] = map[string]string{ swarmLabelServiceID: s.ID, swarmLabelServiceName: s.Spec.Name, diff --git a/vendor/github.com/prometheus/prometheus/discovery/moby/tasks.go b/vendor/github.com/prometheus/prometheus/discovery/moby/tasks.go index d4e3678ee53..ec247d1d302 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/moby/tasks.go +++ b/vendor/github.com/prometheus/prometheus/discovery/moby/tasks.go @@ -20,7 +20,8 @@ import ( "net" "strconv" - "github.com/docker/docker/api/types/swarm" + mobynetwork "github.com/moby/moby/api/types/network" + "github.com/moby/moby/client" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -43,7 +44,7 @@ func (d *Discovery) refreshTasks(ctx context.Context) ([]*targetgroup.Group, err Source: "DockerSwarm", } - tasks, err := d.client.TaskList(ctx, swarm.TaskListOptions{Filters: d.filters}) + tasks, err := d.client.TaskList(ctx, client.TaskListOptions{Filters: d.filters}) if err != nil { return nil, fmt.Errorf("error while listing swarm services: %w", err) } @@ -63,7 +64,7 @@ func (d *Discovery) refreshTasks(ctx context.Context) ([]*targetgroup.Group, err return nil, fmt.Errorf("error while computing swarm network labels: %w", err) } - for _, s := range tasks { + for _, s := range tasks.Items { commonLabels := map[string]string{ swarmLabelTaskID: s.ID, swarmLabelTaskDesiredState: string(s.DesiredState), @@ -87,7 +88,7 @@ func (d *Discovery) refreshTasks(ctx context.Context) 
([]*targetgroup.Group, err maps.Copy(commonLabels, nodeLabels[s.NodeID]) for _, p := range s.Status.PortStatus.Ports { - if p.Protocol != swarm.PortConfigProtocolTCP { + if p.Protocol != mobynetwork.TCP { continue } @@ -108,13 +109,13 @@ func (d *Discovery) refreshTasks(ctx context.Context) ([]*targetgroup.Group, err for _, address := range network.Addresses { var added bool - ip, _, err := net.ParseCIDR(address) + ip, _, err := net.ParseCIDR(address.String()) if err != nil { return nil, fmt.Errorf("error while parsing address %s: %w", address, err) } for _, p := range servicePorts[s.ServiceID] { - if p.Protocol != swarm.PortConfigProtocolTCP { + if p.Protocol != mobynetwork.TCP { continue } labels := model.LabelSet{ diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/generic.go b/vendor/github.com/prometheus/prometheus/model/histogram/generic.go index 9ec9e9cd4b6..287d914d649 100644 --- a/vendor/github.com/prometheus/prometheus/model/histogram/generic.go +++ b/vendor/github.com/prometheus/prometheus/model/histogram/generic.go @@ -332,10 +332,18 @@ func compactBuckets[IBC InternalBucketCount]( spans = spans[:iSpan] iSpan = 0 + // If all spans were zero-length, no buckets remain valid. + if len(spans) == 0 { + if compensationBuckets != nil { + compensationBuckets = compensationBuckets[:0] + } + return primaryBuckets[:0], compensationBuckets, spans + } + // Cut out empty buckets from start and end of spans, no matter // what. Also cut out empty buckets from the middle of a span but only // if there are more than maxEmptyBuckets consecutive empty buckets. 
- for iBucket < len(primaryBuckets) { + for iBucket < len(primaryBuckets) && iSpan < len(spans) { if deltaBuckets { currentBucketAbsolute += primaryBuckets[iBucket] } else { diff --git a/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go b/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go index d284a14c40e..68d34aec2fc 100644 --- a/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go +++ b/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go @@ -19,6 +19,7 @@ import ( "errors" "fmt" "io" + "log/slog" "os" "strings" "time" @@ -339,7 +340,13 @@ func testTemplateParsing(rl *Rule) (errs []error) { } // Parse parses and validates a set of rules. -func Parse(content []byte, ignoreUnknownFields bool, nameValidationScheme model.ValidationScheme, p parser.Parser) (*RuleGroups, []error) { +func Parse( + content []byte, + ignoreUnknownFields bool, + nameValidationScheme model.ValidationScheme, + p parser.Parser, + logger *slog.Logger, +) (*RuleGroups, []error) { var ( groups RuleGroups node ruleGroups @@ -355,6 +362,13 @@ func Parse(content []byte, ignoreUnknownFields bool, nameValidationScheme model. if err != nil && !errors.Is(err, io.EOF) { errs = append(errs, err) } + // Check for a second document. + var secondDoc any + err = decoder.Decode(&secondDoc) + if !errors.Is(err, io.EOF) { + logger.Warn("Multiple document yaml rules files are not supported, only the first document is processed") + } + err = yaml.Unmarshal(content, &node) if err != nil { errs = append(errs, err) @@ -368,12 +382,18 @@ func Parse(content []byte, ignoreUnknownFields bool, nameValidationScheme model. } // ParseFile reads and parses rules from a file. 
-func ParseFile(file string, ignoreUnknownFields bool, nameValidationScheme model.ValidationScheme, p parser.Parser) (*RuleGroups, []error) { +func ParseFile( + file string, + ignoreUnknownFields bool, + nameValidationScheme model.ValidationScheme, + p parser.Parser, + logger *slog.Logger, +) (*RuleGroups, []error) { b, err := os.ReadFile(file) if err != nil { return nil, []error{fmt.Errorf("%s: %w", file, err)} } - rgs, errs := Parse(b, ignoreUnknownFields, nameValidationScheme, p) + rgs, errs := Parse(b, ignoreUnknownFields, nameValidationScheme, p, logger) for i := range errs { errs[i] = fmt.Errorf("%s: %w", file, errs[i]) } diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/interface.go b/vendor/github.com/prometheus/prometheus/model/textparse/interface.go index 08d9a080a70..d2a3c94e2f6 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/interface.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/interface.go @@ -76,9 +76,9 @@ type Parser interface { // retrieved (including the case where no exemplars exist at all). Exemplar(l *exemplar.Exemplar) bool - // StartTimestamp returns the created timestamp (in milliseconds) for the + // StartTimestamp returns the start timestamp (in milliseconds) for the // current sample. It returns 0 if it is unknown e.g. if it wasn't set or - // if the scrape protocol or metric type does not support created timestamps. + // if the scrape protocol or metric type does not support start timestamps. StartTimestamp() int64 // Next advances the parser to the next sample. 
diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go index 2f6671eb621..9678d6d152a 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go @@ -102,7 +102,7 @@ type OpenMetricsParser struct { exemplarTs int64 hasExemplarTs bool - // Created timestamp parsing state. + // Start timestamp parsing state. st int64 stHashSet uint64 // ignoreExemplar instructs the parser to not overwrite exemplars (to keep them while peeking ahead). @@ -122,11 +122,11 @@ type openMetricsParserOptions struct { type OpenMetricsOption func(*openMetricsParserOptions) // WithOMParserSTSeriesSkipped turns off exposing _created lines -// as series, which makes those only used for parsing created timestamp +// as series, which makes those only used for parsing start timestamp // for `StartTimestamp` method purposes. // // It's recommended to use this option to avoid using _created lines for other -// purposes than created timestamp, but leave false by default for the +// purposes than start timestamp, but leave false by default for the // best-effort compatibility. func WithOMParserSTSeriesSkipped() OpenMetricsOption { return func(o *openMetricsParserOptions) { @@ -285,7 +285,7 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool { return true } -// StartTimestamp returns the created timestamp for a current Metric if exists or nil. +// StartTimestamp returns the start timestamp for a current Metric if exists or nil. // NOTE(Maniktherana): Might use additional CPU/mem resources due to deep copy of parser required for peeking given 1.0 OM specification on _created series. 
func (p *OpenMetricsParser) StartTimestamp() int64 { if !typeRequiresST(p.mtype) { diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go index 637ae7b7475..f0537b212f0 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go @@ -404,11 +404,11 @@ func (p *ProtobufParser) StartTimestamp() int64 { var st *types.Timestamp switch p.dec.GetType() { case dto.MetricType_COUNTER: - st = p.dec.GetCounter().GetCreatedTimestamp() + st = p.dec.GetCounter().GetStartTimestamp() case dto.MetricType_SUMMARY: - st = p.dec.GetSummary().GetCreatedTimestamp() + st = p.dec.GetSummary().GetStartTimestamp() case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: - st = p.dec.GetHistogram().GetCreatedTimestamp() + st = p.dec.GetHistogram().GetStartTimestamp() default: } if st == nil { @@ -484,6 +484,9 @@ func (p *ProtobufParser) Next() (Entry, error) { p.fieldPos = -3 // We have not returned anything, let p.Next() increment it to -2. return p.Next() } + if err := checkNativeHistogramConsistency(p.dec.GetHistogram()); err != nil { + return EntryInvalid, fmt.Errorf("histogram %q: %w", p.dec.GetName(), err) + } p.state = EntryHistogram } else { p.state = EntrySeries @@ -527,6 +530,9 @@ func (p *ProtobufParser) Next() (Entry, error) { // it means we might need to do NHCB conversion. if t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM { if !isClassicHistogram { + if err := checkNativeHistogramConsistency(p.dec.GetHistogram()); err != nil { + return EntryInvalid, fmt.Errorf("histogram %q: %w", p.dec.GetName(), err) + } p.state = EntryHistogram } else if p.convertClassicHistogramsToNHCB { // We still need to spit out the NHCB. 
@@ -674,6 +680,10 @@ func (p *ProtobufParser) getMagicLabel() (bool, string, string) { switch p.dec.GetType() { case dto.MetricType_SUMMARY: qq := p.dec.GetSummary().GetQuantile() + if p.fieldPos >= len(qq) { + p.fieldsDone = true + return false, "", "" + } q := qq[p.fieldPos] p.fieldsDone = p.fieldPos == len(qq)-1 return true, model.QuantileLabel, labels.FormatOpenMetricsFloat(q.GetQuantile()) @@ -744,3 +754,36 @@ func (p *ProtobufParser) convertToNHCB(t dto.MetricType) (*histogram.Histogram, } return ch, cfh, nil } + +// checkNativeHistogramConsistency returns an error if the span bucket counts +// do not match the number of bucket values in a native histogram protobuf +// message. It catches malformed input before it reaches compactBuckets, where +// a mismatch would cause a panic. +func checkNativeHistogramConsistency(h *dto.Histogram) error { + isFloat := h.GetSampleCountFloat() > 0 || h.GetZeroCountFloat() > 0 + var positiveBuckets, negativeBuckets int + if isFloat { + positiveBuckets = len(h.GetPositiveCount()) + negativeBuckets = len(h.GetNegativeCount()) + } else { + positiveBuckets = len(h.GetPositiveDelta()) + negativeBuckets = len(h.GetNegativeDelta()) + } + if err := checkProtoSpanBucketConsistency("positive", h.GetPositiveSpan(), positiveBuckets); err != nil { + return err + } + return checkProtoSpanBucketConsistency("negative", h.GetNegativeSpan(), negativeBuckets) +} + +// checkProtoSpanBucketConsistency returns an error when the total length +// described by spans does not match numBuckets. 
+func checkProtoSpanBucketConsistency(side string, spans []dto.BucketSpan, numBuckets int) error { + var total int + for _, s := range spans { + total += int(s.GetLength()) + } + if total != numBuckets { + return fmt.Errorf("%s side: spans require %d buckets, have %d", side, total, numBuckets) + } + return nil +} diff --git a/vendor/github.com/prometheus/prometheus/model/timestamp/timestamp.go b/vendor/github.com/prometheus/prometheus/model/timestamp/timestamp.go index 0f27314e57d..27b5bbc4640 100644 --- a/vendor/github.com/prometheus/prometheus/model/timestamp/timestamp.go +++ b/vendor/github.com/prometheus/prometheus/model/timestamp/timestamp.go @@ -20,12 +20,12 @@ import ( // FromTime returns a new millisecond timestamp from a time. func FromTime(t time.Time) int64 { - return t.Unix()*1000 + int64(t.Nanosecond())/int64(time.Millisecond) + return t.UnixMilli() } -// Time returns a new time.Time object from a millisecond timestamp. +// Time returns a new time.Time object from a millisecond timestamp, in UTC. func Time(ts int64) time.Time { - return time.Unix(ts/1000, (ts%1000)*int64(time.Millisecond)).UTC() + return time.UnixMilli(ts).UTC() } // FromFloatSeconds returns a millisecond timestamp from float seconds. diff --git a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/decoder.go b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/decoder.go index de7184c4b5d..9d1586dd2ee 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/decoder.go +++ b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/decoder.go @@ -114,7 +114,7 @@ func (m *MetricStreamingDecoder) resetMetric() { // TODO(bwplotka): Autogenerate reset functions. 
if m.Counter != nil { m.Counter.Value = 0 - m.Counter.CreatedTimestamp = nil + m.Counter.StartTimestamp = nil m.Counter.Exemplar = nil } if m.Gauge != nil { @@ -125,7 +125,7 @@ func (m *MetricStreamingDecoder) resetMetric() { m.Histogram.SampleCountFloat = 0 m.Histogram.SampleSum = 0 m.Histogram.Bucket = m.Histogram.Bucket[:0] - m.Histogram.CreatedTimestamp = nil + m.Histogram.StartTimestamp = nil m.Histogram.Schema = 0 m.Histogram.ZeroThreshold = 0 m.Histogram.ZeroCount = 0 @@ -142,7 +142,7 @@ func (m *MetricStreamingDecoder) resetMetric() { m.Summary.SampleCount = 0 m.Summary.SampleSum = 0 m.Summary.Quantile = m.Summary.Quantile[:0] - m.Summary.CreatedTimestamp = nil + m.Summary.StartTimestamp = nil } } diff --git a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go index 17cab6081e2..5b47756c626 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go +++ b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go @@ -174,7 +174,7 @@ func (m *Gauge) GetValue() float64 { type Counter struct { Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar,proto3" json:"exemplar,omitempty"` - CreatedTimestamp *types.Timestamp `protobuf:"bytes,3,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"` + StartTimestamp *types.Timestamp `protobuf:"bytes,3,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -227,9 +227,9 @@ func (m *Counter) GetExemplar() *Exemplar { return nil } -func (m *Counter) GetCreatedTimestamp() *types.Timestamp { +func (m *Counter) GetStartTimestamp() *types.Timestamp { if m != nil { - return m.CreatedTimestamp + 
return m.StartTimestamp } return nil } @@ -293,7 +293,7 @@ type Summary struct { SampleCount uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount,proto3" json:"sample_count,omitempty"` SampleSum float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"` Quantile []Quantile `protobuf:"bytes,3,rep,name=quantile,proto3" json:"quantile"` - CreatedTimestamp *types.Timestamp `protobuf:"bytes,4,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"` + StartTimestamp *types.Timestamp `protobuf:"bytes,4,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -353,9 +353,9 @@ func (m *Summary) GetQuantile() []Quantile { return nil } -func (m *Summary) GetCreatedTimestamp() *types.Timestamp { +func (m *Summary) GetStartTimestamp() *types.Timestamp { if m != nil { - return m.CreatedTimestamp + return m.StartTimestamp } return nil } @@ -412,8 +412,8 @@ type Histogram struct { SampleCountFloat float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat,proto3" json:"sample_count_float,omitempty"` SampleSum float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"` // Buckets for the classic histogram. - Bucket []Bucket `protobuf:"bytes,3,rep,name=bucket,proto3" json:"bucket"` - CreatedTimestamp *types.Timestamp `protobuf:"bytes,15,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"` + Bucket []Bucket `protobuf:"bytes,3,rep,name=bucket,proto3" json:"bucket"` + StartTimestamp *types.Timestamp `protobuf:"bytes,15,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"` // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. 
// They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and // then each power of two is divided into 2^n logarithmic buckets. @@ -508,9 +508,9 @@ func (m *Histogram) GetBucket() []Bucket { return nil } -func (m *Histogram) GetCreatedTimestamp() *types.Timestamp { +func (m *Histogram) GetStartTimestamp() *types.Timestamp { if m != nil { - return m.CreatedTimestamp + return m.StartTimestamp } return nil } @@ -983,68 +983,68 @@ func init() { var fileDescriptor_d1e5ddb18987a258 = []byte{ // 982 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x4d, 0x8f, 0xdb, 0x44, - 0x18, 0xae, 0x9b, 0x4f, 0xbf, 0xd9, 0x6c, 0xbd, 0x43, 0x54, 0x59, 0x0b, 0xbb, 0x09, 0x96, 0x90, - 0x16, 0x84, 0x12, 0x01, 0x45, 0xa0, 0xb2, 0x48, 0xec, 0xb6, 0xdb, 0x14, 0x95, 0xb4, 0x65, 0x92, - 0x1c, 0xca, 0xc5, 0x9a, 0x24, 0xb3, 0x8e, 0x85, 0xbf, 0xb0, 0xc7, 0x15, 0xcb, 0x9d, 0xdf, 0xc0, - 0x1f, 0xe0, 0x67, 0x70, 0x46, 0x3d, 0x72, 0xe2, 0x88, 0xd0, 0xfe, 0x0e, 0x0e, 0x68, 0xbe, 0xec, - 0x6c, 0xe5, 0x2c, 0x2c, 0xdc, 0x3c, 0x8f, 0x9f, 0x67, 0xe6, 0x79, 0x1f, 0xdb, 0xef, 0x6b, 0x70, - 0xfc, 0x78, 0x94, 0xa4, 0x71, 0x48, 0xd9, 0x9a, 0xe6, 0xd9, 0x68, 0x19, 0xf8, 0x34, 0x62, 0xa3, - 0x90, 0xb2, 0xd4, 0x5f, 0x66, 0xc3, 0x24, 0x8d, 0x59, 0x8c, 0x7a, 0x7e, 0x3c, 0x2c, 0x39, 0x43, - 0xc9, 0xd9, 0xef, 0x79, 0xb1, 0x17, 0x0b, 0xc2, 0x88, 0x5f, 0x49, 0xee, 0x7e, 0xdf, 0x8b, 0x63, - 0x2f, 0xa0, 0x23, 0xb1, 0x5a, 0xe4, 0xe7, 0x23, 0xe6, 0x87, 0x34, 0x63, 0x24, 0x4c, 0x24, 0xc1, - 0xf9, 0x18, 0xcc, 0xaf, 0xc8, 0x82, 0x06, 0xcf, 0x89, 0x9f, 0x22, 0x04, 0xf5, 0x88, 0x84, 0xd4, - 0x36, 0x06, 0xc6, 0x91, 0x89, 0xc5, 0x35, 0xea, 0x41, 0xe3, 0x25, 0x09, 0x72, 0x6a, 0xdf, 0x16, - 0xa0, 0x5c, 0x38, 0x07, 0xd0, 0x18, 0x93, 0xdc, 0xdb, 0xb8, 0xcd, 0x35, 0x86, 0xbe, 0xfd, 0xb3, - 0x01, 0xad, 0x07, 0x71, 0x1e, 0x31, 0x9a, 0x56, 0x33, 0xd0, 0x7d, 0x68, 0xd3, 0xef, 0x69, 0x98, - 0x04, 0x24, 0x15, 0x3b, 0x77, 0x3e, 0x3c, 0x1c, 0x56, 0xd5, 
0x35, 0x3c, 0x53, 0x2c, 0x5c, 0xf0, - 0xd1, 0x18, 0xf6, 0x96, 0x29, 0x25, 0x8c, 0xae, 0xdc, 0xa2, 0x1c, 0xbb, 0x26, 0x36, 0xd9, 0x1f, - 0xca, 0x82, 0x87, 0xba, 0xe0, 0xe1, 0x4c, 0x33, 0xb0, 0xa5, 0x44, 0x05, 0xe2, 0x1c, 0x43, 0xfb, - 0xeb, 0x9c, 0x44, 0xcc, 0x0f, 0x28, 0xda, 0x87, 0xf6, 0x77, 0xea, 0x5a, 0x39, 0x2d, 0xd6, 0x57, - 0x33, 0x28, 0x8a, 0xfc, 0xdd, 0x80, 0xd6, 0x34, 0x0f, 0x43, 0x92, 0x5e, 0xa0, 0xb7, 0x61, 0x27, - 0x23, 0x61, 0x12, 0x50, 0x77, 0xc9, 0xcb, 0x16, 0x3b, 0xd4, 0x71, 0x47, 0x62, 0x22, 0x09, 0x74, - 0x00, 0xa0, 0x28, 0x59, 0x1e, 0xaa, 0x9d, 0x4c, 0x89, 0x4c, 0xf3, 0x10, 0x7d, 0xb1, 0x71, 0x7e, - 0x6d, 0x50, 0xdb, 0x1e, 0x88, 0x76, 0x7c, 0x5a, 0x7f, 0xf5, 0x47, 0xff, 0xd6, 0x86, 0xcb, 0xca, - 0x58, 0xea, 0xff, 0x21, 0x96, 0x3e, 0xb4, 0xe6, 0x11, 0xbb, 0x48, 0xe8, 0x6a, 0xcb, 0xe3, 0xfd, - 0xab, 0x01, 0xe6, 0x63, 0x3f, 0x63, 0xb1, 0x97, 0x92, 0xf0, 0xdf, 0xd4, 0xfe, 0x3e, 0xa0, 0x4d, - 0x8a, 0x7b, 0x1e, 0xc4, 0x84, 0x09, 0x6f, 0x06, 0xb6, 0x36, 0x88, 0x8f, 0x38, 0xfe, 0x4f, 0x49, - 0xdd, 0x87, 0xe6, 0x22, 0x5f, 0x7e, 0x4b, 0x99, 0xca, 0xe9, 0xad, 0xea, 0x9c, 0x4e, 0x05, 0x47, - 0xa5, 0xa4, 0x14, 0xd5, 0x19, 0xdd, 0xb9, 0x79, 0x46, 0xe8, 0x2e, 0x34, 0xb3, 0xe5, 0x9a, 0x86, - 0xc4, 0x6e, 0x0c, 0x8c, 0xa3, 0x3d, 0xac, 0x56, 0xe8, 0x1d, 0xd8, 0xfd, 0x81, 0xa6, 0xb1, 0xcb, - 0xd6, 0x29, 0xcd, 0xd6, 0x71, 0xb0, 0xb2, 0x9b, 0xc2, 0x7f, 0x97, 0xa3, 0x33, 0x0d, 0xf2, 0x12, - 0x05, 0x4d, 0x26, 0xd6, 0x12, 0x89, 0x99, 0x1c, 0x91, 0x79, 0x1d, 0x81, 0x55, 0xde, 0x56, 0x69, - 0xb5, 0xc5, 0x3e, 0xbb, 0x05, 0x49, 0x66, 0xf5, 0x04, 0xba, 0x11, 0xf5, 0x08, 0xf3, 0x5f, 0x52, - 0x37, 0x4b, 0x48, 0x64, 0x9b, 0x22, 0x93, 0xc1, 0x75, 0x99, 0x4c, 0x13, 0x12, 0xa9, 0x5c, 0x76, - 0xb4, 0x98, 0x63, 0xdc, 0x7c, 0xb1, 0xd9, 0x8a, 0x06, 0x8c, 0xd8, 0x30, 0xa8, 0x1d, 0x21, 0x5c, - 0x1c, 0xf1, 0x90, 0x83, 0x57, 0x68, 0xb2, 0x80, 0xce, 0xa0, 0xc6, 0x6b, 0xd4, 0xa8, 0x2c, 0xe2, - 0x09, 0x74, 0x93, 0x38, 0xf3, 0x4b, 0x6b, 0x3b, 0x37, 0xb3, 0xa6, 0xc5, 0xda, 0x5a, 0xb1, 0x99, - 
0xb4, 0xd6, 0x95, 0xd6, 0x34, 0x5a, 0x58, 0x2b, 0x68, 0xd2, 0xda, 0xae, 0xb4, 0xa6, 0x51, 0x69, - 0xed, 0x18, 0x4c, 0xdd, 0x4d, 0x32, 0xdb, 0xba, 0xee, 0x6b, 0x2b, 0xda, 0x4f, 0x29, 0x70, 0x7e, - 0x35, 0xa0, 0x29, 0xed, 0xa2, 0x77, 0xc1, 0x5a, 0xe6, 0x61, 0x1e, 0x6c, 0x86, 0x21, 0xdf, 0xff, - 0x3b, 0x25, 0x2e, 0xcf, 0xbc, 0x07, 0x77, 0x5f, 0xa7, 0x5e, 0xf9, 0x0e, 0x7a, 0xaf, 0x09, 0xe4, - 0xf3, 0xed, 0x43, 0x27, 0x4f, 0x12, 0x9a, 0xba, 0x8b, 0x38, 0x8f, 0x56, 0xea, 0x63, 0x00, 0x01, - 0x9d, 0x72, 0xe4, 0x4a, 0x23, 0xad, 0xdd, 0xac, 0x91, 0x3a, 0xc7, 0x00, 0x65, 0xec, 0xfc, 0x95, - 0x8e, 0xcf, 0xcf, 0x33, 0x2a, 0x2b, 0xd8, 0xc3, 0x6a, 0xc5, 0xf1, 0x80, 0x46, 0x1e, 0x5b, 0x8b, - 0xd3, 0xbb, 0x58, 0xad, 0x9c, 0x9f, 0x0c, 0x68, 0xeb, 0x4d, 0xd1, 0x67, 0xd0, 0x08, 0xf8, 0x1c, - 0xb1, 0x0d, 0x91, 0x66, 0xbf, 0xda, 0x43, 0x31, 0x6a, 0xd4, 0x33, 0x96, 0x9a, 0xea, 0xfe, 0x8a, - 0x3e, 0x05, 0xf3, 0x26, 0xed, 0xbd, 0x24, 0x3b, 0x3f, 0xd6, 0xa0, 0x39, 0x11, 0x33, 0xf3, 0xff, - 0xf9, 0xfa, 0x00, 0x1a, 0x1e, 0x9f, 0x72, 0x6a, 0x42, 0xbd, 0x59, 0x2d, 0x16, 0x83, 0x10, 0x4b, - 0x26, 0xfa, 0x04, 0x5a, 0x4b, 0x39, 0xf8, 0x94, 0xe5, 0x83, 0x6a, 0x91, 0x9a, 0x8e, 0x58, 0xb3, - 0xb9, 0x30, 0x93, 0xc3, 0x44, 0xf5, 0xec, 0x2d, 0x42, 0x35, 0x71, 0xb0, 0x66, 0x73, 0x61, 0x2e, - 0xbb, 0xb5, 0x68, 0x45, 0x5b, 0x85, 0xaa, 0xa5, 0x63, 0xcd, 0x46, 0x9f, 0x83, 0xb9, 0xd6, 0x4d, - 0x5c, 0xb4, 0xa0, 0xad, 0xf1, 0x14, 0xbd, 0x1e, 0x97, 0x0a, 0xde, 0xf6, 0x8b, 0xc4, 0xdd, 0x30, - 0x13, 0x7d, 0xae, 0x86, 0x3b, 0x05, 0x36, 0xc9, 0x9c, 0x5f, 0x0c, 0xd8, 0x91, 0xcf, 0xe1, 0x11, - 0x09, 0xfd, 0xe0, 0xa2, 0xf2, 0x07, 0x03, 0x41, 0x7d, 0x4d, 0x83, 0x44, 0xfd, 0x5f, 0x88, 0x6b, - 0x74, 0x0f, 0xea, 0xdc, 0xa3, 0x88, 0x70, 0x77, 0x5b, 0xc7, 0x90, 0x3b, 0xcf, 0x2e, 0x12, 0x8a, - 0x05, 0x9b, 0x0f, 0x06, 0xf9, 0xa7, 0x64, 0xd7, 0xaf, 0x1b, 0x0c, 0x52, 0xa7, 0x07, 0x83, 0x54, - 0x70, 0x17, 0x79, 0xe4, 0x33, 0x11, 0xa1, 0x89, 0xc5, 0xf5, 0x7b, 0x0b, 0x80, 0xf2, 0x0c, 0xd4, - 0x81, 0xd6, 0x83, 0x67, 0xf3, 0xa7, 
0xb3, 0x33, 0x6c, 0xdd, 0x42, 0x26, 0x34, 0xc6, 0x27, 0xf3, - 0xf1, 0x99, 0x65, 0x70, 0x7c, 0x3a, 0x9f, 0x4c, 0x4e, 0xf0, 0x0b, 0xeb, 0x36, 0x5f, 0xcc, 0x9f, - 0xce, 0x5e, 0x3c, 0x3f, 0x7b, 0x68, 0xd5, 0x50, 0x17, 0xcc, 0xc7, 0x5f, 0x4e, 0x67, 0xcf, 0xc6, - 0xf8, 0x64, 0x62, 0xd5, 0xd1, 0x1b, 0x70, 0x47, 0x68, 0xdc, 0x12, 0x6c, 0x9c, 0x3a, 0xaf, 0x2e, - 0x0f, 0x8d, 0xdf, 0x2e, 0x0f, 0x8d, 0x3f, 0x2f, 0x0f, 0x8d, 0x6f, 0x7a, 0x7e, 0xec, 0x96, 0x86, - 0x5d, 0x69, 0x78, 0xd1, 0x14, 0x6f, 0xfb, 0x47, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x1c, 0xe1, - 0xcf, 0xb8, 0x1d, 0x0a, 0x00, 0x00, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdf, 0x6e, 0xe3, 0xc4, + 0x1b, 0x5d, 0x6f, 0xfe, 0xfa, 0x4b, 0x93, 0x66, 0xe7, 0x17, 0xad, 0xac, 0xfe, 0x68, 0x13, 0x2c, + 0x21, 0x15, 0x84, 0x12, 0x01, 0x8b, 0x40, 0x4b, 0x91, 0x68, 0xbb, 0xdd, 0x2e, 0x5a, 0xb2, 0xbb, + 0x4c, 0x92, 0x8b, 0xe5, 0xc6, 0x9a, 0xa4, 0xd3, 0xc4, 0xc2, 0xf6, 0x18, 0x7b, 0xbc, 0xa2, 0xdc, + 0xf3, 0x0c, 0xdc, 0xf2, 0x20, 0x5c, 0xa3, 0xbd, 0x44, 0x3c, 0x00, 0x42, 0x7d, 0x0a, 0x2e, 0xd1, + 0xfc, 0xb3, 0xd3, 0x95, 0x53, 0x88, 0xb8, 0xf3, 0x1c, 0x9f, 0x33, 0x73, 0xbe, 0x63, 0xfb, 0xfb, + 0x0c, 0xae, 0xcf, 0x46, 0x71, 0xc2, 0x42, 0xca, 0x57, 0x34, 0x4b, 0x47, 0x8b, 0xc0, 0xa7, 0x11, + 0x1f, 0x85, 0x94, 0x27, 0xfe, 0x22, 0x1d, 0xc6, 0x09, 0xe3, 0x0c, 0xf5, 0x7c, 0x36, 0x2c, 0x38, + 0x43, 0xc5, 0xd9, 0xeb, 0x2d, 0xd9, 0x92, 0x49, 0xc2, 0x48, 0x5c, 0x29, 0xee, 0x5e, 0x7f, 0xc9, + 0xd8, 0x32, 0xa0, 0x23, 0xb9, 0x9a, 0x67, 0x97, 0x23, 0xee, 0x87, 0x34, 0xe5, 0x24, 0x8c, 0x15, + 0xc1, 0xfd, 0x18, 0xec, 0xaf, 0xc8, 0x9c, 0x06, 0x2f, 0x88, 0x9f, 0x20, 0x04, 0xd5, 0x88, 0x84, + 0xd4, 0xb1, 0x06, 0xd6, 0xa1, 0x8d, 0xe5, 0x35, 0xea, 0x41, 0xed, 0x15, 0x09, 0x32, 0xea, 0xdc, + 0x95, 0xa0, 0x5a, 0xb8, 0xfb, 0x50, 0x3b, 0x27, 0xd9, 0x72, 0xed, 0xb6, 0xd0, 0x58, 0xe6, 0xf6, + 0xcf, 0x16, 0x34, 0x4e, 0x59, 0x16, 0x71, 0x9a, 0x94, 0x33, 0xd0, 0x43, 0x68, 0xd2, 0xef, 0x69, + 0x18, 0x07, 0x24, 0x91, 0x3b, 0xb7, 
0x3e, 0x3c, 0x18, 0x96, 0xd5, 0x35, 0x3c, 0xd3, 0x2c, 0x9c, + 0xf3, 0xd1, 0x29, 0xec, 0xa6, 0x9c, 0x24, 0xdc, 0xcb, 0x8b, 0x71, 0x2a, 0x72, 0x8b, 0xbd, 0xa1, + 0x2a, 0x77, 0x68, 0xca, 0x1d, 0x4e, 0x0d, 0x03, 0x77, 0xa4, 0x24, 0x5f, 0xbb, 0x47, 0xd0, 0xfc, + 0x3a, 0x23, 0x11, 0xf7, 0x03, 0x8a, 0xf6, 0xa0, 0xf9, 0x9d, 0xbe, 0xd6, 0x2e, 0xf3, 0xf5, 0xcd, + 0xfa, 0xf3, 0x02, 0x7f, 0xb7, 0xa0, 0x31, 0xc9, 0xc2, 0x90, 0x24, 0x57, 0xe8, 0x6d, 0xd8, 0x49, + 0x49, 0x18, 0x07, 0xd4, 0x5b, 0x88, 0x92, 0xe5, 0x0e, 0x55, 0xdc, 0x52, 0x98, 0x4c, 0x01, 0xed, + 0x03, 0x68, 0x4a, 0x9a, 0x85, 0x7a, 0x27, 0x5b, 0x21, 0x93, 0x2c, 0x44, 0x5f, 0xac, 0x9d, 0x5f, + 0x19, 0x54, 0x36, 0x87, 0x61, 0x1c, 0x9f, 0x54, 0x5f, 0xff, 0xd1, 0xbf, 0xb3, 0xe6, 0xb2, 0x24, + 0x92, 0xea, 0xd6, 0x91, 0xf4, 0xa1, 0x31, 0x8b, 0xf8, 0x55, 0x4c, 0x2f, 0x36, 0x3c, 0xd6, 0xbf, + 0x6a, 0x60, 0x3f, 0xf1, 0x53, 0xce, 0x96, 0x09, 0x09, 0xff, 0x4d, 0xdd, 0xef, 0x03, 0x5a, 0xa7, + 0x78, 0x97, 0x01, 0x23, 0x5c, 0x3a, 0xb3, 0x70, 0x77, 0x8d, 0xf8, 0x58, 0xe0, 0xff, 0x94, 0xd2, + 0x43, 0xa8, 0xcf, 0xb3, 0xc5, 0xb7, 0x94, 0xeb, 0x8c, 0xde, 0x2a, 0xcf, 0xe8, 0x44, 0x72, 0x74, + 0x42, 0x5a, 0x51, 0x96, 0xcf, 0xee, 0xb6, 0xf9, 0xa0, 0xfb, 0x50, 0x4f, 0x17, 0x2b, 0x1a, 0x12, + 0xa7, 0x36, 0xb0, 0x0e, 0xef, 0x61, 0xbd, 0x42, 0xef, 0x40, 0xe7, 0x07, 0x9a, 0x30, 0x8f, 0xaf, + 0x12, 0x9a, 0xae, 0x58, 0x70, 0xe1, 0xd4, 0xa5, 0xf7, 0xb6, 0x40, 0xa7, 0x06, 0x14, 0xe5, 0x49, + 0x9a, 0x4a, 0xab, 0x21, 0xd3, 0xb2, 0x05, 0xa2, 0xb2, 0x3a, 0x84, 0x6e, 0x71, 0x5b, 0x27, 0xd5, + 0x94, 0xfb, 0x74, 0x72, 0x92, 0xca, 0xe9, 0x29, 0xb4, 0x23, 0xba, 0x24, 0xdc, 0x7f, 0x45, 0xbd, + 0x34, 0x26, 0x91, 0x63, 0xcb, 0x3c, 0x06, 0xb7, 0xe5, 0x31, 0x89, 0x49, 0xa4, 0x33, 0xd9, 0x31, + 0x62, 0x81, 0x09, 0xf3, 0xf9, 0x66, 0x17, 0x34, 0xe0, 0xc4, 0x81, 0x41, 0xe5, 0x10, 0xe1, 0xfc, + 0x88, 0x47, 0x02, 0xbc, 0x41, 0x53, 0x05, 0xb4, 0x06, 0x15, 0x51, 0xa3, 0x41, 0x55, 0x11, 0x4f, + 0xa1, 0x1d, 0xb3, 0xd4, 0x2f, 0xac, 0xed, 0x6c, 0x67, 0xcd, 0x88, 0x8d, 
0xb5, 0x7c, 0x33, 0x65, + 0xad, 0xad, 0xac, 0x19, 0x34, 0xb7, 0x96, 0xd3, 0x94, 0xb5, 0x8e, 0xb2, 0x66, 0x50, 0x65, 0xed, + 0x08, 0x6c, 0xd3, 0x41, 0x52, 0xa7, 0x7b, 0xdb, 0x57, 0x96, 0xb7, 0x9c, 0x42, 0xe0, 0xfe, 0x6a, + 0x41, 0x5d, 0xd9, 0x45, 0xef, 0x42, 0x77, 0x91, 0x85, 0x59, 0xb0, 0x1e, 0x86, 0x7a, 0xf7, 0x77, + 0x0b, 0x5c, 0x9d, 0xf9, 0x00, 0xee, 0xbf, 0x49, 0xbd, 0xf1, 0x0d, 0xf4, 0xde, 0x10, 0xa8, 0xe7, + 0xdb, 0x87, 0x56, 0x16, 0xc7, 0x34, 0xf1, 0xe6, 0x2c, 0x8b, 0x2e, 0xf4, 0x87, 0x00, 0x12, 0x3a, + 0x11, 0xc8, 0x8d, 0xe6, 0x59, 0xd9, 0xae, 0x79, 0xba, 0x47, 0x00, 0x45, 0xec, 0xe2, 0x95, 0x66, + 0x97, 0x97, 0x29, 0x55, 0x15, 0xdc, 0xc3, 0x7a, 0x25, 0xf0, 0x80, 0x46, 0x4b, 0xbe, 0x92, 0xa7, + 0xb7, 0xb1, 0x5e, 0xb9, 0x3f, 0x59, 0xd0, 0x34, 0x9b, 0xa2, 0xcf, 0xa0, 0x16, 0x88, 0xd9, 0xe1, + 0x58, 0x32, 0xcd, 0x7e, 0xb9, 0x87, 0x7c, 0xbc, 0xe8, 0x67, 0xac, 0x34, 0xe5, 0x7d, 0x15, 0x7d, + 0x0a, 0xf6, 0x36, 0x4d, 0xbd, 0x20, 0xbb, 0x3f, 0x56, 0xa0, 0x3e, 0x96, 0x73, 0xf2, 0xbf, 0xf9, + 0xfa, 0x00, 0x6a, 0x4b, 0x31, 0xd9, 0xf4, 0x54, 0xfa, 0x7f, 0xb9, 0x58, 0x0e, 0x3f, 0xac, 0x98, + 0xe8, 0x13, 0x68, 0x2c, 0xd4, 0xb0, 0xd3, 0x96, 0xf7, 0xcb, 0x45, 0x7a, 0x22, 0x62, 0xc3, 0x16, + 0xc2, 0x54, 0x0d, 0x11, 0xdd, 0xad, 0x37, 0x08, 0xf5, 0xa4, 0xc1, 0x86, 0x2d, 0x84, 0x99, 0xea, + 0xd4, 0xb2, 0x15, 0x6d, 0x14, 0xea, 0x76, 0x8e, 0x0d, 0x1b, 0x7d, 0x0e, 0xf6, 0xca, 0x34, 0x70, + 0xd9, 0x82, 0x36, 0xc6, 0x93, 0xf7, 0x79, 0x5c, 0x28, 0x44, 0xcb, 0xcf, 0x13, 0xf7, 0xc2, 0x54, + 0xf6, 0xb9, 0x0a, 0x6e, 0xe5, 0xd8, 0x38, 0x75, 0x7f, 0xb1, 0x60, 0x47, 0x3d, 0x87, 0xc7, 0x24, + 0xf4, 0x83, 0xab, 0xd2, 0x9f, 0x0a, 0x04, 0xd5, 0x15, 0x0d, 0x62, 0xfd, 0x4f, 0x21, 0xaf, 0xd1, + 0x03, 0xa8, 0x0a, 0x8f, 0x32, 0xc2, 0xce, 0xa6, 0x8e, 0xa1, 0x76, 0x9e, 0x5e, 0xc5, 0x14, 0x4b, + 0xb6, 0x18, 0x0a, 0xea, 0xef, 0xc8, 0xa9, 0xde, 0x36, 0x14, 0x94, 0xce, 0x0c, 0x05, 0xa5, 0x10, + 0x2e, 0xb2, 0xc8, 0xe7, 0x32, 0x42, 0x1b, 0xcb, 0xeb, 0xf7, 0xe6, 0x00, 0xc5, 0x19, 0xa8, 0x05, + 0x8d, 0xd3, 
0xe7, 0xb3, 0x67, 0xd3, 0x33, 0xdc, 0xbd, 0x83, 0x6c, 0xa8, 0x9d, 0x1f, 0xcf, 0xce, + 0xcf, 0xba, 0x96, 0xc0, 0x27, 0xb3, 0xf1, 0xf8, 0x18, 0xbf, 0xec, 0xde, 0x15, 0x8b, 0xd9, 0xb3, + 0xe9, 0xcb, 0x17, 0x67, 0x8f, 0xba, 0x15, 0xd4, 0x06, 0xfb, 0xc9, 0x97, 0x93, 0xe9, 0xf3, 0x73, + 0x7c, 0x3c, 0xee, 0x56, 0xd1, 0xff, 0x60, 0x57, 0x6a, 0xbc, 0x02, 0xac, 0x9d, 0xb8, 0xaf, 0xaf, + 0x0f, 0xac, 0xdf, 0xae, 0x0f, 0xac, 0x3f, 0xaf, 0x0f, 0xac, 0x6f, 0x7a, 0x3e, 0xf3, 0x0a, 0xc3, + 0x9e, 0x32, 0x3c, 0xaf, 0xcb, 0xb7, 0xfd, 0xa3, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xc9, 0xb8, + 0x2d, 0xa0, 0x11, 0x0a, 0x00, 0x00, } func (m *LabelPair) Marshal() (dAtA []byte, err error) { @@ -1145,9 +1145,9 @@ func (m *Counter) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if m.CreatedTimestamp != nil { + if m.StartTimestamp != nil { { - size, err := m.CreatedTimestamp.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.StartTimestamp.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1241,9 +1241,9 @@ func (m *Summary) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if m.CreatedTimestamp != nil { + if m.StartTimestamp != nil { { - size, err := m.CreatedTimestamp.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.StartTimestamp.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1354,9 +1354,9 @@ func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x82 } } - if m.CreatedTimestamp != nil { + if m.StartTimestamp != nil { { - size, err := m.CreatedTimestamp.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.StartTimestamp.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1892,8 +1892,8 @@ func (m *Counter) Size() (n int) { l = m.Exemplar.Size() n += 1 + l + sovMetrics(uint64(l)) } - if m.CreatedTimestamp != nil { - l = m.CreatedTimestamp.Size() + if m.StartTimestamp != nil { + l = 
m.StartTimestamp.Size() n += 1 + l + sovMetrics(uint64(l)) } if m.XXX_unrecognized != nil { @@ -1938,8 +1938,8 @@ func (m *Summary) Size() (n int) { n += 1 + l + sovMetrics(uint64(l)) } } - if m.CreatedTimestamp != nil { - l = m.CreatedTimestamp.Size() + if m.StartTimestamp != nil { + l = m.StartTimestamp.Size() n += 1 + l + sovMetrics(uint64(l)) } if m.XXX_unrecognized != nil { @@ -2028,8 +2028,8 @@ func (m *Histogram) Size() (n int) { if len(m.PositiveCount) > 0 { n += 1 + sovMetrics(uint64(len(m.PositiveCount)*8)) + len(m.PositiveCount)*8 } - if m.CreatedTimestamp != nil { - l = m.CreatedTimestamp.Size() + if m.StartTimestamp != nil { + l = m.StartTimestamp.Size() n += 1 + l + sovMetrics(uint64(l)) } if len(m.Exemplars) > 0 { @@ -2447,7 +2447,7 @@ func (m *Counter) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StartTimestamp", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2474,10 +2474,10 @@ func (m *Counter) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.CreatedTimestamp == nil { - m.CreatedTimestamp = &types.Timestamp{} + if m.StartTimestamp == nil { + m.StartTimestamp = &types.Timestamp{} } - if err := m.CreatedTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.StartTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2671,7 +2671,7 @@ func (m *Summary) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StartTimestamp", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2698,10 +2698,10 @@ func (m *Summary) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - 
if m.CreatedTimestamp == nil { - m.CreatedTimestamp = &types.Timestamp{} + if m.StartTimestamp == nil { + m.StartTimestamp = &types.Timestamp{} } - if err := m.CreatedTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.StartTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3289,7 +3289,7 @@ func (m *Histogram) Unmarshal(dAtA []byte) error { } case 15: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StartTimestamp", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3316,10 +3316,10 @@ func (m *Histogram) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.CreatedTimestamp == nil { - m.CreatedTimestamp = &types.Timestamp{} + if m.StartTimestamp == nil { + m.StartTimestamp = &types.Timestamp{} } - if err := m.CreatedTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.StartTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex diff --git a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto index 681554a7caf..843964ca38b 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto +++ b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto @@ -52,7 +52,7 @@ message Counter { double value = 1; Exemplar exemplar = 2; - google.protobuf.Timestamp created_timestamp = 3; + google.protobuf.Timestamp start_timestamp = 3; } message Quantile { @@ -65,7 +65,7 @@ message Summary { double sample_sum = 2; repeated Quantile quantile = 3 [(gogoproto.nullable) = false]; - google.protobuf.Timestamp created_timestamp = 4; + google.protobuf.Timestamp start_timestamp = 4; } message Untyped { @@ -79,7 +79,7 @@ message Histogram { // Buckets 
for the classic histogram. repeated Bucket bucket = 3 [(gogoproto.nullable) = false]; // Ordered in increasing order of upper_bound, +Inf bucket is optional. - google.protobuf.Timestamp created_timestamp = 15; + google.protobuf.Timestamp start_timestamp = 15; // Everything below here is for native histograms (formerly known as sparse histograms). diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go index fd0ac937419..feddc343931 100644 --- a/vendor/github.com/prometheus/prometheus/promql/engine.go +++ b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -2087,6 +2087,8 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, // offset; thus, they should keep the metric name. For all the // other range vector functions, the only change needed is to // drop the metric name in the output. + // However, if the input series (e.g., from a subquery) already has + // DropName set, we should respect that. dropName := (e.Func.Name != "last_over_time" && e.Func.Name != "first_over_time") vectorVals := make([]Vector, len(e.Args)-1) for i, s := range selVS.Series { @@ -2102,13 +2104,23 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, } chkIter = s.Iterator(chkIter) it.Reset(chkIter) + + // Check if input series already wants to drop the name (e.g., from subquery). + inputDropName := false + if storageSeries, ok := s.(*StorageSeries); ok { + inputDropName = storageSeries.series.DropName + } + + // Use OR logic: drop name if either the function wants it OR the input wants it. 
+ seriesDropName := dropName || inputDropName + metric := selVS.Series[i].Labels() - if !ev.enableDelayedNameRemoval && dropName { + if !ev.enableDelayedNameRemoval && seriesDropName { metric = metric.DropReserved(schema.IsMetadataLabel) } ss := Series{ Metric: metric, - DropName: dropName, + DropName: seriesDropName, } inMatrix[0].Metric = selVS.Series[i].Labels() for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { diff --git a/vendor/github.com/prometheus/prometheus/promql/info.go b/vendor/github.com/prometheus/prometheus/promql/info.go index 97a79cd0f15..539e5ad6856 100644 --- a/vendor/github.com/prometheus/prometheus/promql/info.go +++ b/vendor/github.com/prometheus/prometheus/promql/info.go @@ -56,10 +56,18 @@ func (ev *evaluator) evalInfo(ctx context.Context, args parser.Expressions) (par } // Don't try to enrich info series. + effectiveNameMatchers := effectiveInfoNameMatchers(infoNameMatchers) ignoreSeries := map[uint64]struct{}{} for _, s := range mat { name := s.Metric.Get(model.MetricNameLabel) - if len(infoNameMatchers) > 0 && matchersMatch(infoNameMatchers, name) { + matchesAllMatchers := true + for _, m := range effectiveNameMatchers { + if !m.Matches(name) { + matchesAllMatchers = false + break + } + } + if matchesAllMatchers { ignoreSeries[s.Metric.Hash()] = struct{}{} } } @@ -76,13 +84,24 @@ func (ev *evaluator) evalInfo(ctx context.Context, args parser.Expressions) (par return res, annots } -func matchersMatch(matchers []*labels.Matcher, value string) bool { +// effectiveInfoNameMatchers returns the set of __name__ matchers that will +// actually be used to select info series. +// When positive matchers exist, all matchers (positive + negative) are returned. +// When only negative matchers exist, a synthetic .+_info matcher is prepended. +// When no matchers exist, a target_info equality matcher is returned. 
+func effectiveInfoNameMatchers(matchers []*labels.Matcher) []*labels.Matcher { for _, m := range matchers { - if !m.Matches(value) { - return false + if m.Type == labels.MatchEqual || m.Type == labels.MatchRegexp { + // There's at least one positive matcher - return as-is. + return matchers } } - return true + if len(matchers) > 0 { + // Only negative matchers: prepend a synthetic .+_info matcher. + return append([]*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, model.MetricNameLabel, ".+_info")}, matchers...) + } + + return []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, model.MetricNameLabel, targetInfo)} } // infoSelectHints calculates the storage.SelectHints for selecting info series, given expr (first argument to info call). @@ -190,20 +209,18 @@ func (ev *evaluator) fetchInfoSeries(ctx context.Context, mat Matrix, ignoreSeri for name, re := range idLblRegexps { infoLabelMatchers = append(infoLabelMatchers, labels.MustNewMatcher(labels.MatchRegexp, name, re)) } - hasNameMatcher := false + var nameMatchers []*labels.Matcher for _, ms := range dataLabelMatchers { for _, m := range ms { if m.Name == model.MetricNameLabel { - hasNameMatcher = true + nameMatchers = append(nameMatchers, m) + continue } infoLabelMatchers = append(infoLabelMatchers, m) } } removeNameFromDataLabelMatchers() - if !hasNameMatcher { - // Default to using the target_info metric. - infoLabelMatchers = append([]*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, model.MetricNameLabel, targetInfo)}, infoLabelMatchers...) - } + infoLabelMatchers = append(infoLabelMatchers, effectiveInfoNameMatchers(nameMatchers)...) infoIt := ev.querier.Select(ctx, false, &selectHints, infoLabelMatchers...) 
infoSeries, ws, err := expandSeriesSet(ctx, infoIt) diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/lex.go b/vendor/github.com/prometheus/prometheus/promql/parser/lex.go index 8aa9e9dcbeb..13b101e7834 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/lex.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/lex.go @@ -218,6 +218,27 @@ func init() { key["nan"] = NUMBER } +// Keywords returns all keyword strings recognised by the PromQL lexer, +// including aggregation operators, modifier keywords, histogram descriptor +// keys, and counter-reset hint values. +func Keywords() []string { + seen := make(map[string]struct{}) + for s := range key { + seen[s] = struct{}{} + } + for s := range histogramDesc { + seen[s] = struct{}{} + } + for s := range counterResetHints { + seen[s] = struct{}{} + } + result := make([]string, 0, len(seen)) + for s := range seen { + result = append(result, s) + } + return result +} + func (i ItemType) String() string { if s, ok := ItemTypeStr[i]; ok { return s diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/prettier.go b/vendor/github.com/prometheus/prometheus/promql/parser/prettier.go index a0ab9e12198..3f769b724b4 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/prettier.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/prettier.go @@ -111,11 +111,16 @@ func (e *EvalStmt) Pretty(int) string { func (e Expressions) Pretty(level int) string { // Do not prefix the indent since respective nodes will indent itself. 
- s := "" + if len(e) == 0 { + return "" + } + + parts := make([]string, len(e)) for i := range e { - s += fmt.Sprintf("%s,\n", e[i].Pretty(level)) + parts[i] = e[i].Pretty(level) } - return s[:len(s)-2] + + return strings.Join(parts, ",\n") } func (e *ParenExpr) Pretty(level int) string { diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/README.md b/vendor/github.com/prometheus/prometheus/promql/promqltest/README.md deleted file mode 100644 index b4efd9c128b..00000000000 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/README.md +++ /dev/null @@ -1,238 +0,0 @@ -# The PromQL test scripting language - -This package contains two things: - -* an implementation of a test scripting language for PromQL engines -* a predefined set of tests written in that scripting language - -The predefined set of tests can be run against any PromQL engine implementation by calling `promqltest.RunBuiltinTests()`. -Any other test script can be run with `promqltest.RunTest()`. - -The rest of this document explains the test scripting language. - -Each test script is written in plain text. - -Comments can be given by prefixing the comment with a `#`, for example: - -``` -# This is a comment. -``` - -Each test file contains a series of commands. There are three kinds of commands: - -* `load` -* `clear` -* `eval` - -> **Note:** The `eval` command variants (`eval_fail`, `eval_warn`, `eval_info`, and `eval_ordered`) are deprecated. Use the new `expect` lines instead (explained in the [`eval` command](#eval-command) section). Additionally, `expected_fail_message` and `expected_fail_regexp` are also deprecated. - -Each command is executed in the order given in the file. - -## `load` command - -`load` adds some data to the test environment. - -The syntax is as follows: - -``` -load - - ... - -``` - -* `` is the step between points (eg. 
`1m` or `30s`) -* `` is a Prometheus series name in the usual `metric{label="value"}` syntax -* `` is a specification of the points to add for that series, following the same expanding syntax as for `promtool unittest` documented [here](../../docs/configuration/unit_testing_rules.md#series) - -For example: - -``` -load 1m - my_metric{env="prod"} 5 2+3x2 _ stale {{schema:1 sum:3 count:22 buckets:[5 10 7]}} -``` - -… will create a single series with labels `my_metric{env="prod"}`, with the following points: - -* t=0: value is 5 -* t=1m: value is 2 -* t=2m: value is 5 -* t=3m: value is 8 -* t=4m: no point -* t=5m: stale marker -* t=6m: native histogram with schema 1, sum -3, count 22 and bucket counts 5, 10 and 7 - -Each `load` command is additive - it does not replace any data loaded in a previous `load` command. -Use `clear` to remove all loaded data. - -### Native histograms with custom buckets (NHCB) - -When loading a batch of classic histogram float series, you can optionally append the suffix `_with_nhcb` to convert them to native histograms with custom buckets and load both the original float series and the new histogram series. - -## `clear` command - -`clear` removes all data previously loaded with `load` commands. - -## `eval` command - -`eval` runs a query against the test environment and asserts that the result is as expected. -It requires the query to succeed without any failures unless an `expect fail` line is provided. Previously `eval` expected no `info` or `warn` annotation, but now `expect no_info` and `expect no_warn` lines must be explicitly provided. - -Both instant and range queries are supported. - -The syntax is as follows: - -``` -# Instant query -eval instant at