diff --git a/Makefile b/Makefile index 83de699d5..a173f9b44 100644 --- a/Makefile +++ b/Makefile @@ -202,7 +202,7 @@ generate: $(CONTROLLER_GEN) #EXHELP Generate code containing DeepCopy, DeepCopyI $(CONTROLLER_GEN) --load-build-tags=$(GO_BUILD_TAGS) object:headerFile="hack/boilerplate.go.txt" paths="./..." .PHONY: verify -verify: k8s-pin kind-verify-versions fmt generate manifests update-tls-profiles crd-ref-docs verify-bingo #HELP Verify all generated code is up-to-date. Runs k8s-pin instead of just tidy. +verify: k8s-pin kind-verify-versions fmt generate manifests update-tls-profiles crd-ref-docs update-registryv1-bundle-schema verify-bingo #HELP Verify all generated code is up-to-date. Runs k8s-pin instead of just tidy. git diff --exit-code .PHONY: verify-bingo @@ -222,6 +222,10 @@ fmt: $(YAMLFMT) #EXHELP Formats code update-tls-profiles: $(GOJQ) #EXHELP Update TLS profiles from the Mozilla wiki env JQ=$(GOJQ) hack/tools/update-tls-profiles.sh +.PHONY: update-registryv1-bundle-schema +update-registryv1-bundle-schema: #EXHELP Update registry+v1 bundle configuration JSON schema + hack/tools/update-registryv1-bundle-schema.sh + .PHONY: verify-crd-compatibility CRD_DIFF_ORIGINAL_REF := git://main?path= CRD_DIFF_UPDATED_REF := file:// diff --git a/api/v1/clusterextension_types.go b/api/v1/clusterextension_types.go index d150d4997..f9a25bc77 100644 --- a/api/v1/clusterextension_types.go +++ b/api/v1/clusterextension_types.go @@ -500,12 +500,12 @@ type ClusterExtensionStatus struct { // When Progressing is True and Reason is RollingOut, the ClusterExtension has one or more ClusterExtensionRevisions in active roll out. // // - // When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition. + // When the ClusterExtension is sourced from a catalog, it surfaces deprecation conditions based on catalog metadata. // These are indications from a package owner to guide users away from a particular package, channel, or bundle: - // - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. - // - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. - // - PackageDeprecated is set if the requested package is marked deprecated in the catalog. - // - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. + // - BundleDeprecated is True if the installed bundle is marked deprecated, False if not deprecated, or Unknown if no bundle is installed yet or if catalog data is unavailable. + // - ChannelDeprecated is True if any requested channel is marked deprecated, False if not deprecated, or Unknown if catalog data is unavailable. + // - PackageDeprecated is True if the requested package is marked deprecated, False if not deprecated, or Unknown if catalog data is unavailable. + // - Deprecated is a rollup condition that is True when any deprecation exists, False when none exist, or Unknown when catalog data is unavailable. 
// // +listType=map // +listMapKey=type diff --git a/api/v1/common_types.go b/api/v1/common_types.go index 57e030f0b..863336e0c 100644 --- a/api/v1/common_types.go +++ b/api/v1/common_types.go @@ -29,7 +29,9 @@ const ( ReasonBlocked = "Blocked" // Deprecation reasons - ReasonDeprecated = "Deprecated" + ReasonDeprecated = "Deprecated" + ReasonNotDeprecated = "NotDeprecated" + ReasonDeprecationStatusUnknown = "DeprecationStatusUnknown" // Common reasons ReasonSucceeded = "Succeeded" diff --git a/commitchecker.yaml b/commitchecker.yaml index cca809eb0..95201f018 100644 --- a/commitchecker.yaml +++ b/commitchecker.yaml @@ -1,4 +1,4 @@ -expectedMergeBase: 6e4f192699f5c039fa2b92b01372a150274447bd +expectedMergeBase: fbe909f7ba35a9f771da6ec0431bbde2ac45d5fb upstreamBranch: main upstreamOrg: operator-framework upstreamRepo: operator-controller diff --git a/docs/api-reference/olmv1-api-reference.md b/docs/api-reference/olmv1-api-reference.md index c9cff6e48..74a1f635a 100644 --- a/docs/api-reference/olmv1-api-reference.md +++ b/docs/api-reference/olmv1-api-reference.md @@ -360,7 +360,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#condition-v1-meta) array_ | conditions represents the current state of the ClusterExtension.
The set of condition types which apply to all spec.source variations are Installed and Progressing.
The Installed condition represents whether the bundle has been installed for this ClusterExtension:
- When Installed is True and the Reason is Succeeded, the bundle has been successfully installed.
- When Installed is False and the Reason is Failed, the bundle has failed to install.
The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state.
When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state.
When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts.
When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery.

When Progressing is True and Reason is RollingOut, the ClusterExtension has one or more ClusterExtensionRevisions in active roll out.

When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition.
These are indications from a package owner to guide users away from a particular package, channel, or bundle:
- BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog.
- ChannelDeprecated is set if the requested channel is marked deprecated in the catalog.
- PackageDeprecated is set if the requested package is marked deprecated in the catalog.
- Deprecated is a rollup condition that is present when any of the deprecated conditions are present. | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#condition-v1-meta) array_ | conditions represents the current state of the ClusterExtension.
The set of condition types which apply to all spec.source variations are Installed and Progressing.
The Installed condition represents whether the bundle has been installed for this ClusterExtension:
- When Installed is True and the Reason is Succeeded, the bundle has been successfully installed.
- When Installed is False and the Reason is Failed, the bundle has failed to install.
The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state.
When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state.
When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts.
When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery.

When Progressing is True and Reason is RollingOut, the ClusterExtension has one or more ClusterExtensionRevisions in active roll out.

When the ClusterExtension is sourced from a catalog, it surfaces deprecation conditions based on catalog metadata.
These are indications from a package owner to guide users away from a particular package, channel, or bundle:
- BundleDeprecated is True if the installed bundle is marked deprecated, False if not deprecated, or Unknown if no bundle is installed yet or if catalog data is unavailable.
- ChannelDeprecated is True if any requested channel is marked deprecated, False if not deprecated, or Unknown if catalog data is unavailable.
- PackageDeprecated is True if the requested package is marked deprecated, False if not deprecated, or Unknown if catalog data is unavailable.
- Deprecated is a rollup condition that is True when any deprecation exists, False when none exist, or Unknown when catalog data is unavailable. | | | | `install` _[ClusterExtensionInstallStatus](#clusterextensioninstallstatus)_ | install is a representation of the current installation status for this ClusterExtension. | | | | `activeRevisions` _[RevisionStatus](#revisionstatus) array_ | activeRevisions holds a list of currently active (non-archived) ClusterExtensionRevisions,
including both installed and rolling out revisions.
| | | diff --git a/go.mod b/go.mod index 9bdd56829..42f35e741 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/golang-jwt/jwt/v5 v5.3.0 github.com/google/go-cmp v0.7.0 github.com/google/go-containerregistry v0.20.7 - github.com/google/renameio/v2 v2.0.1 + github.com/google/renameio/v2 v2.0.2 github.com/gorilla/handlers v1.5.2 github.com/klauspost/compress v1.18.2 github.com/opencontainers/go-digest v1.0.0 @@ -31,15 +31,15 @@ require ( github.com/stretchr/testify v1.11.1 go.podman.io/image/v5 v5.38.0 golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 - golang.org/x/mod v0.31.0 + golang.org/x/mod v0.32.0 golang.org/x/sync v0.19.0 - golang.org/x/tools v0.40.0 - helm.sh/helm/v3 v3.19.4 + golang.org/x/tools v0.41.0 + helm.sh/helm/v3 v3.20.0 k8s.io/api v0.35.0 k8s.io/apiextensions-apiserver v0.35.0 k8s.io/apimachinery v0.35.0 k8s.io/apiserver v0.35.0 - k8s.io/cli-runtime v0.34.2 + k8s.io/cli-runtime v0.35.0 k8s.io/client-go v0.35.0 k8s.io/component-base v0.35.0 k8s.io/klog/v2 v2.130.1 @@ -198,13 +198,13 @@ require ( github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/procfs v0.19.2 // indirect github.com/rivo/uniseg v0.4.7 // indirect - github.com/rubenv/sql-migrate v1.8.0 // indirect + github.com/rubenv/sql-migrate v1.8.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/secure-systems-lab/go-securesystemslib v0.9.1 // indirect github.com/shopspring/decimal v1.4.0 // indirect github.com/sigstore/fulcio v1.8.5 // indirect github.com/sigstore/protobuf-specs v0.5.0 // indirect - github.com/sigstore/sigstore v1.10.3 // indirect + github.com/sigstore/sigstore v1.10.4 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/smallstep/pkcs7 v0.2.1 // indirect github.com/spf13/cast v1.7.1 // indirect @@ -231,12 +231,12 @@ require ( go.podman.io/storage v1.61.0 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/crypto v0.46.0 // indirect - golang.org/x/net v0.48.0 // indirect + golang.org/x/crypto v0.47.0 // indirect + golang.org/x/net v0.49.0 // indirect golang.org/x/oauth2 v0.34.0 // indirect - golang.org/x/sys v0.39.0 // indirect - golang.org/x/term v0.38.0 // indirect - golang.org/x/text v0.32.0 // indirect + golang.org/x/sys v0.40.0 // indirect + golang.org/x/term v0.39.0 // indirect + golang.org/x/text v0.33.0 // indirect golang.org/x/time v0.14.0 // indirect gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 // indirect @@ -250,7 +250,7 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/controller-manager v0.33.2 // indirect - k8s.io/kubectl v0.34.2 // indirect + k8s.io/kubectl v0.35.0 // indirect oras.land/oras-go/v2 v2.6.0 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0 // indirect sigs.k8s.io/gateway-api v1.4.0 // indirect diff --git a/go.sum b/go.sum index ab53d1b29..01eee4701 100644 --- a/go.sum +++ b/go.sum @@ -144,8 +144,8 @@ github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI= -github.com/foxcpp/go-mockdns v1.1.0/go.mod 
h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= +github.com/foxcpp/go-mockdns v1.2.0 h1:omK3OrHRD1IWJz1FuFBCFquhXslXoF17OvBS6JPzZF0= +github.com/foxcpp/go-mockdns v1.2.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= @@ -264,8 +264,8 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d h1:KJIErDwbSHjnp/SGzE5ed8Aol7JsKiI5X7yWKAtzhM0= github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U= -github.com/google/renameio/v2 v2.0.1 h1:HyOM6qd9gF9sf15AvhbptGHUnaLTpEI9akAFFU3VyW0= -github.com/google/renameio/v2 v2.0.1/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= +github.com/google/renameio/v2 v2.0.2 h1:qKZs+tfn+arruZZhQ7TKC/ergJunuJicWS6gLDt/dGw= +github.com/google/renameio/v2 v2.0.2/go.mod h1:OX+G6WHHpHq3NVj7cAOleLOwJfcQ1s3uUJQCrr78SWo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -454,8 +454,8 @@ github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= -github.com/rubenv/sql-migrate v1.8.0 h1:dXnYiJk9k3wetp7GfQbKJcPHjVJL6YK19tKj8t2Ns0o= -github.com/rubenv/sql-migrate v1.8.0/go.mod h1:F2bGFBwCU+pnmbtNYDeKvSuvL6lBVtXDXUUv5t+u1qw= +github.com/rubenv/sql-migrate v1.8.1 h1:EPNwCvjAowHI3TnZ+4fQu3a915OpnQoPAjTXCGOy2U0= +github.com/rubenv/sql-migrate v1.8.1/go.mod h1:BTIKBORjzyxZDS6dzoiw6eAFYJ1iNlGAtjn4LGeVjS8= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ= @@ -470,8 +470,8 @@ github.com/sigstore/fulcio v1.8.5 h1:HYTD1/L5wlBp8JxsWxUf8hmfaNBBF/x3r3p5l6tZwbA github.com/sigstore/fulcio v1.8.5/go.mod h1:tSLYK3JsKvJpDW1BsIsVHZgHj+f8TjXARzqIUWSsSPQ= github.com/sigstore/protobuf-specs v0.5.0 h1:F8YTI65xOHw70NrvPwJ5PhAzsvTnuJMGLkA4FIkofAY= github.com/sigstore/protobuf-specs v0.5.0/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= -github.com/sigstore/sigstore v1.10.3 h1:s7fBYYOzW/2Vd0nND2ZdpWySb5vRF2u9eix/NZMHJm0= -github.com/sigstore/sigstore v1.10.3/go.mod h1:T26vXIkpnGEg391v3TaZ8EERcXbnjtZb/1erh5jbIQk= +github.com/sigstore/sigstore v1.10.4 h1:ytOmxMgLdcUed3w1SbbZOgcxqwMG61lh1TmZLN+WeZE= +github.com/sigstore/sigstore v1.10.4/go.mod h1:tDiyrdOref3q6qJxm2G+JHghqfmvifB7hw+EReAfnbI= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smallstep/pkcs7 v0.2.1 h1:6Kfzr/QizdIuB6LSv8y1LJdZ3aPSfTNhTLqAx9CTLfA= @@ -601,8 +601,8 @@ golang.org/x/crypto 
v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= -golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= -golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= +golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8= +golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 h1:R9PFI6EUdfVKgwKjZef7QIwGcBKu86OEFpJ9nUEP2l4= golang.org/x/exp v0.0.0-20250718183923-645b1fa84792/go.mod h1:A+z0yzpGtvnG90cToK5n2tu8UJVP2XUATh+r+sfOOOc= @@ -617,8 +617,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= -golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= +golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c= +golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -636,8 +636,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= -golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o= +golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= @@ -675,8 +675,8 @@ golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= -golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod 
h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -686,8 +686,8 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= -golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= -golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= +golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY= +golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -698,8 +698,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= -golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= -golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= +golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE= +golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8= golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -715,8 +715,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= -golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= +golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc= +golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg= golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM= golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= @@ -776,8 +776,8 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= -helm.sh/helm/v3 v3.19.4 h1:E2yFBejmZBczWr5LblhjZbvAOAwVumfBO1AtN3nqI30= -helm.sh/helm/v3 v3.19.4/go.mod 
h1:PC1rk7PqacpkV4acUFMLStOOis7QM9Jq3DveHBInu4s= +helm.sh/helm/v3 v3.20.0 h1:2M+0qQwnbI1a2CxN7dbmfsWHg/MloeaFMnZCY56as50= +helm.sh/helm/v3 v3.20.0/go.mod h1:rTavWa0lagZOxGfdhu4vgk1OjH2UYCnrDKE2PVC4N0o= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.34.0 h1:L+JtP2wDbEYPUeNGbeSa/5GwFtIA662EmT2YSLOkAVE= diff --git a/hack/tools/schema-generator/main.go b/hack/tools/schema-generator/main.go new file mode 100644 index 000000000..aedea32eb --- /dev/null +++ b/hack/tools/schema-generator/main.go @@ -0,0 +1,436 @@ +package main + +import ( + "encoding/json" + "fmt" + "go/ast" + "go/parser" + "go/token" + "io" + "net/http" + "os" + "path/filepath" + "strings" + "time" +) + +const ( + schemaID = "https://operator-framework.io/schemas/registry-v1-bundle-config.json" + schemaDraft = "http://json-schema.org/draft-07/schema#" + schemaTitle = "Registry+v1 Bundle Configuration" + schemaDescription = "Configuration schema for registry+v1 bundles. Includes watchNamespace for controlling operator scope and deploymentConfig for customizing operator deployment (environment variables, resource scheduling, storage, and pod placement). The deploymentConfig follows the same structure and behavior as OLM v0's SubscriptionConfig. Note: The 'selector' field from v0's SubscriptionConfig is not included as it was never used." +) + +// OpenAPISpec represents the structure of the Kubernetes OpenAPI v3 spec +type OpenAPISpec struct { + Components struct { + Schemas map[string]interface{} `json:"schemas"` + } `json:"components"` +} + +// Schema represents a JSON Schema Draft 7 document with OpenAPI v3 components +type Schema struct { + Schema string `json:"$schema"` + ID string `json:"$id"` + Title string `json:"title"` + Description string `json:"description"` + Type string `json:"type"` + Properties map[string]*SchemaField `json:"properties"` + AdditionalProperties bool `json:"additionalProperties"` + Components map[string]interface{} `json:"components,omitempty"` +} + +// SchemaField represents a single field in a JSON Schema +type SchemaField struct { + Type string `json:"type,omitempty"` + Description string `json:"description,omitempty"` + Properties map[string]*SchemaField `json:"properties,omitempty"` + AdditionalProperties interface{} `json:"additionalProperties,omitempty"` + Items interface{} `json:"items,omitempty"` + AnyOf []*SchemaField `json:"anyOf,omitempty"` + AllOf []*SchemaField `json:"allOf,omitempty"` + Ref string `json:"$ref,omitempty"` + + // Allow pass-through of unknown fields from OpenAPI schemas + Extra map[string]interface{} `json:"-"` +} + +// FieldInfo contains parsed information about a struct field +type FieldInfo struct { + JSONName string + TypeName string + TypePkg string + IsSlice bool + IsPtr bool + IsMap bool +} + +// schemaCollector tracks schemas that need to be included for $ref resolution +type schemaCollector struct { + openAPISpec *OpenAPISpec + collectedSchemas map[string]bool +} + +func main() { + if len(os.Args) != 4 { + fmt.Fprintf(os.Stderr, "Usage: %s <k8s-openapi-spec-url> <subscription-types-file> <output-file>\n", os.Args[0]) + os.Exit(1) + } + + k8sOpenAPISpecURL := os.Args[1] + subscriptionTypesFile := os.Args[2] + outputFile := os.Args[3] + + fmt.Printf("Fetching Kubernetes OpenAPI spec from %s...\n", k8sOpenAPISpecURL) + + // Fetch the Kubernetes OpenAPI spec + openAPISpec, err := fetchOpenAPISpec(k8sOpenAPISpecURL) + if err != nil {
fmt.Fprintf(os.Stderr, "Error fetching OpenAPI spec: %v\n", err) + os.Exit(1) + } + + fmt.Printf("Parsing SubscriptionConfig from %s...\n", subscriptionTypesFile) + + // Parse SubscriptionConfig structure + fields, err := parseSubscriptionConfig(subscriptionTypesFile) + if err != nil { + fmt.Fprintf(os.Stderr, "Error parsing SubscriptionConfig: %v\n", err) + os.Exit(1) + } + + fmt.Printf("Generating registry+v1 bundle configuration schema...\n") + + // Generate the schema + schema := generateBundleConfigSchema(openAPISpec, fields) + + // Marshal to JSON with indentation + data, err := json.MarshalIndent(schema, "", " ") + if err != nil { + fmt.Fprintf(os.Stderr, "Error marshaling schema: %v\n", err) + os.Exit(1) + } + + // Ensure output directory exists + dir := filepath.Dir(outputFile) + if err := os.MkdirAll(dir, 0755); err != nil { + fmt.Fprintf(os.Stderr, "Error creating output directory: %v\n", err) + os.Exit(1) + } + + // Write to file + if err := os.WriteFile(outputFile, data, 0600); err != nil { + fmt.Fprintf(os.Stderr, "Error writing schema file: %v\n", err) + os.Exit(1) + } + + fmt.Printf("Successfully generated schema at %s\n", outputFile) +} + +func fetchOpenAPISpec(url string) (*OpenAPISpec, error) { + // Create HTTP client with timeout to prevent hanging + client := &http.Client{ + Timeout: 30 * time.Second, + } + + resp, err := client.Get(url) + if err != nil { + return nil, fmt.Errorf("failed to fetch spec: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("unexpected status code: %d", resp.StatusCode) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed to read response: %w", err) + } + + var spec OpenAPISpec + if err := json.Unmarshal(body, &spec); err != nil { + return nil, fmt.Errorf("failed to unmarshal spec: %w", err) + } + + return &spec, nil +} + +func parseSubscriptionConfig(filePath string) ([]FieldInfo, error) { + fset := token.NewFileSet() + node, err := parser.ParseFile(fset, filePath, nil, parser.ParseComments) + if err != nil { + return nil, err + } + + var fields []FieldInfo + + // Find the SubscriptionConfig struct + ast.Inspect(node, func(n ast.Node) bool { + typeSpec, ok := n.(*ast.TypeSpec) + if !ok || typeSpec.Name.Name != "SubscriptionConfig" { + return true + } + + structType, ok := typeSpec.Type.(*ast.StructType) + if !ok { + return true + } + + // Extract field information + for _, field := range structType.Fields.List { + if field.Names == nil { + continue + } + + fieldName := field.Names[0].Name + + // Skip Selector field + if fieldName == "Selector" { + continue + } + + // Get JSON tag + jsonName := extractJSONTag(field.Tag) + if jsonName == "" || jsonName == "-" { + continue + } + + // Parse the field type + fieldInfo := FieldInfo{ + JSONName: jsonName, + } + + parseFieldType(field.Type, &fieldInfo) + + fields = append(fields, fieldInfo) + } + + return false + }) + + return fields, nil +} + +func extractJSONTag(tag *ast.BasicLit) string { + if tag == nil { + return "" + } + + tagValue := strings.Trim(tag.Value, "`") + for _, part := range strings.Split(tagValue, " ") { + if strings.HasPrefix(part, "json:") { + jsonTag := strings.Trim(strings.TrimPrefix(part, "json:"), "\"") + return strings.Split(jsonTag, ",")[0] + } + } + + return "" +} + +func parseFieldType(expr ast.Expr, info *FieldInfo) { + switch t := expr.(type) { + case *ast.ArrayType: + info.IsSlice = true + parseFieldType(t.Elt, info) + + case *ast.StarExpr: + info.IsPtr = true + 
parseFieldType(t.X, info) + + case *ast.MapType: + info.IsMap = true + info.TypeName = "map[string]string" // Simplified for our use case + + case *ast.Ident: + info.TypeName = t.Name + + case *ast.SelectorExpr: + if pkg, ok := t.X.(*ast.Ident); ok { + info.TypePkg = pkg.Name + info.TypeName = t.Sel.Name + } + } +} + +func generateBundleConfigSchema(openAPISpec *OpenAPISpec, fields []FieldInfo) *Schema { + schema := &Schema{ + Schema: schemaDraft, + ID: schemaID, + Title: schemaTitle, + Description: schemaDescription, + Type: "object", + Properties: make(map[string]*SchemaField), + AdditionalProperties: false, + } + + // Track schemas we need to include (for resolving $ref dependencies) + collector := &schemaCollector{ + openAPISpec: openAPISpec, + collectedSchemas: make(map[string]bool), + } + + // Add watchNamespace property (base definition - will be modified at runtime) + schema.Properties["watchNamespace"] = &SchemaField{ + Description: "The namespace that the operator should watch for custom resources. The meaning and validation of this field depends on the operator's install modes. This field may be optional or required, and may have format constraints, based on the operator's supported install modes.", + AnyOf: []*SchemaField{ + {Type: "null"}, + {Type: "string"}, + }, + } + + // Create deploymentConfig property + deploymentConfigProps := make(map[string]*SchemaField) + + // Build deploymentConfig properties from parsed fields + for _, field := range fields { + fieldSchema := mapFieldToOpenAPISchema(field, openAPISpec, collector) + if fieldSchema != nil { + deploymentConfigProps[field.JSONName] = fieldSchema + } + } + + schema.Properties["deploymentConfig"] = &SchemaField{ + Type: "object", + Description: "Configuration for customizing operator deployment (environment variables, resources, volumes, etc.)", + Properties: deploymentConfigProps, + AdditionalProperties: false, + } + + // Add all collected schemas to the components/schemas section + // (OpenAPI v3 uses components/schemas for $ref resolution) + if len(collector.collectedSchemas) > 0 { + componentsSchemas := make(map[string]interface{}) + for schemaName := range collector.collectedSchemas { + if s, ok := openAPISpec.Components.Schemas[schemaName]; ok { + componentsSchemas[schemaName] = s + } + } + + schema.Components = map[string]interface{}{ + "schemas": componentsSchemas, + } + } + + return schema +} + +func mapFieldToOpenAPISchema(field FieldInfo, openAPISpec *OpenAPISpec, collector *schemaCollector) *SchemaField { + // Handle map types (nodeSelector, annotations) + if field.IsMap { + return &SchemaField{ + Type: "object", + AdditionalProperties: &SchemaField{ + Type: "string", + }, + } + } + + // Get the OpenAPI schema for the base type + openAPITypeName := getOpenAPITypeName(field) + if openAPITypeName == "" { + fmt.Fprintf(os.Stderr, "Warning: Could not map field %s (type: %s.%s) to OpenAPI schema\n", + field.JSONName, field.TypePkg, field.TypeName) + return nil + } + + baseSchema, ok := openAPISpec.Components.Schemas[openAPITypeName] + if !ok { + fmt.Fprintf(os.Stderr, "Warning: Schema for %s not found in OpenAPI spec\n", openAPITypeName) + return nil + } + + // Collect this schema and all its dependencies + collector.collectSchemaWithDependencies(openAPITypeName, baseSchema) + + // Use $ref to point to the schema in components/schemas. + // This preserves all validation keywords (required, enum, format, pattern, etc.) + // that would be lost if we copied the schema content via marshal/unmarshal. 
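+	// For example, a slice field such as tolerations is emitted as (illustrative
+	// output; the exact $ref string and array wrapping are asserted in main_test.go):
+	//   {"type": "array", "items": {"$ref": "#/components/schemas/io.k8s.api.core.v1.Toleration"}}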
+ schemaRef := &SchemaField{ + Ref: fmt.Sprintf("#/components/schemas/%s", openAPITypeName), + } + + // Wrap in array if it's a slice field + if field.IsSlice { + return &SchemaField{ + Type: "array", + Items: schemaRef, + } + } + + return schemaRef +} + +// collectSchemaWithDependencies recursively collects a schema and all schemas it references via $ref +func (c *schemaCollector) collectSchemaWithDependencies(schemaName string, schema interface{}) { + // Mark this schema as collected + if c.collectedSchemas[schemaName] { + return // Already processed + } + c.collectedSchemas[schemaName] = true + + // Recursively find all $ref references in this schema + c.findReferences(schema) +} + +// findReferences recursively walks a schema object to find all $ref pointers +func (c *schemaCollector) findReferences(obj interface{}) { + switch v := obj.(type) { + case map[string]interface{}: + // Check if this is a $ref and process it + c.processRef(v) + + // Recursively check all values in the map + for _, val := range v { + c.findReferences(val) + } + + case []interface{}: + // Recursively check all items in the array + for _, item := range v { + c.findReferences(item) + } + } +} + +// processRef extracts and collects schema dependencies from a $ref pointer +func (c *schemaCollector) processRef(v map[string]interface{}) { + ref, ok := v["$ref"].(string) + if !ok { + return + } + + // Extract the schema name from the $ref + // Format: "#/components/schemas/io.k8s.api.core.v1.NodeAffinity" + if !strings.HasPrefix(ref, "#/components/schemas/") { + return + } + + schemaName := strings.TrimPrefix(ref, "#/components/schemas/") + + // Skip if already collected + if c.collectedSchemas[schemaName] { + return + } + + // Collect the referenced schema recursively + refSchema, ok := c.openAPISpec.Components.Schemas[schemaName] + if ok { + c.collectSchemaWithDependencies(schemaName, refSchema) + } +} + +func getOpenAPITypeName(field FieldInfo) string { + // Map package names to OpenAPI prefixes + pkgMap := map[string]string{ + "corev1": "io.k8s.api.core.v1", + "v1": "io.k8s.api.core.v1", + } + + prefix, ok := pkgMap[field.TypePkg] + if !ok { + return "" + } + + return fmt.Sprintf("%s.%s", prefix, field.TypeName) +} diff --git a/hack/tools/schema-generator/main_test.go b/hack/tools/schema-generator/main_test.go new file mode 100644 index 000000000..41807d3a0 --- /dev/null +++ b/hack/tools/schema-generator/main_test.go @@ -0,0 +1,333 @@ +package main + +import ( + "encoding/json" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// getPackageDir returns the directory path of the specified Go package. +// It uses 'go list' which automatically handles both vendor mode and module cache. 
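+//
+// For example, the tests below resolve the operator-framework API package:
+//
+//	pkgDir := getPackageDir(t, "github.com/operator-framework/api/pkg/operators/v1alpha1")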
+func getPackageDir(t *testing.T, pkgPath string) string { + t.Helper() + cmd := exec.Command("go", "list", "-f", "{{.Dir}}", pkgPath) + out, err := cmd.Output() + require.NoError(t, err, "failed to find package %s", pkgPath) + return strings.TrimSpace(string(out)) +} + +// Mock OpenAPI spec for testing +func getMockOpenAPISpec() *OpenAPISpec { + return &OpenAPISpec{ + Components: struct { + Schemas map[string]interface{} `json:"schemas"` + }{ + Schemas: map[string]interface{}{ + "io.k8s.api.core.v1.Toleration": map[string]interface{}{ + "type": "object", + "description": "The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.", + "properties": map[string]interface{}{ + "key": map[string]string{"type": "string"}, + "operator": map[string]string{"type": "string"}, + "value": map[string]string{"type": "string"}, + "effect": map[string]string{"type": "string"}, + "tolerationSeconds": map[string]interface{}{"type": "integer", "format": "int64"}, + }, + }, + "io.k8s.api.core.v1.ResourceRequirements": map[string]interface{}{ + "type": "object", + "description": "ResourceRequirements describes the compute resource requirements.", + "properties": map[string]interface{}{ + "limits": map[string]interface{}{"type": "object"}, + "requests": map[string]interface{}{"type": "object"}, + }, + }, + "io.k8s.api.core.v1.EnvVar": map[string]interface{}{ + "type": "object", + "properties": map[string]interface{}{"name": map[string]string{"type": "string"}}, + }, + "io.k8s.api.core.v1.EnvFromSource": map[string]interface{}{ + "type": "object", + }, + "io.k8s.api.core.v1.Volume": map[string]interface{}{ + "type": "object", + }, + "io.k8s.api.core.v1.VolumeMount": map[string]interface{}{ + "type": "object", + }, + "io.k8s.api.core.v1.Affinity": map[string]interface{}{ + "type": "object", + }, + }, + }, + } +} + +func TestParseSubscriptionConfig(t *testing.T) { + // Get the package directory containing subscription_types.go + pkgDir := getPackageDir(t, "github.com/operator-framework/api/pkg/operators/v1alpha1") + subscriptionTypesFile := filepath.Join(pkgDir, "subscription_types.go") + + fields, err := parseSubscriptionConfig(subscriptionTypesFile) + require.NoError(t, err, "should successfully parse SubscriptionConfig") + require.NotEmpty(t, fields, "should find fields in SubscriptionConfig") + + // Create a map for easier checking + fieldMap := make(map[string]FieldInfo) + for _, field := range fields { + fieldMap[field.JSONName] = field + } + + t.Run("includes expected fields", func(t *testing.T) { + expectedFields := []string{ + "nodeSelector", + "tolerations", + "resources", + "env", + "envFrom", + "volumes", + "volumeMounts", + "affinity", + "annotations", + } + + for _, fieldName := range expectedFields { + assert.Contains(t, fieldMap, fieldName, "should include %s field", fieldName) + } + }) + + t.Run("excludes selector field", func(t *testing.T) { + assert.NotContains(t, fieldMap, "selector", "should exclude selector field per RFC requirement") + }) + + t.Run("parses field types correctly", func(t *testing.T) { + // Check tolerations is a slice + tolerations, ok := fieldMap["tolerations"] + require.True(t, ok, "tolerations should be present") + assert.True(t, tolerations.IsSlice, "tolerations should be a slice") + assert.Equal(t, "corev1", tolerations.TypePkg, "tolerations should be from corev1 package") + assert.Equal(t, "Toleration", tolerations.TypeName, "tolerations type should be Toleration") + + // Check nodeSelector is a map + nodeSelector,
ok := fieldMap["nodeSelector"] + require.True(t, ok, "nodeSelector should be present") + assert.True(t, nodeSelector.IsMap, "nodeSelector should be a map") + + // Check resources is an object (pointer) + resources, ok := fieldMap["resources"] + require.True(t, ok, "resources should be present") + assert.Equal(t, "corev1", resources.TypePkg) + assert.Equal(t, "ResourceRequirements", resources.TypeName) + }) +} + +func TestGenerateBundleConfigSchema(t *testing.T) { + mockOpenAPI := getMockOpenAPISpec() + + // Create mock fields similar to what parseSubscriptionConfig would return + fields := []FieldInfo{ + {JSONName: "nodeSelector", IsMap: true}, + {JSONName: "tolerations", TypePkg: "corev1", TypeName: "Toleration", IsSlice: true}, + {JSONName: "resources", TypePkg: "corev1", TypeName: "ResourceRequirements"}, + {JSONName: "annotations", IsMap: true}, + } + + schema := generateBundleConfigSchema(mockOpenAPI, fields) + + t.Run("schema has correct metadata", func(t *testing.T) { + assert.Equal(t, "http://json-schema.org/draft-07/schema#", schema.Schema) + assert.Equal(t, schemaID, schema.ID) + assert.Equal(t, schemaTitle, schema.Title) + assert.NotEmpty(t, schema.Description) + assert.Equal(t, "object", schema.Type) + assert.False(t, schema.AdditionalProperties) + }) + + t.Run("includes watchNamespace property", func(t *testing.T) { + require.Contains(t, schema.Properties, "watchNamespace") + + watchNS := schema.Properties["watchNamespace"] + require.NotNil(t, watchNS) + + assert.NotEmpty(t, watchNS.Description) + assert.Len(t, watchNS.AnyOf, 2, "watchNamespace should have anyOf with null and string") + }) + + t.Run("includes deploymentConfig property", func(t *testing.T) { + require.Contains(t, schema.Properties, "deploymentConfig") + + deployConfig := schema.Properties["deploymentConfig"] + require.NotNil(t, deployConfig) + + assert.Equal(t, "object", deployConfig.Type) + assert.NotEmpty(t, deployConfig.Description) + assert.Equal(t, false, deployConfig.AdditionalProperties) + + // Check that our mock fields are present + assert.Contains(t, deployConfig.Properties, "nodeSelector") + assert.Contains(t, deployConfig.Properties, "tolerations") + assert.Contains(t, deployConfig.Properties, "resources") + assert.Contains(t, deployConfig.Properties, "annotations") + }) +} + +func TestMapFieldToOpenAPISchema(t *testing.T) { + mockOpenAPI := getMockOpenAPISpec() + collector := &schemaCollector{ + openAPISpec: mockOpenAPI, + collectedSchemas: make(map[string]bool), + } + + t.Run("maps map fields correctly", func(t *testing.T) { + field := FieldInfo{ + JSONName: "nodeSelector", + IsMap: true, + } + + schema := mapFieldToOpenAPISchema(field, mockOpenAPI, collector) + require.NotNil(t, schema) + + assert.Equal(t, "object", schema.Type) + assert.NotNil(t, schema.AdditionalProperties) + }) + + t.Run("maps slice fields correctly", func(t *testing.T) { + field := FieldInfo{ + JSONName: "tolerations", + TypePkg: "corev1", + TypeName: "Toleration", + IsSlice: true, + } + + schema := mapFieldToOpenAPISchema(field, mockOpenAPI, collector) + require.NotNil(t, schema) + + assert.Equal(t, "array", schema.Type) + assert.NotNil(t, schema.Items) + + // Items should be a *SchemaField with $ref + items, ok := schema.Items.(*SchemaField) + require.True(t, ok) + assert.Equal(t, "#/components/schemas/io.k8s.api.core.v1.Toleration", items.Ref) + }) + + t.Run("maps object fields correctly", func(t *testing.T) { + field := FieldInfo{ + JSONName: "resources", + TypePkg: "corev1", + TypeName: "ResourceRequirements", + } + + 
schema := mapFieldToOpenAPISchema(field, mockOpenAPI, collector) + require.NotNil(t, schema) + + // Should be a $ref to the schema in components/schemas + assert.Equal(t, "#/components/schemas/io.k8s.api.core.v1.ResourceRequirements", schema.Ref) + }) +} + +func TestGetOpenAPITypeName(t *testing.T) { + testCases := []struct { + name string + field FieldInfo + expected string + }{ + { + name: "corev1 package", + field: FieldInfo{TypePkg: "corev1", TypeName: "Toleration"}, + expected: "io.k8s.api.core.v1.Toleration", + }, + { + name: "v1 package", + field: FieldInfo{TypePkg: "v1", TypeName: "ResourceRequirements"}, + expected: "io.k8s.api.core.v1.ResourceRequirements", + }, + { + name: "unknown package", + field: FieldInfo{TypePkg: "unknown", TypeName: "SomeType"}, + expected: "", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := getOpenAPITypeName(tc.field) + assert.Equal(t, tc.expected, result) + }) + } +} + +// TestSchemaIsValidJSON verifies that the generated schema is valid JSON +func TestSchemaIsValidJSON(t *testing.T) { + mockOpenAPI := getMockOpenAPISpec() + fields := []FieldInfo{ + {JSONName: "tolerations", TypePkg: "corev1", TypeName: "Toleration", IsSlice: true}, + } + + schema := generateBundleConfigSchema(mockOpenAPI, fields) + + // Marshal to JSON + data, err := json.MarshalIndent(schema, "", " ") + require.NoError(t, err, "should marshal schema to JSON") + + // Unmarshal back to verify it's valid + var unmarshaled map[string]interface{} + err = json.Unmarshal(data, &unmarshaled) + require.NoError(t, err, "generated JSON should be valid and unmarshalable") + + // Verify key top-level fields exist + assert.Contains(t, unmarshaled, "$schema") + assert.Contains(t, unmarshaled, "$id") + assert.Contains(t, unmarshaled, "type") + assert.Contains(t, unmarshaled, "properties") +} + +// TestGeneratedSchemaMatchesActualOutput validates that the checked-in schema file +// has the expected structure and required fields. 
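+// The schema path below is relative to this test's package directory.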
+func TestGeneratedSchemaMatchesActualOutput(t *testing.T) { + // Read the checked-in schema file + schemaPath := "../../../internal/operator-controller/rukpak/bundle/registryv1bundleconfig.json" + data, err := os.ReadFile(schemaPath) + require.NoError(t, err, "should be able to read the generated schema file") + + // Unmarshal it + var schemaFromFile map[string]interface{} + err = json.Unmarshal(data, &schemaFromFile) + require.NoError(t, err, "checked-in schema should be valid JSON") + + // Verify it has the expected structure + assert.Equal(t, "http://json-schema.org/draft-07/schema#", schemaFromFile["$schema"]) + assert.Equal(t, schemaID, schemaFromFile["$id"]) + assert.Contains(t, schemaFromFile, "properties") + + props, ok := schemaFromFile["properties"].(map[string]interface{}) + require.True(t, ok) + + assert.Contains(t, props, "watchNamespace") + assert.Contains(t, props, "deploymentConfig") + + // Verify deploymentConfig has expected fields + deployConfig, ok := props["deploymentConfig"].(map[string]interface{}) + require.True(t, ok) + + dcProps, ok := deployConfig["properties"].(map[string]interface{}) + require.True(t, ok) + + expectedFields := []string{ + "nodeSelector", "tolerations", "resources", "env", "envFrom", + "volumes", "volumeMounts", "affinity", "annotations", + } + + for _, field := range expectedFields { + assert.Contains(t, dcProps, field, "deploymentConfig should include %s", field) + } + + // Verify selector is NOT present + assert.NotContains(t, dcProps, "selector", "selector field should be excluded per RFC requirement") +} diff --git a/hack/tools/update-registryv1-bundle-schema.sh b/hack/tools/update-registryv1-bundle-schema.sh new file mode 100755 index 000000000..45c27cdb9 --- /dev/null +++ b/hack/tools/update-registryv1-bundle-schema.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash + +set -euo pipefail + +# This script generates the registry+v1 bundle configuration JSON schema +# by extracting field information from v1alpha1.SubscriptionConfig and mapping +# those fields to their corresponding Kubernetes OpenAPI v3 schemas. + +# Get the module path from go mod cache +if ! MODULE_PATH=$(go list -mod=readonly -m -f "{{.Dir}}" github.com/operator-framework/api); then + echo "Error: Could not find github.com/operator-framework/api module" >&2 + exit 1 +fi + +# Source files +SUBSCRIPTION_TYPES="${MODULE_PATH}/pkg/operators/v1alpha1/subscription_types.go" + +# Output file +SCHEMA_OUTPUT="internal/operator-controller/rukpak/bundle/registryv1bundleconfig.json" + +# Verify required source file exists +if [[ ! -f "${SUBSCRIPTION_TYPES}" ]]; then + echo "Error: ${SUBSCRIPTION_TYPES} not found." >&2 + echo "Module path: ${MODULE_PATH}" >&2 + exit 1 +fi + +# Get the effective k8s.io/api version (honors replace directives) +if ! 
K8S_API_VERSION=$(go list -m -f '{{.Version}}' k8s.io/api); then + echo "Error: Could not determine k8s.io/api version" >&2 + exit 1 +fi +if [[ -z "${K8S_API_VERSION}" ]]; then + echo "Error: k8s.io/api version is empty" >&2 + exit 1 +fi + +# Convert k8s.io/api version (v0.35.0) to Kubernetes version (v1.35.0) +# k8s.io/api uses v0.X.Y while Kubernetes uses v1.X.Y +K8S_VERSION=$(echo "${K8S_API_VERSION}" | sed 's/^v0\./v1./' | tr -d '\n') + +echo "$(date '+%Y/%m/%d %T') Detected k8s.io/api version: ${K8S_API_VERSION}" +echo "$(date '+%Y/%m/%d %T') Using Kubernetes version: ${K8S_VERSION}" + +# Construct OpenAPI spec URL +OPENAPI_SPEC_URL="https://raw.githubusercontent.com/kubernetes/kubernetes/refs/tags/${K8S_VERSION}/api/openapi-spec/v3/api__v1_openapi.json" + +echo "$(date '+%Y/%m/%d %T') Fetching Kubernetes OpenAPI spec from: ${OPENAPI_SPEC_URL}" +echo "$(date '+%Y/%m/%d %T') Generating registry+v1 bundle configuration JSON schema..." + +# Run the schema generator +go run ./hack/tools/schema-generator "${OPENAPI_SPEC_URL}" "${SUBSCRIPTION_TYPES}" "${SCHEMA_OUTPUT}" + +echo "$(date '+%Y/%m/%d %T') Schema generation complete: ${SCHEMA_OUTPUT}" diff --git a/helm/olmv1/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensions.yaml b/helm/olmv1/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensions.yaml index 66824fa12..b857b9c1c 100644 --- a/helm/olmv1/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensions.yaml +++ b/helm/olmv1/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensions.yaml @@ -601,12 +601,12 @@ spec: When Progressing is True and Reason is RollingOut, the ClusterExtension has one or more ClusterExtensionRevisions in active roll out. - When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition. + When the ClusterExtension is sourced from a catalog, it surfaces deprecation conditions based on catalog metadata. These are indications from a package owner to guide users away from a particular package, channel, or bundle: - - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. - - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. - - PackageDeprecated is set if the requested package is marked deprecated in the catalog. - - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. + - BundleDeprecated is True if the installed bundle is marked deprecated, False if not deprecated, or Unknown if no bundle is installed yet or if catalog data is unavailable. + - ChannelDeprecated is True if any requested channel is marked deprecated, False if not deprecated, or Unknown if catalog data is unavailable. + - PackageDeprecated is True if the requested package is marked deprecated, False if not deprecated, or Unknown if catalog data is unavailable. + - Deprecated is a rollup condition that is True when any deprecation exists, False when none exist, or Unknown when catalog data is unavailable. items: description: Condition contains details for one aspect of the current state of this API Resource. 
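For illustration only, a minimal sketch of how a controller could record the rollup condition under this tri-state scheme, assuming an ocv1.TypeDeprecated condition-type constant (not shown in this diff) alongside the ReasonNotDeprecated reason added in common_types.go above:

	// Hypothetical sketch, not part of this change; apimeta is k8s.io/apimachinery/pkg/api/meta,
	// as used in clusterextension_controller.go below.
	apimeta.SetStatusCondition(&ext.Status.Conditions, metav1.Condition{
		Type:               ocv1.TypeDeprecated, // assumed constant
		Status:             metav1.ConditionFalse,
		Reason:             ocv1.ReasonNotDeprecated,
		Message:            "no deprecations apply to the requested package, channel, or bundle",
		ObservedGeneration: ext.Generation,
	})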
diff --git a/helm/olmv1/base/operator-controller/crd/standard/olm.operatorframework.io_clusterextensions.yaml b/helm/olmv1/base/operator-controller/crd/standard/olm.operatorframework.io_clusterextensions.yaml index ca316f7e8..1840c756f 100644 --- a/helm/olmv1/base/operator-controller/crd/standard/olm.operatorframework.io_clusterextensions.yaml +++ b/helm/olmv1/base/operator-controller/crd/standard/olm.operatorframework.io_clusterextensions.yaml @@ -507,12 +507,12 @@ spec: When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts. When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery. - When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition. + When the ClusterExtension is sourced from a catalog, it surfaces deprecation conditions based on catalog metadata. These are indications from a package owner to guide users away from a particular package, channel, or bundle: - - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. - - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. - - PackageDeprecated is set if the requested package is marked deprecated in the catalog. - - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. + - BundleDeprecated is True if the installed bundle is marked deprecated, False if not deprecated, or Unknown if no bundle is installed yet or if catalog data is unavailable. + - ChannelDeprecated is True if any requested channel is marked deprecated, False if not deprecated, or Unknown if catalog data is unavailable. + - PackageDeprecated is True if the requested package is marked deprecated, False if not deprecated, or Unknown if catalog data is unavailable. + - Deprecated is a rollup condition that is True when any deprecation exists, False when none exist, or Unknown when catalog data is unavailable. items: description: Condition contains details for one aspect of the current state of this API Resource. diff --git a/internal/operator-controller/conditionsets/conditionsets.go b/internal/operator-controller/conditionsets/conditionsets.go index e72a95c2a..0a741ed8d 100644 --- a/internal/operator-controller/conditionsets/conditionsets.go +++ b/internal/operator-controller/conditionsets/conditionsets.go @@ -36,6 +36,8 @@ var ConditionTypes = []string{ var ConditionReasons = []string{ ocv1.ReasonSucceeded, ocv1.ReasonDeprecated, + ocv1.ReasonNotDeprecated, + ocv1.ReasonDeprecationStatusUnknown, ocv1.ReasonFailed, ocv1.ReasonBlocked, ocv1.ReasonRetrying, diff --git a/internal/operator-controller/config/config.go b/internal/operator-controller/config/config.go index 8fcadf40a..43f755762 100644 --- a/internal/operator-controller/config/config.go +++ b/internal/operator-controller/config/config.go @@ -98,6 +98,47 @@ func (c *Config) GetWatchNamespace() *string { return &str } +// GetDeploymentConfig returns the deploymentConfig value if present in the configuration. +// Returns nil if deploymentConfig is not set or is explicitly set to null. +// The returned value is a generic map[string]any that can be marshaled to JSON +// for validation or conversion to specific types (like v1alpha1.SubscriptionConfig). +// +// Returns a defensive deep copy so callers can't mutate the internal Config state. 
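+//
+// A hypothetical usage sketch (conversion to v1alpha1.SubscriptionConfig is
+// illustrative and not part of this change):
+//
+//	if dc := cfg.GetDeploymentConfig(); dc != nil {
+//		data, _ := json.Marshal(dc)
+//		var sc v1alpha1.SubscriptionConfig
+//		_ = json.Unmarshal(data, &sc)
+//	}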
+func (c *Config) GetDeploymentConfig() map[string]any { + if c == nil || *c == nil { + return nil + } + val, exists := (*c)["deploymentConfig"] + if !exists { + return nil + } + // User set deploymentConfig: null - treat as "not configured" + if val == nil { + return nil + } + // Schema validation ensures this is an object (map) + dcMap, ok := val.(map[string]any) + if !ok { + return nil + } + + // Return a defensive deep copy so callers can't mutate the internal Config state. + // We use JSON marshal/unmarshal because the data is already JSON-compatible and + // this handles nested structures correctly. + data, err := json.Marshal(dcMap) + if err != nil { + // This should never happen since the map came from validated JSON/YAML, + // but return nil as a safe fallback + return nil + } + var copied map[string]any + if err := json.Unmarshal(data, &copied); err != nil { + // This should never happen for valid JSON + return nil + } + return copied +} + // UnmarshalConfig takes user configuration, validates it, and creates a Config object. // This is the only way to create a Config. // diff --git a/internal/operator-controller/config/config_test.go b/internal/operator-controller/config/config_test.go index 95bb98f0b..d49bc6b87 100644 --- a/internal/operator-controller/config/config_test.go +++ b/internal/operator-controller/config/config_test.go @@ -337,8 +337,8 @@ func Test_UnmarshalConfig_EmptySchema(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - emptySchemaBundle := &mockEmptySchemaBundle{} - schema, err := emptySchemaBundle.GetConfigSchema() + noSchemaBundle := &mockNoSchemaBundle{} + schema, err := noSchemaBundle.GetConfigSchema() require.NoError(t, err) config, err := config.UnmarshalConfig(tc.rawConfig, schema, "my-namespace") @@ -566,9 +566,135 @@ func (h *mockHelmBundle) GetConfigSchema() (map[string]any, error) { return schemaMap, nil } -// mockEmptySchemaBundle represents a ClusterExtension that doesn't provide a configuration schema. -type mockEmptySchemaBundle struct{} +// mockNoSchemaBundle represents a bundle that doesn't provide a configuration schema. +type mockNoSchemaBundle struct{} -func (e *mockEmptySchemaBundle) GetConfigSchema() (map[string]any, error) { +func (e *mockNoSchemaBundle) GetConfigSchema() (map[string]any, error) { + // Return nil to indicate "no schema" (skip validation) return nil, nil } + +// Test_GetDeploymentConfig tests the GetDeploymentConfig accessor method. 
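+// It covers absent, null, and populated deploymentConfig values, and verifies that
+// the returned map is a defensive deep copy.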
+func Test_GetDeploymentConfig(t *testing.T) { + // Create a bundle that returns nil schema (no validation) + bundle := &mockNoSchemaBundle{} + + tests := []struct { + name string + rawConfig []byte + expectedDeploymentConfig map[string]any + expectedDeploymentConfigNil bool + }{ + { + name: "empty config returns nil", + rawConfig: []byte(`{}`), + expectedDeploymentConfigNil: true, + }, + { + name: "config without deploymentConfig field returns nil", + rawConfig: []byte(`{"watchNamespace": "test-ns"}`), + expectedDeploymentConfigNil: true, + }, + { + name: "config with null deploymentConfig returns nil", + rawConfig: []byte(`{"deploymentConfig": null}`), + expectedDeploymentConfigNil: true, + }, + { + name: "config with valid deploymentConfig returns the object", + rawConfig: []byte(`{ + "deploymentConfig": { + "nodeSelector": { + "kubernetes.io/os": "linux" + }, + "resources": { + "requests": { + "memory": "128Mi" + } + } + } + }`), + expectedDeploymentConfig: map[string]any{ + "nodeSelector": map[string]any{ + "kubernetes.io/os": "linux", + }, + "resources": map[string]any{ + "requests": map[string]any{ + "memory": "128Mi", + }, + }, + }, + expectedDeploymentConfigNil: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + schema, err := bundle.GetConfigSchema() + require.NoError(t, err) + + cfg, err := config.UnmarshalConfig(tt.rawConfig, schema, "") + require.NoError(t, err) + + result := cfg.GetDeploymentConfig() + if tt.expectedDeploymentConfigNil { + require.Nil(t, result) + } else { + require.NotNil(t, result) + require.Equal(t, tt.expectedDeploymentConfig, result) + } + }) + } + + // Test nil config separately + t.Run("nil config returns nil", func(t *testing.T) { + var cfg *config.Config + result := cfg.GetDeploymentConfig() + require.Nil(t, result) + }) + + // Test that returned map is a defensive copy (mutations don't affect original) + t.Run("returned map is defensive copy - mutations don't affect original", func(t *testing.T) { + rawConfig := []byte(`{ + "deploymentConfig": { + "nodeSelector": { + "kubernetes.io/os": "linux" + } + } + }`) + + schema, err := bundle.GetConfigSchema() + require.NoError(t, err) + + cfg, err := config.UnmarshalConfig(rawConfig, schema, "") + require.NoError(t, err) + + // Get the deploymentConfig + result1 := cfg.GetDeploymentConfig() + require.NotNil(t, result1) + + // Mutate the returned map + result1["nodeSelector"] = map[string]any{ + "mutated": "value", + } + result1["newField"] = "added" + + // Get deploymentConfig again - should be unaffected by mutations + result2 := cfg.GetDeploymentConfig() + require.NotNil(t, result2) + + // Original values should be intact + require.Equal(t, map[string]any{ + "nodeSelector": map[string]any{ + "kubernetes.io/os": "linux", + }, + }, result2) + + // New field should not exist + _, exists := result2["newField"] + require.False(t, exists) + + // result1 should have the mutations + require.Equal(t, "added", result1["newField"]) + }) +} diff --git a/internal/operator-controller/controllers/clusterextension_admission_test.go b/internal/operator-controller/controllers/clusterextension_admission_test.go index 6ce9fc3c7..259bb9505 100644 --- a/internal/operator-controller/controllers/clusterextension_admission_test.go +++ b/internal/operator-controller/controllers/clusterextension_admission_test.go @@ -13,9 +13,7 @@ import ( ) func TestClusterExtensionSourceConfig(t *testing.T) { - // NOTE: Kubernetes validation error format for JSON null values varies across K8s versions. 
- // We check for the common part "Invalid value:" which appears in all versions. - sourceTypeEmptyError := "Invalid value:" + sourceTypeEmptyErrors := []string{"Invalid value: \"null\"", "Invalid value: null"} sourceTypeMismatchError := "spec.source.sourceType: Unsupported value" sourceConfigInvalidError := "spec.source: Invalid value" // unionField represents the required Catalog or (future) Bundle field required by SourceConfig @@ -23,12 +21,12 @@ func TestClusterExtensionSourceConfig(t *testing.T) { name string sourceType string unionField string - errMsg string + errMsgs []string }{ - {"sourceType is null", "", "Catalog", sourceTypeEmptyError}, - {"sourceType is invalid", "Invalid", "Catalog", sourceTypeMismatchError}, - {"catalog field does not exist", "Catalog", "", sourceConfigInvalidError}, - {"sourceConfig has required fields", "Catalog", "Catalog", ""}, + {"sourceType is null", "", "Catalog", sourceTypeEmptyErrors}, + {"sourceType is invalid", "Invalid", "Catalog", []string{sourceTypeMismatchError}}, + {"catalog field does not exist", "Catalog", "", []string{sourceConfigInvalidError}}, + {"sourceConfig has required fields", "Catalog", "Catalog", []string{}}, } t.Parallel() @@ -64,12 +62,20 @@ func TestClusterExtensionSourceConfig(t *testing.T) { })) } - if tc.errMsg == "" { + if len(tc.errMsgs) == 0 { require.NoError(t, err, "unexpected error for sourceType %q: %w", tc.sourceType, err) - } else { - require.Error(t, err) - require.Contains(t, err.Error(), tc.errMsg) + return + } + + require.Error(t, err) + matched := false + for _, msg := range tc.errMsgs { + if strings.Contains(err.Error(), msg) { + matched = true + break + } } + require.True(t, matched, "expected one of %v in error %q", tc.errMsgs, err) }) } } diff --git a/internal/operator-controller/controllers/clusterextension_controller.go b/internal/operator-controller/controllers/clusterextension_controller.go index f381152e6..f089efcac 100644 --- a/internal/operator-controller/controllers/clusterextension_controller.go +++ b/internal/operator-controller/controllers/clusterextension_controller.go @@ -21,6 +21,7 @@ import ( "errors" "fmt" "io/fs" + "slices" "strings" "github.com/go-logr/logr" @@ -166,15 +167,22 @@ func (r *ClusterExtensionReconciler) Reconcile(ctx context.Context, req ctrl.Req return res, reconcileErr } -// ensureAllConditionsWithReason checks that all defined condition types exist in the given ClusterExtension, -// and assigns a specified reason and custom message to any missing condition. +// ensureFailureConditionsWithReason keeps every non-deprecation condition present. +// If one is missing, we add it with the given reason and message so users see why +// reconcile failed. Deprecation conditions are handled later by SetDeprecationStatus. // //nolint:unparam // reason parameter is designed to be flexible, even if current callers use the same value -func ensureAllConditionsWithReason(ext *ocv1.ClusterExtension, reason v1alpha1.ConditionReason, message string) { +func ensureFailureConditionsWithReason(ext *ocv1.ClusterExtension, reason v1alpha1.ConditionReason, message string) { for _, condType := range conditionsets.ConditionTypes { + if isDeprecationCondition(condType) { + continue + } cond := apimeta.FindStatusCondition(ext.Status.Conditions, condType) + // Guard so we only fill empty slots. Without it, we would overwrite the detailed status that + // helpers (setStatusProgressing, setInstalledStatusCondition*, SetDeprecationStatus) already set. 
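+		// For example (illustrative): if setStatusProgressing already recorded
+		// Progressing=True/Retrying with a detailed retry message, FindStatusCondition
+		// returns that condition non-nil here and we keep the richer message instead of
+		// stomping it with the generic failure reason below.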
if cond == nil { - // Create a new condition with a valid reason and add it + // No condition exists yet, so add a fallback with the failure reason. Specific helpers replace it + // with the real progressing/bundle/package/channel message during reconciliation. SetStatusCondition(&ext.Status.Conditions, metav1.Condition{ Type: condType, Status: metav1.ConditionFalse, @@ -186,83 +194,224 @@ func ensureAllConditionsWithReason(ext *ocv1.ClusterExtension, reason v1alpha1.C } } -// SetDeprecationStatus will set the appropriate deprecation statuses for a ClusterExtension -// based on the provided bundle -func SetDeprecationStatus(ext *ocv1.ClusterExtension, bundleName string, deprecation *declcfg.Deprecation) { - deprecations := map[string][]declcfg.DeprecationEntry{} +// SetDeprecationStatus updates deprecation conditions based on catalog metadata. +// +// Behavior (following Kubernetes API conventions - conditions always present): +// - IS deprecated -> condition True with Reason: Deprecated +// - NOT deprecated -> condition False with Reason: NotDeprecated +// - Can't check (no catalog) -> condition Unknown with Reason: DeprecationStatusUnknown +// - No bundle installed -> BundleDeprecated Unknown with Reason: Absent +// +// This keeps deprecation conditions focused on catalog data. Install/validation errors +// never appear here - they belong in Progressing/Installed conditions. +func SetDeprecationStatus(ext *ocv1.ClusterExtension, installedBundleName string, deprecation *declcfg.Deprecation, hasCatalogData bool) { + info := buildDeprecationInfo(ext, installedBundleName, deprecation) + packageMessages := collectDeprecationMessages(info.PackageEntries) + channelMessages := collectDeprecationMessages(info.ChannelEntries) + bundleMessages := collectDeprecationMessages(info.BundleEntries) + + // Strategy: Always set deprecation conditions (following Kubernetes API conventions). + // SetStatusCondition preserves lastTransitionTime when status/reason/message haven't changed, + // preventing infinite reconciliation loops. + // - True = deprecated + // - False = not deprecated (verified via catalog) + // - Unknown = cannot verify (no catalog data or no bundle installed) + + if !hasCatalogData { + // When catalog is unavailable, set all to Unknown. + // BundleDeprecated uses Absent only when no bundle installed. 
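+	// Resulting BundleDeprecated condition for each input combination (illustrative summary):
+	//
+	//	hasCatalogData  installedBundleName     Status  / Reason
+	//	false           ""                      Unknown / Absent
+	//	false           "pkg.v1.0.0"            Unknown / DeprecationStatusUnknown
+	//	true            ""                      Unknown / Absent
+	//	true            deprecated bundle       True    / Deprecated
+	//	true            non-deprecated bundle   False   / NotDeprecated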
+ bundleReason := ocv1.ReasonAbsent + bundleMessage := "no bundle installed yet" + if installedBundleName != "" { + bundleReason = ocv1.ReasonDeprecationStatusUnknown + bundleMessage = "deprecation status unknown: catalog data unavailable" + } + SetStatusCondition(&ext.Status.Conditions, metav1.Condition{ + Type: ocv1.TypeDeprecated, + Status: metav1.ConditionUnknown, + Reason: ocv1.ReasonDeprecationStatusUnknown, + Message: "deprecation status unknown: catalog data unavailable", + ObservedGeneration: ext.GetGeneration(), + }) + SetStatusCondition(&ext.Status.Conditions, metav1.Condition{ + Type: ocv1.TypePackageDeprecated, + Status: metav1.ConditionUnknown, + Reason: ocv1.ReasonDeprecationStatusUnknown, + Message: "deprecation status unknown: catalog data unavailable", + ObservedGeneration: ext.GetGeneration(), + }) + SetStatusCondition(&ext.Status.Conditions, metav1.Condition{ + Type: ocv1.TypeChannelDeprecated, + Status: metav1.ConditionUnknown, + Reason: ocv1.ReasonDeprecationStatusUnknown, + Message: "deprecation status unknown: catalog data unavailable", + ObservedGeneration: ext.GetGeneration(), + }) + SetStatusCondition(&ext.Status.Conditions, metav1.Condition{ + Type: ocv1.TypeBundleDeprecated, + Status: metav1.ConditionUnknown, + Reason: bundleReason, + Message: bundleMessage, + ObservedGeneration: ext.GetGeneration(), + }) + return + } + + // Handle catalog data available: set conditions to True when deprecated, False when not. + messages := slices.Concat(packageMessages, channelMessages, bundleMessages) + if len(messages) > 0 { + SetStatusCondition(&ext.Status.Conditions, metav1.Condition{ + Type: ocv1.TypeDeprecated, + Status: metav1.ConditionTrue, + Reason: ocv1.ReasonDeprecated, + Message: strings.Join(messages, "\n"), + ObservedGeneration: ext.GetGeneration(), + }) + } else { + SetStatusCondition(&ext.Status.Conditions, metav1.Condition{ + Type: ocv1.TypeDeprecated, + Status: metav1.ConditionFalse, + Reason: ocv1.ReasonNotDeprecated, + Message: "not deprecated", + ObservedGeneration: ext.GetGeneration(), + }) + } + + if len(packageMessages) > 0 { + SetStatusCondition(&ext.Status.Conditions, metav1.Condition{ + Type: ocv1.TypePackageDeprecated, + Status: metav1.ConditionTrue, + Reason: ocv1.ReasonDeprecated, + Message: strings.Join(packageMessages, "\n"), + ObservedGeneration: ext.GetGeneration(), + }) + } else { + SetStatusCondition(&ext.Status.Conditions, metav1.Condition{ + Type: ocv1.TypePackageDeprecated, + Status: metav1.ConditionFalse, + Reason: ocv1.ReasonNotDeprecated, + Message: "package not deprecated", + ObservedGeneration: ext.GetGeneration(), + }) + } + + if len(channelMessages) > 0 { + SetStatusCondition(&ext.Status.Conditions, metav1.Condition{ + Type: ocv1.TypeChannelDeprecated, + Status: metav1.ConditionTrue, + Reason: ocv1.ReasonDeprecated, + Message: strings.Join(channelMessages, "\n"), + ObservedGeneration: ext.GetGeneration(), + }) + } else { + SetStatusCondition(&ext.Status.Conditions, metav1.Condition{ + Type: ocv1.TypeChannelDeprecated, + Status: metav1.ConditionFalse, + Reason: ocv1.ReasonNotDeprecated, + Message: "channel not deprecated", + ObservedGeneration: ext.GetGeneration(), + }) + } + + // BundleDeprecated: Unknown when no bundle installed, True when deprecated, False when not + if info.BundleStatus == metav1.ConditionUnknown { + SetStatusCondition(&ext.Status.Conditions, metav1.Condition{ + Type: ocv1.TypeBundleDeprecated, + Status: metav1.ConditionUnknown, + Reason: ocv1.ReasonAbsent, + Message: "no bundle installed yet", + 
ObservedGeneration: ext.GetGeneration(), + }) + } else if len(bundleMessages) > 0 { + SetStatusCondition(&ext.Status.Conditions, metav1.Condition{ + Type: ocv1.TypeBundleDeprecated, + Status: metav1.ConditionTrue, + Reason: ocv1.ReasonDeprecated, + Message: strings.Join(bundleMessages, "\n"), + ObservedGeneration: ext.GetGeneration(), + }) + } else { + SetStatusCondition(&ext.Status.Conditions, metav1.Condition{ + Type: ocv1.TypeBundleDeprecated, + Status: metav1.ConditionFalse, + Reason: ocv1.ReasonNotDeprecated, + Message: "bundle not deprecated", + ObservedGeneration: ext.GetGeneration(), + }) + } +} + +// isDeprecationCondition reports whether the given type is one of the deprecation +// conditions we manage separately. +func isDeprecationCondition(condType string) bool { + switch condType { + case ocv1.TypeDeprecated, ocv1.TypePackageDeprecated, ocv1.TypeChannelDeprecated, ocv1.TypeBundleDeprecated: + return true + default: + return false + } +} + +// deprecationInfo captures the deprecation data needed to update condition status. +type deprecationInfo struct { + PackageEntries []declcfg.DeprecationEntry + ChannelEntries []declcfg.DeprecationEntry + BundleEntries []declcfg.DeprecationEntry + BundleStatus metav1.ConditionStatus +} + +// buildDeprecationInfo filters the catalog deprecation data down to the package, channel, +// and bundle entries that matter for this ClusterExtension. An empty bundle name means +// nothing is installed yet, so we leave bundle status Unknown/Absent. +func buildDeprecationInfo(ext *ocv1.ClusterExtension, installedBundleName string, deprecation *declcfg.Deprecation) deprecationInfo { + info := deprecationInfo{BundleStatus: metav1.ConditionUnknown} channelSet := sets.New[string]() if ext.Spec.Source.Catalog != nil { - for _, channel := range ext.Spec.Source.Catalog.Channels { - channelSet.Insert(channel) - } + channelSet.Insert(ext.Spec.Source.Catalog.Channels...) } + if deprecation != nil { for _, entry := range deprecation.Entries { switch entry.Reference.Schema { case declcfg.SchemaPackage: - deprecations[ocv1.TypePackageDeprecated] = []declcfg.DeprecationEntry{entry} + info.PackageEntries = append(info.PackageEntries, entry) case declcfg.SchemaChannel: - if channelSet.Has(entry.Reference.Name) { - deprecations[ocv1.TypeChannelDeprecated] = append(deprecations[ocv1.TypeChannelDeprecated], entry) + // Include channel deprecations if: + // 1. No channels specified (channelSet empty) - any channel could be auto-selected + // 2. The deprecated channel matches one of the specified channels + if len(channelSet) == 0 || channelSet.Has(entry.Reference.Name) { + info.ChannelEntries = append(info.ChannelEntries, entry) } case declcfg.SchemaBundle: - if bundleName != entry.Reference.Name { - continue + if installedBundleName != "" && entry.Reference.Name == installedBundleName { + info.BundleEntries = append(info.BundleEntries, entry) } - deprecations[ocv1.TypeBundleDeprecated] = []declcfg.DeprecationEntry{entry} } } } - // first get ordered deprecation messages that we'll join in the Deprecated condition message - var deprecationMessages []string - for _, conditionType := range []string{ - ocv1.TypePackageDeprecated, - ocv1.TypeChannelDeprecated, - ocv1.TypeBundleDeprecated, - } { - if entries, ok := deprecations[conditionType]; ok { - for _, entry := range entries { - deprecationMessages = append(deprecationMessages, entry.Message) - } + // installedBundleName is empty when nothing is installed. 
In that case we want + // to report the bundle deprecation condition as Unknown/Absent. + if installedBundleName != "" { + if len(info.BundleEntries) > 0 { + info.BundleStatus = metav1.ConditionTrue + } else { + info.BundleStatus = metav1.ConditionFalse } } - // next, set the Deprecated condition - status, reason, message := metav1.ConditionFalse, ocv1.ReasonDeprecated, "" - if len(deprecationMessages) > 0 { - status, reason, message = metav1.ConditionTrue, ocv1.ReasonDeprecated, strings.Join(deprecationMessages, ";") - } - SetStatusCondition(&ext.Status.Conditions, metav1.Condition{ - Type: ocv1.TypeDeprecated, - Reason: reason, - Status: status, - Message: message, - ObservedGeneration: ext.Generation, - }) - - // finally, set the individual deprecation conditions for package, channel, and bundle - for _, conditionType := range []string{ - ocv1.TypePackageDeprecated, - ocv1.TypeChannelDeprecated, - ocv1.TypeBundleDeprecated, - } { - entries, ok := deprecations[conditionType] - status, reason, message := metav1.ConditionFalse, ocv1.ReasonDeprecated, "" - if ok { - status, reason = metav1.ConditionTrue, ocv1.ReasonDeprecated - for _, entry := range entries { - message = fmt.Sprintf("%s\n%s", message, entry.Message) - } + return info +} + +// collectDeprecationMessages collects the non-empty deprecation messages from the provided entries. +func collectDeprecationMessages(entries []declcfg.DeprecationEntry) []string { + messages := make([]string, 0, len(entries)) + for _, entry := range entries { + if entry.Message != "" { + messages = append(messages, entry.Message) } - SetStatusCondition(&ext.Status.Conditions, metav1.Condition{ - Type: conditionType, - Reason: reason, - Status: status, - Message: message, - ObservedGeneration: ext.Generation, - }) } + return messages } type ControllerBuilderOption func(builder *ctrl.Builder) diff --git a/internal/operator-controller/controllers/clusterextension_controller_test.go b/internal/operator-controller/controllers/clusterextension_controller_test.go index eabdba4f9..28d766ace 100644 --- a/internal/operator-controller/controllers/clusterextension_controller_test.go +++ b/internal/operator-controller/controllers/clusterextension_controller_test.go @@ -15,6 +15,7 @@ import ( "helm.sh/helm/v3/pkg/chart" "helm.sh/helm/v3/pkg/release" "helm.sh/helm/v3/pkg/storage/driver" + "k8s.io/apimachinery/pkg/api/equality" apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -32,7 +33,8 @@ import ( "github.com/operator-framework/operator-controller/internal/operator-controller/bundle" "github.com/operator-framework/operator-controller/internal/operator-controller/conditionsets" "github.com/operator-framework/operator-controller/internal/operator-controller/controllers" - finalizers "github.com/operator-framework/operator-controller/internal/operator-controller/finalizers" + "github.com/operator-framework/operator-controller/internal/operator-controller/features" + "github.com/operator-framework/operator-controller/internal/operator-controller/finalizers" "github.com/operator-framework/operator-controller/internal/operator-controller/labels" "github.com/operator-framework/operator-controller/internal/operator-controller/resolve" imageutil "github.com/operator-framework/operator-controller/internal/shared/util/image" @@ -127,7 +129,7 @@ func TestClusterExtensionShortCircuitsReconcileDuringDeletion(t *testing.T) { func TestClusterExtensionResolutionFails(t *testing.T) { pkgName := 
fmt.Sprintf("non-existent-%s", rand.String(6)) cl, reconciler := newClientAndReconciler(t, func(d *deps) { - d.Resolver = resolve.Func(func(_ context.Context, _ *ocv1.ClusterExtension, _ *ocv1.BundleMetadata) (*declcfg.Bundle, *bundle.VersionRelease, *declcfg.Deprecation, error) { + d.Resolver = resolve.Func(func(ctx context.Context, ext *ocv1.ClusterExtension, installedBundle *ocv1.BundleMetadata) (*declcfg.Bundle, *bundle.VersionRelease, *declcfg.Deprecation, error) { return nil, nil, nil, fmt.Errorf("no package %q found", pkgName) }) }) @@ -177,6 +179,262 @@ func TestClusterExtensionResolutionFails(t *testing.T) { require.NoError(t, cl.DeleteAllOf(ctx, &ocv1.ClusterExtension{})) } +// TestClusterExtensionResolutionFailsWithDeprecationData verifies that deprecation warnings are shown even when resolution fails. +// +// Scenario: +// - Resolution fails (package not found or version not available) +// - Resolver returns deprecation data along with the error +// - Catalog has marked the package as deprecated +// - PackageDeprecated and Deprecated conditions show True with the deprecation message +// - BundleDeprecated stays Unknown/Absent because no bundle is installed yet +// +// This ensures deprecation warnings reach users even when installation cannot proceed. +func TestClusterExtensionResolutionFailsWithDeprecationData(t *testing.T) { + ctx := context.Background() + pkgName := fmt.Sprintf("deprecated-%s", rand.String(6)) + deprecationMessage := "package marked deprecated in catalog" + cl, reconciler := newClientAndReconciler(t, func(d *deps) { + d.Resolver = resolve.Func(func(ctx context.Context, ext *ocv1.ClusterExtension, installedBundle *ocv1.BundleMetadata) (*declcfg.Bundle, *bundle.VersionRelease, *declcfg.Deprecation, error) { + return nil, nil, &declcfg.Deprecation{ + Entries: []declcfg.DeprecationEntry{{ + Reference: declcfg.PackageScopedReference{Schema: declcfg.SchemaPackage}, + Message: deprecationMessage, + }}, + }, fmt.Errorf("no package %q found", pkgName) + }) + }) + + extKey := types.NamespacedName{Name: fmt.Sprintf("cluster-extension-test-%s", rand.String(8))} + clusterExtension := &ocv1.ClusterExtension{ + ObjectMeta: metav1.ObjectMeta{Name: extKey.Name}, + Spec: ocv1.ClusterExtensionSpec{ + Source: ocv1.SourceConfig{ + SourceType: "Catalog", + Catalog: &ocv1.CatalogFilter{PackageName: pkgName}, + }, + Namespace: "default", + ServiceAccount: ocv1.ServiceAccountReference{Name: "default"}, + }, + } + require.NoError(t, cl.Create(ctx, clusterExtension)) + + res, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: extKey}) + require.Equal(t, ctrl.Result{}, res) + require.EqualError(t, err, fmt.Sprintf("no package %q found", pkgName)) + + require.NoError(t, cl.Get(ctx, extKey, clusterExtension)) + + pkgCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypePackageDeprecated) + require.NotNil(t, pkgCond) + require.Equal(t, metav1.ConditionTrue, pkgCond.Status) + require.Equal(t, deprecationMessage, pkgCond.Message) + + deprecatedCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeDeprecated) + require.NotNil(t, deprecatedCond) + require.Equal(t, metav1.ConditionTrue, deprecatedCond.Status) + + bundleCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeBundleDeprecated) + require.NotNil(t, bundleCond) + require.Equal(t, metav1.ConditionUnknown, bundleCond.Status, "no bundle installed yet, so keep it Unknown/Absent") + require.Equal(t, ocv1.ReasonAbsent, bundleCond.Reason) + + 
verifyInvariants(ctx, t, reconciler.Client, clusterExtension) + require.NoError(t, cl.DeleteAllOf(ctx, &ocv1.ClusterExtension{})) +} + +// TestClusterExtensionUpgradeShowsInstalledBundleDeprecation verifies that deprecation status +// reflects the INSTALLED bundle, not the RESOLVED bundle during upgrades. +// +// Scenario: +// - Bundle v1.0.0 is installed and deprecated in the catalog +// - Bundle v2.0.0 is available (resolved) and NOT deprecated +// - BundleDeprecated should show True with v1.0.0's deprecation message +// +// This demonstrates the key fix: status shows actual state (installed), not desired state (resolved). +// Users need to know what's currently running is deprecated, even if the upgrade target is fine. +func TestClusterExtensionUpgradeShowsInstalledBundleDeprecation(t *testing.T) { + ctx := context.Background() + pkgName := fmt.Sprintf("upgrade-%s", rand.String(6)) + installedBundleName := fmt.Sprintf("%s.v1.0.0", pkgName) + resolvedBundleName := fmt.Sprintf("%s.v2.0.0", pkgName) + deprecationMessage := "v1.0.0 is deprecated, please upgrade to v2.0.0" + + cl, reconciler := newClientAndReconciler(t, func(d *deps) { + d.Resolver = resolve.Func(func(ctx context.Context, ext *ocv1.ClusterExtension, installedBundle *ocv1.BundleMetadata) (*declcfg.Bundle, *bundle.VersionRelease, *declcfg.Deprecation, error) { + v := bundle.VersionRelease{ + Version: bsemver.MustParse("2.0.0"), + } + // Catalog has deprecation for v1.0.0 (installed), but v2.0.0 (resolved) is NOT deprecated + return &declcfg.Bundle{ + Name: resolvedBundleName, + Package: pkgName, + Image: fmt.Sprintf("quay.io/example/%s@sha256:resolved200", pkgName), + }, &v, &declcfg.Deprecation{ + Entries: []declcfg.DeprecationEntry{{ + Reference: declcfg.PackageScopedReference{ + Schema: declcfg.SchemaBundle, + Name: installedBundleName, // v1.0.0 is deprecated + }, + Message: deprecationMessage, + }}, + }, nil + }) + d.RevisionStatesGetter = &MockRevisionStatesGetter{ + RevisionStates: &controllers.RevisionStates{ + Installed: &controllers.RevisionMetadata{ + Package: pkgName, + BundleMetadata: ocv1.BundleMetadata{ + Name: installedBundleName, // v1.0.0 installed + Version: "1.0.0", + }, + Image: fmt.Sprintf("quay.io/example/%s@sha256:installed100", pkgName), + }, + }, + } + d.ImagePuller = &imageutil.MockPuller{ImageFS: fstest.MapFS{}} + d.Applier = &MockApplier{} + }) + + extKey := types.NamespacedName{Name: fmt.Sprintf("cluster-extension-test-%s", rand.String(8))} + clusterExtension := &ocv1.ClusterExtension{ + ObjectMeta: metav1.ObjectMeta{Name: extKey.Name}, + Spec: ocv1.ClusterExtensionSpec{ + Source: ocv1.SourceConfig{ + SourceType: "Catalog", + Catalog: &ocv1.CatalogFilter{PackageName: pkgName}, + }, + Namespace: "default", + ServiceAccount: ocv1.ServiceAccountReference{Name: "default"}, + }, + } + require.NoError(t, cl.Create(ctx, clusterExtension)) + + res, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: extKey}) + require.Equal(t, ctrl.Result{}, res) + require.NoError(t, err) + + require.NoError(t, cl.Get(ctx, extKey, clusterExtension)) + + // BundleDeprecated should reflect the INSTALLED bundle (v1.0.0), not the RESOLVED bundle (v2.0.0) + bundleCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeBundleDeprecated) + require.NotNil(t, bundleCond) + require.Equal(t, metav1.ConditionTrue, bundleCond.Status, "installed bundle v1.0.0 is deprecated") + require.Equal(t, ocv1.ReasonDeprecated, bundleCond.Reason) + require.Equal(t, deprecationMessage, bundleCond.Message) + + // 
Deprecated condition should also be True (combines all deprecation types) + deprecatedCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeDeprecated) + require.NotNil(t, deprecatedCond) + require.Equal(t, metav1.ConditionTrue, deprecatedCond.Status) + require.Contains(t, deprecatedCond.Message, deprecationMessage) + + // Package and Channel should NOT be deprecated (not in deprecation data) + pkgCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypePackageDeprecated) + require.NotNil(t, pkgCond, "package is not deprecated, condition should be False") + require.Equal(t, metav1.ConditionFalse, pkgCond.Status) + require.Equal(t, ocv1.ReasonNotDeprecated, pkgCond.Reason) + + channelCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeChannelDeprecated) + require.NotNil(t, channelCond, "channel is not deprecated, condition should be False") + require.Equal(t, metav1.ConditionFalse, channelCond.Status) + require.Equal(t, ocv1.ReasonNotDeprecated, channelCond.Reason) + + verifyInvariants(ctx, t, reconciler.Client, clusterExtension) + require.NoError(t, cl.DeleteAllOf(ctx, &ocv1.ClusterExtension{})) +} + +// TestClusterExtensionResolutionFailsWithoutCatalogDeprecationData verifies deprecation status handling when catalog data is unavailable. +// +// Scenario: +// - A bundle is already installed (v1.0.0) +// - Catalog exists but resolution fails (transient catalog issue, e.g., updating) +// - Resolution error is returned with no deprecation data +// - All deprecation conditions must be set to Unknown (not False) +// - BundleDeprecated uses reason DeprecationStatusUnknown because catalog is unavailable +// +// This ensures users see "we don't know the deprecation status" rather than "definitely not deprecated" +// when the catalog source of truth is unavailable. 
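+// Each deprecation condition is expected to carry the same Unknown shape, e.g. (illustrative):
+//
+//	{Type: "PackageDeprecated", Status: "Unknown", Reason: "DeprecationStatusUnknown",
+//	 Message: "deprecation status unknown: catalog data unavailable"}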
+func TestClusterExtensionResolutionFailsWithoutCatalogDeprecationData(t *testing.T) { + ctx := context.Background() + pkgName := fmt.Sprintf("missing-%s", rand.String(6)) + catalogName := fmt.Sprintf("test-catalog-%s", rand.String(6)) + installedBundleName := fmt.Sprintf("%s.v1.0.0", pkgName) + cl, reconciler := newClientAndReconciler(t, func(d *deps) { + d.Resolver = resolve.Func(func(ctx context.Context, ext *ocv1.ClusterExtension, installedBundle *ocv1.BundleMetadata) (*declcfg.Bundle, *bundle.VersionRelease, *declcfg.Deprecation, error) { + return nil, nil, nil, fmt.Errorf("no bundles found for package %q", pkgName) + }) + + d.RevisionStatesGetter = &MockRevisionStatesGetter{ + RevisionStates: &controllers.RevisionStates{ + Installed: &controllers.RevisionMetadata{ + Package: pkgName, + BundleMetadata: ocv1.BundleMetadata{ + Name: installedBundleName, + Version: "1.0.0", + }, + Image: "example.com/installed@sha256:deadbeef", + }, + }, + } + }) + + // Create a ClusterCatalog so CheckCatalogsExist returns true, causing retry instead of fallback + catalog := &ocv1.ClusterCatalog{ + ObjectMeta: metav1.ObjectMeta{Name: catalogName}, + Spec: ocv1.ClusterCatalogSpec{ + Source: ocv1.CatalogSource{ + Type: ocv1.SourceTypeImage, + Image: &ocv1.ImageSource{ + Ref: "quay.io/example/catalog:latest", + }, + }, + }, + } + require.NoError(t, cl.Create(ctx, catalog)) + + extKey := types.NamespacedName{Name: fmt.Sprintf("cluster-extension-test-%s", rand.String(8))} + clusterExtension := &ocv1.ClusterExtension{ + ObjectMeta: metav1.ObjectMeta{Name: extKey.Name}, + Spec: ocv1.ClusterExtensionSpec{ + Source: ocv1.SourceConfig{ + SourceType: "Catalog", + Catalog: &ocv1.CatalogFilter{PackageName: pkgName}, + }, + Namespace: "default", + ServiceAccount: ocv1.ServiceAccountReference{Name: "default"}, + }, + } + require.NoError(t, cl.Create(ctx, clusterExtension)) + + res, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: extKey}) + require.Equal(t, ctrl.Result{}, res) + require.EqualError(t, err, fmt.Sprintf("no bundles found for package %q", pkgName)) + + require.NoError(t, cl.Get(ctx, extKey, clusterExtension)) + + packageCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypePackageDeprecated) + require.NotNil(t, packageCond) + require.Equal(t, metav1.ConditionUnknown, packageCond.Status) + require.Equal(t, ocv1.ReasonDeprecationStatusUnknown, packageCond.Reason) + require.Equal(t, "deprecation status unknown: catalog data unavailable", packageCond.Message) + + deprecatedCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeDeprecated) + require.NotNil(t, deprecatedCond) + require.Equal(t, metav1.ConditionUnknown, deprecatedCond.Status) + require.Equal(t, ocv1.ReasonDeprecationStatusUnknown, deprecatedCond.Reason) + require.Equal(t, "deprecation status unknown: catalog data unavailable", deprecatedCond.Message) + + bundleCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeBundleDeprecated) + require.NotNil(t, bundleCond) + require.Equal(t, metav1.ConditionUnknown, bundleCond.Status) + require.Equal(t, ocv1.ReasonDeprecationStatusUnknown, bundleCond.Reason) + require.Equal(t, "deprecation status unknown: catalog data unavailable", bundleCond.Message) + + verifyInvariants(ctx, t, reconciler.Client, clusterExtension) + require.NoError(t, cl.DeleteAllOf(ctx, &ocv1.ClusterExtension{})) + require.NoError(t, cl.DeleteAllOf(ctx, &ocv1.ClusterCatalog{})) +} + func TestClusterExtensionResolutionSuccessfulUnpackFails(t 
*testing.T) { type testCase struct { name string @@ -230,7 +488,7 @@ func TestClusterExtensionResolutionSuccessfulUnpackFails(t *testing.T) { } }, func(d *deps) { - d.Resolver = resolve.Func(func(_ context.Context, _ *ocv1.ClusterExtension, _ *ocv1.BundleMetadata) (*declcfg.Bundle, *bundle.VersionRelease, *declcfg.Deprecation, error) { + d.Resolver = resolve.Func(func(ctx context.Context, ext *ocv1.ClusterExtension, installedBundle *ocv1.BundleMetadata) (*declcfg.Bundle, *bundle.VersionRelease, *declcfg.Deprecation, error) { v := bundle.VersionRelease{ Version: bsemver.MustParse("1.0.0"), } @@ -277,6 +535,25 @@ func TestClusterExtensionResolutionSuccessfulUnpackFails(t *testing.T) { require.Equal(t, expectReason, progressingCond.Reason) require.Contains(t, progressingCond.Message, fmt.Sprintf("for resolved bundle %q with version %q", expectedBundleMetadata.Name, expectedBundleMetadata.Version)) + t.Log("By checking deprecation conditions remain neutral and bundle is Unknown when not installed") + // When not deprecated, conditions are False (following K8s conventions) + deprecatedCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeDeprecated) + require.NotNil(t, deprecatedCond, "Deprecated condition should be False when not deprecated") + require.Equal(t, metav1.ConditionFalse, deprecatedCond.Status) + require.Equal(t, ocv1.ReasonNotDeprecated, deprecatedCond.Reason) + pkgCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypePackageDeprecated) + require.NotNil(t, pkgCond, "PackageDeprecated condition should be False when not deprecated") + require.Equal(t, metav1.ConditionFalse, pkgCond.Status) + require.Equal(t, ocv1.ReasonNotDeprecated, pkgCond.Reason) + chanCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeChannelDeprecated) + require.NotNil(t, chanCond, "ChannelDeprecated condition should be False when not deprecated") + require.Equal(t, metav1.ConditionFalse, chanCond.Status) + require.Equal(t, ocv1.ReasonNotDeprecated, chanCond.Reason) + bundleCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeBundleDeprecated) + require.NotNil(t, bundleCond) + require.Equal(t, metav1.ConditionUnknown, bundleCond.Status) + require.Equal(t, ocv1.ReasonAbsent, bundleCond.Reason) + require.NoError(t, cl.DeleteAllOf(ctx, &ocv1.ClusterExtension{})) }) } @@ -288,7 +565,7 @@ func TestClusterExtensionResolutionAndUnpackSuccessfulApplierFails(t *testing.T) d.ImagePuller = &imageutil.MockPuller{ ImageFS: fstest.MapFS{}, } - d.Resolver = resolve.Func(func(_ context.Context, _ *ocv1.ClusterExtension, _ *ocv1.BundleMetadata) (*declcfg.Bundle, *bundle.VersionRelease, *declcfg.Deprecation, error) { + d.Resolver = resolve.Func(func(ctx context.Context, ext *ocv1.ClusterExtension, installedBundle *ocv1.BundleMetadata) (*declcfg.Bundle, *bundle.VersionRelease, *declcfg.Deprecation, error) { v := bundle.VersionRelease{ Version: bsemver.MustParse("1.0.0"), } @@ -361,6 +638,132 @@ func TestClusterExtensionResolutionAndUnpackSuccessfulApplierFails(t *testing.T) require.Equal(t, ocv1.ReasonRetrying, progressingCond.Reason) require.Contains(t, progressingCond.Message, fmt.Sprintf("for resolved bundle %q with version %q", expectedBundleMetadata.Name, expectedBundleMetadata.Version)) + t.Log("By checking deprecation conditions remain neutral and bundle is Unknown when not installed") + // When not deprecated, conditions are False (following K8s conventions) + deprecatedCond := 
apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeDeprecated) + require.NotNil(t, deprecatedCond, "Deprecated condition should be False when not deprecated") + require.Equal(t, metav1.ConditionFalse, deprecatedCond.Status) + require.Equal(t, ocv1.ReasonNotDeprecated, deprecatedCond.Reason) + pkgCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypePackageDeprecated) + require.NotNil(t, pkgCond, "PackageDeprecated condition should be False when not deprecated") + require.Equal(t, metav1.ConditionFalse, pkgCond.Status) + require.Equal(t, ocv1.ReasonNotDeprecated, pkgCond.Reason) + chanCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeChannelDeprecated) + require.NotNil(t, chanCond, "ChannelDeprecated condition should be False when not deprecated") + require.Equal(t, metav1.ConditionFalse, chanCond.Status) + require.Equal(t, ocv1.ReasonNotDeprecated, chanCond.Reason) + bundleCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeBundleDeprecated) + require.NotNil(t, bundleCond) + require.Equal(t, metav1.ConditionUnknown, bundleCond.Status) + require.Equal(t, ocv1.ReasonAbsent, bundleCond.Reason) + + require.NoError(t, cl.DeleteAllOf(ctx, &ocv1.ClusterExtension{})) +} + +// TestClusterExtensionBoxcutterApplierFailsDoesNotLeakDeprecationErrors verifies deprecation status when apply fails. +// +// Scenario: +// - Resolution succeeds and returns a valid bundle (prometheus.v1.0.0) +// - Boxcutter applier fails during rollout (simulates apply failure) +// - A rolling revision exists but nothing is installed yet +// - Progressing condition shows the apply error (Retrying) +// - Deprecation conditions reflect catalog data (all False since nothing deprecated) +// - BundleDeprecated stays Unknown/Absent because apply failed before install +// +// This ensures apply errors appear in Progressing condition, not in deprecation conditions. +func TestClusterExtensionBoxcutterApplierFailsDoesNotLeakDeprecationErrors(t *testing.T) { + require.NoError(t, features.OperatorControllerFeatureGate.Set(fmt.Sprintf("%s=true", features.BoxcutterRuntime))) + t.Cleanup(func() { + require.NoError(t, features.OperatorControllerFeatureGate.Set(fmt.Sprintf("%s=false", features.BoxcutterRuntime))) + }) + + cl, reconciler := newClientAndReconciler(t, func(d *deps) { + // Boxcutter keeps a rolling revision when apply fails. We mirror that state so the test uses + // the same inputs the runtime would see. 
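+		// i.e. (illustrative): Installed is nil and one revision is in flight, so
+		// installedBundleName resolves to "" and BundleDeprecated stays Unknown/Absent:
+		//
+		//	&controllers.RevisionStates{RollingOut: []*controllers.RevisionMetadata{{}}}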
+ d.RevisionStatesGetter = &MockRevisionStatesGetter{ + RevisionStates: &controllers.RevisionStates{ + RollingOut: []*controllers.RevisionMetadata{{}}, + }, + } + d.Resolver = resolve.Func(func(ctx context.Context, ext *ocv1.ClusterExtension, installedBundle *ocv1.BundleMetadata) (*declcfg.Bundle, *bundle.VersionRelease, *declcfg.Deprecation, error) { + v := bundle.VersionRelease{ + Version: bsemver.MustParse("1.0.0"), + } + return &declcfg.Bundle{ + Name: "prometheus.v1.0.0", + Package: "prometheus", + Image: "quay.io/operatorhubio/prometheus@fake1.0.0", + }, &v, nil, nil + }) + d.ImagePuller = &imageutil.MockPuller{ImageFS: fstest.MapFS{}} + d.Applier = &MockApplier{err: errors.New("boxcutter apply failure")} + }) + + ctx := context.Background() + extKey := types.NamespacedName{Name: fmt.Sprintf("cluster-extension-test-%s", rand.String(8))} + + t.Log("When the Boxcutter Feature Flag is enabled and apply fails") + clusterExtension := &ocv1.ClusterExtension{ + ObjectMeta: metav1.ObjectMeta{Name: extKey.Name}, + Spec: ocv1.ClusterExtensionSpec{ + Source: ocv1.SourceConfig{ + SourceType: "Catalog", + Catalog: &ocv1.CatalogFilter{ + PackageName: "prometheus", + Version: "1.0.0", + Channels: []string{"beta"}, + }, + }, + Namespace: "default", + ServiceAccount: ocv1.ServiceAccountReference{ + Name: "default", + }, + }, + } + require.NoError(t, cl.Create(ctx, clusterExtension)) + + res, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: extKey}) + require.Equal(t, ctrl.Result{}, res) + require.Error(t, err) + + require.NoError(t, cl.Get(ctx, extKey, clusterExtension)) + + installedCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) + require.NotNil(t, installedCond) + require.Equal(t, metav1.ConditionFalse, installedCond.Status) + require.Equal(t, ocv1.ReasonAbsent, installedCond.Reason) + require.Contains(t, installedCond.Message, "No bundle installed") + + progressingCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) + require.NotNil(t, progressingCond) + require.Equal(t, metav1.ConditionTrue, progressingCond.Status) + require.Equal(t, ocv1.ReasonRetrying, progressingCond.Reason) + require.Contains(t, progressingCond.Message, "boxcutter apply failure") + + deprecatedCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeDeprecated) + require.NotNil(t, deprecatedCond) + require.Equal(t, metav1.ConditionUnknown, deprecatedCond.Status, "no catalog data during rollout, so Unknown") + require.Equal(t, ocv1.ReasonDeprecationStatusUnknown, deprecatedCond.Reason) + require.Equal(t, "deprecation status unknown: catalog data unavailable", deprecatedCond.Message) + + packageCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypePackageDeprecated) + require.NotNil(t, packageCond) + require.Equal(t, metav1.ConditionUnknown, packageCond.Status, "no catalog data during rollout, so Unknown") + require.Equal(t, ocv1.ReasonDeprecationStatusUnknown, packageCond.Reason) + require.Equal(t, "deprecation status unknown: catalog data unavailable", packageCond.Message) + + channelCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeChannelDeprecated) + require.NotNil(t, channelCond) + require.Equal(t, metav1.ConditionUnknown, channelCond.Status, "no catalog data during rollout, so Unknown") + require.Equal(t, ocv1.ReasonDeprecationStatusUnknown, channelCond.Reason) + require.Equal(t, "deprecation status unknown: catalog data unavailable", 
channelCond.Message) + + bundleCond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeBundleDeprecated) + require.NotNil(t, bundleCond) + require.Equal(t, metav1.ConditionUnknown, bundleCond.Status, "apply failed before install, so bundle status stays Unknown/Absent") + require.Equal(t, ocv1.ReasonAbsent, bundleCond.Reason) + require.Equal(t, "no bundle installed yet", bundleCond.Message) + require.NoError(t, cl.DeleteAllOf(ctx, &ocv1.ClusterExtension{})) } @@ -428,7 +831,7 @@ func TestClusterExtensionApplierFailsWithBundleInstalled(t *testing.T) { d.ImagePuller = &imageutil.MockPuller{ ImageFS: fstest.MapFS{}, } - d.Resolver = resolve.Func(func(_ context.Context, _ *ocv1.ClusterExtension, _ *ocv1.BundleMetadata) (*declcfg.Bundle, *bundle.VersionRelease, *declcfg.Deprecation, error) { + d.Resolver = resolve.Func(func(ctx context.Context, ext *ocv1.ClusterExtension, installedBundle *ocv1.BundleMetadata) (*declcfg.Bundle, *bundle.VersionRelease, *declcfg.Deprecation, error) { v := bundle.VersionRelease{ Version: bsemver.MustParse("1.0.0"), } @@ -523,7 +926,7 @@ func TestClusterExtensionManagerFailed(t *testing.T) { d.ImagePuller = &imageutil.MockPuller{ ImageFS: fstest.MapFS{}, } - d.Resolver = resolve.Func(func(_ context.Context, _ *ocv1.ClusterExtension, _ *ocv1.BundleMetadata) (*declcfg.Bundle, *bundle.VersionRelease, *declcfg.Deprecation, error) { + d.Resolver = resolve.Func(func(ctx context.Context, ext *ocv1.ClusterExtension, installedBundle *ocv1.BundleMetadata) (*declcfg.Bundle, *bundle.VersionRelease, *declcfg.Deprecation, error) { v := bundle.VersionRelease{ Version: bsemver.MustParse("1.0.0"), } @@ -602,7 +1005,7 @@ func TestClusterExtensionManagedContentCacheWatchFail(t *testing.T) { d.ImagePuller = &imageutil.MockPuller{ ImageFS: fstest.MapFS{}, } - d.Resolver = resolve.Func(func(_ context.Context, _ *ocv1.ClusterExtension, _ *ocv1.BundleMetadata) (*declcfg.Bundle, *bundle.VersionRelease, *declcfg.Deprecation, error) { + d.Resolver = resolve.Func(func(ctx context.Context, ext *ocv1.ClusterExtension, installedBundle *ocv1.BundleMetadata) (*declcfg.Bundle, *bundle.VersionRelease, *declcfg.Deprecation, error) { v := bundle.VersionRelease{ Version: bsemver.MustParse("1.0.0"), } @@ -683,7 +1086,7 @@ func TestClusterExtensionInstallationSucceeds(t *testing.T) { d.ImagePuller = &imageutil.MockPuller{ ImageFS: fstest.MapFS{}, } - d.Resolver = resolve.Func(func(_ context.Context, _ *ocv1.ClusterExtension, _ *ocv1.BundleMetadata) (*declcfg.Bundle, *bundle.VersionRelease, *declcfg.Deprecation, error) { + d.Resolver = resolve.Func(func(ctx context.Context, ext *ocv1.ClusterExtension, installedBundle *ocv1.BundleMetadata) (*declcfg.Bundle, *bundle.VersionRelease, *declcfg.Deprecation, error) { v := bundle.VersionRelease{ Version: bsemver.MustParse("1.0.0"), } @@ -764,7 +1167,7 @@ func TestClusterExtensionDeleteFinalizerFails(t *testing.T) { d.ImagePuller = &imageutil.MockPuller{ ImageFS: fstest.MapFS{}, } - d.Resolver = resolve.Func(func(_ context.Context, _ *ocv1.ClusterExtension, _ *ocv1.BundleMetadata) (*declcfg.Bundle, *bundle.VersionRelease, *declcfg.Deprecation, error) { + d.Resolver = resolve.Func(func(ctx context.Context, ext *ocv1.ClusterExtension, installedBundle *ocv1.BundleMetadata) (*declcfg.Bundle, *bundle.VersionRelease, *declcfg.Deprecation, error) { v := bundle.VersionRelease{ Version: bsemver.MustParse("1.0.0"), } @@ -867,14 +1270,24 @@ func verifyInvariants(ctx context.Context, t *testing.T, c client.Client, ext *o } func 
verifyConditionsInvariants(t *testing.T, ext *ocv1.ClusterExtension) { - // Expect that the cluster extension's set of conditions contains all defined - // condition types for the ClusterExtension API. Every reconcile should always - // ensure every condition type's status/reason/message reflects the state - // read during _this_ reconcile call. - require.Len(t, ext.Status.Conditions, len(conditionsets.ConditionTypes)) - for _, tt := range conditionsets.ConditionTypes { + // All conditions must always be present after first reconciliation. + // Core conditions: Installed, Progressing + // Deprecation conditions: Deprecated, PackageDeprecated, ChannelDeprecated, BundleDeprecated + coreConditions := []string{ocv1.TypeInstalled, ocv1.TypeProgressing} + deprecationConditions := []string{ocv1.TypeDeprecated, ocv1.TypePackageDeprecated, ocv1.TypeChannelDeprecated, ocv1.TypeBundleDeprecated} + + for _, tt := range coreConditions { cond := apimeta.FindStatusCondition(ext.Status.Conditions, tt) - require.NotNil(t, cond) + require.NotNil(t, cond, "core condition %s must be present", tt) + require.NotEmpty(t, cond.Status) + require.Contains(t, conditionsets.ConditionReasons, cond.Reason) + require.Equal(t, ext.GetGeneration(), cond.ObservedGeneration) + } + + // Deprecation conditions must always be present and valid + for _, tt := range deprecationConditions { + cond := apimeta.FindStatusCondition(ext.Status.Conditions, tt) + require.NotNil(t, cond, "deprecation condition %s must be present", tt) require.NotEmpty(t, cond.Status) require.Contains(t, conditionsets.ConditionReasons, cond.Reason) require.Equal(t, ext.GetGeneration(), cond.ObservedGeneration) @@ -882,15 +1295,30 @@ } func TestSetDeprecationStatus(t *testing.T) { + // The catalogDataProvided/hasCatalogData pair lets each test express whether it supplies + // catalog inputs at all and, separately, whether SetDeprecationStatus should treat catalog + // data as available; the deprecation field carries any actual deprecation entries. + // This helps us cover three distinct user-facing states: "no catalog response" (everything + // stays Unknown), "catalog answered with no deprecations" (conditions explicitly set to + // False with reason NotDeprecated, with BundleDeprecated remaining Unknown when no bundle + // is installed), and "catalog answered with explicit deprecations" (conditions go True). + // + // Key scenarios tested: + // 1. No catalog data + no bundle -> all Unknown, BundleDeprecated uses reason Absent + // 2. No catalog data + bundle installed -> all Unknown, BundleDeprecated uses reason DeprecationStatusUnknown + // 3. Catalog data provided + no deprecations -> deprecation conditions explicitly set to False + // with reason NotDeprecated (BundleDeprecated remains Unknown when no bundle is installed) + // 4. 
Catalog data provided + explicit deprecations -> relevant conditions True for _, tc := range []struct { name string clusterExtension *ocv1.ClusterExtension expectedClusterExtension *ocv1.ClusterExtension bundle *declcfg.Bundle deprecation *declcfg.Deprecation + catalogDataProvided bool + hasCatalogData bool }{ { - name: "no deprecations, all deprecation statuses set to False", + name: "no catalog data, all deprecation statuses set to Unknown", clusterExtension: &ocv1.ClusterExtension{ ObjectMeta: metav1.ObjectMeta{ Generation: 1, @@ -907,36 +1335,183 @@ func TestSetDeprecationStatus(t *testing.T) { Conditions: []metav1.Condition{ { Type: ocv1.TypeDeprecated, - Reason: ocv1.ReasonDeprecated, + Reason: ocv1.ReasonDeprecationStatusUnknown, + Status: metav1.ConditionUnknown, + Message: "deprecation status unknown: catalog data unavailable", + ObservedGeneration: 1, + }, + { + Type: ocv1.TypePackageDeprecated, + Reason: ocv1.ReasonDeprecationStatusUnknown, + Status: metav1.ConditionUnknown, + Message: "deprecation status unknown: catalog data unavailable", + ObservedGeneration: 1, + }, + { + Type: ocv1.TypeChannelDeprecated, + Reason: ocv1.ReasonDeprecationStatusUnknown, + Status: metav1.ConditionUnknown, + Message: "deprecation status unknown: catalog data unavailable", + ObservedGeneration: 1, + }, + { + Type: ocv1.TypeBundleDeprecated, + Reason: ocv1.ReasonAbsent, + Status: metav1.ConditionUnknown, + Message: "no bundle installed yet", + ObservedGeneration: 1, + }, + }, + }, + }, + bundle: &declcfg.Bundle{}, + deprecation: nil, + catalogDataProvided: false, + hasCatalogData: false, + }, + { + // Scenario: + // - A bundle is installed (v1.0.0) + // - Catalog becomes unavailable (removed or network failure) + // - No catalog data can be retrieved + // - BundleDeprecated must show Unknown/DeprecationStatusUnknown (not Absent) + // - Reason is DeprecationStatusUnknown because catalog data is unavailable; Absent is only for no bundle + name: "no catalog data with installed bundle keeps bundle condition Unknown", + clusterExtension: &ocv1.ClusterExtension{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }, + Status: ocv1.ClusterExtensionStatus{Conditions: []metav1.Condition{}}, + }, + expectedClusterExtension: &ocv1.ClusterExtension{ + ObjectMeta: metav1.ObjectMeta{Generation: 1}, + Status: ocv1.ClusterExtensionStatus{Conditions: []metav1.Condition{ + {Type: ocv1.TypeDeprecated, Reason: ocv1.ReasonDeprecationStatusUnknown, Status: metav1.ConditionUnknown, Message: "deprecation status unknown: catalog data unavailable", ObservedGeneration: 1}, + {Type: ocv1.TypePackageDeprecated, Reason: ocv1.ReasonDeprecationStatusUnknown, Status: metav1.ConditionUnknown, Message: "deprecation status unknown: catalog data unavailable", ObservedGeneration: 1}, + {Type: ocv1.TypeChannelDeprecated, Reason: ocv1.ReasonDeprecationStatusUnknown, Status: metav1.ConditionUnknown, Message: "deprecation status unknown: catalog data unavailable", ObservedGeneration: 1}, + {Type: ocv1.TypeBundleDeprecated, Reason: ocv1.ReasonDeprecationStatusUnknown, Status: metav1.ConditionUnknown, Message: "deprecation status unknown: catalog data unavailable", ObservedGeneration: 1}, + }}, + }, + bundle: &declcfg.Bundle{Name: "installed.v1.0.0"}, + deprecation: nil, + catalogDataProvided: false, + hasCatalogData: false, + }, + { + // Scenario: + // - A bundle is installed + // - Catalog returns deprecation entries but catalogDataProvided=false + // - This tests that deprecation data is ignored when hasCatalogData is false + // - All 
conditions go to Unknown regardless of deprecation entries present + // - BundleDeprecated uses DeprecationStatusUnknown (not Absent) because bundle exists + name: "deprecation entries ignored when catalog data flag is false", + clusterExtension: &ocv1.ClusterExtension{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }, + Status: ocv1.ClusterExtensionStatus{ + Conditions: []metav1.Condition{}, + }, + }, + expectedClusterExtension: &ocv1.ClusterExtension{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }, + Status: ocv1.ClusterExtensionStatus{ + Conditions: []metav1.Condition{ + { + Type: ocv1.TypeDeprecated, + Reason: ocv1.ReasonDeprecationStatusUnknown, + Status: metav1.ConditionUnknown, + Message: "deprecation status unknown: catalog data unavailable", + ObservedGeneration: 1, + }, + { + Type: ocv1.TypePackageDeprecated, + Reason: ocv1.ReasonDeprecationStatusUnknown, + Status: metav1.ConditionUnknown, + Message: "deprecation status unknown: catalog data unavailable", + ObservedGeneration: 1, + }, + { + Type: ocv1.TypeChannelDeprecated, + Reason: ocv1.ReasonDeprecationStatusUnknown, + Status: metav1.ConditionUnknown, + Message: "deprecation status unknown: catalog data unavailable", + ObservedGeneration: 1, + }, + { + Type: ocv1.TypeBundleDeprecated, + Reason: ocv1.ReasonDeprecationStatusUnknown, + Status: metav1.ConditionUnknown, + Message: "deprecation status unknown: catalog data unavailable", + ObservedGeneration: 1, + }, + }, + }, + }, + bundle: &declcfg.Bundle{Name: "ignored"}, + deprecation: &declcfg.Deprecation{Entries: []declcfg.DeprecationEntry{{ + Reference: declcfg.PackageScopedReference{Schema: declcfg.SchemaPackage}, + Message: "should not surface", + }}}, + catalogDataProvided: true, + hasCatalogData: false, + }, + { + name: "catalog consulted but no deprecations, conditions False except BundleDeprecated Unknown when no bundle", + clusterExtension: &ocv1.ClusterExtension{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }, + Status: ocv1.ClusterExtensionStatus{ + Conditions: []metav1.Condition{}, + }, + }, + expectedClusterExtension: &ocv1.ClusterExtension{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }, + Status: ocv1.ClusterExtensionStatus{ + Conditions: []metav1.Condition{ + { + Type: ocv1.TypeDeprecated, + Reason: ocv1.ReasonNotDeprecated, Status: metav1.ConditionFalse, + Message: "not deprecated", ObservedGeneration: 1, }, { Type: ocv1.TypePackageDeprecated, - Reason: ocv1.ReasonDeprecated, + Reason: ocv1.ReasonNotDeprecated, Status: metav1.ConditionFalse, + Message: "package not deprecated", ObservedGeneration: 1, }, { Type: ocv1.TypeChannelDeprecated, - Reason: ocv1.ReasonDeprecated, + Reason: ocv1.ReasonNotDeprecated, Status: metav1.ConditionFalse, + Message: "channel not deprecated", ObservedGeneration: 1, }, { Type: ocv1.TypeBundleDeprecated, - Reason: ocv1.ReasonDeprecated, - Status: metav1.ConditionFalse, + Reason: ocv1.ReasonAbsent, + Status: metav1.ConditionUnknown, + Message: "no bundle installed yet", ObservedGeneration: 1, }, }, }, }, - bundle: &declcfg.Bundle{}, - deprecation: nil, + bundle: &declcfg.Bundle{}, + deprecation: nil, + catalogDataProvided: true, + hasCatalogData: true, }, { - name: "deprecated channel, but no channel specified, all deprecation statuses set to False", + name: "deprecated channel exists, no channels specified (auto-select), channel deprecation shown", clusterExtension: &ocv1.ClusterExtension{ ObjectMeta: metav1.ObjectMeta{ Generation: 1, @@ -966,25 +1541,29 @@ func TestSetDeprecationStatus(t *testing.T) { { 
Type: ocv1.TypeDeprecated, Reason: ocv1.ReasonDeprecated, - Status: metav1.ConditionFalse, + Status: metav1.ConditionTrue, + Message: "bad channel!", ObservedGeneration: 1, }, { Type: ocv1.TypePackageDeprecated, - Reason: ocv1.ReasonDeprecated, + Reason: ocv1.ReasonNotDeprecated, Status: metav1.ConditionFalse, + Message: "package not deprecated", ObservedGeneration: 1, }, { Type: ocv1.TypeChannelDeprecated, Reason: ocv1.ReasonDeprecated, - Status: metav1.ConditionFalse, + Status: metav1.ConditionTrue, + Message: "bad channel!", ObservedGeneration: 1, }, { Type: ocv1.TypeBundleDeprecated, - Reason: ocv1.ReasonDeprecated, - Status: metav1.ConditionFalse, + Reason: ocv1.ReasonAbsent, + Status: metav1.ConditionUnknown, + Message: "no bundle installed yet", ObservedGeneration: 1, }, }, @@ -997,11 +1576,14 @@ func TestSetDeprecationStatus(t *testing.T) { Schema: declcfg.SchemaChannel, Name: "badchannel", }, + Message: "bad channel!", }}, }, + catalogDataProvided: true, + hasCatalogData: true, }, { - name: "deprecated channel, but a non-deprecated channel specified, all deprecation statuses set to False", + name: "deprecated channel exists but non-deprecated channel specified; conditions False except BundleDeprecated Unknown", clusterExtension: &ocv1.ClusterExtension{ ObjectMeta: metav1.ObjectMeta{ Generation: 1, @@ -1034,26 +1616,30 @@ func TestSetDeprecationStatus(t *testing.T) { Conditions: []metav1.Condition{ { Type: ocv1.TypeDeprecated, - Reason: ocv1.ReasonDeprecated, + Reason: ocv1.ReasonNotDeprecated, Status: metav1.ConditionFalse, + Message: "not deprecated", ObservedGeneration: 1, }, { Type: ocv1.TypePackageDeprecated, - Reason: ocv1.ReasonDeprecated, + Reason: ocv1.ReasonNotDeprecated, Status: metav1.ConditionFalse, + Message: "package not deprecated", ObservedGeneration: 1, }, { Type: ocv1.TypeChannelDeprecated, - Reason: ocv1.ReasonDeprecated, + Reason: ocv1.ReasonNotDeprecated, Status: metav1.ConditionFalse, + Message: "channel not deprecated", ObservedGeneration: 1, }, { Type: ocv1.TypeBundleDeprecated, - Reason: ocv1.ReasonDeprecated, - Status: metav1.ConditionFalse, + Reason: ocv1.ReasonAbsent, + Status: metav1.ConditionUnknown, + Message: "no bundle installed yet", ObservedGeneration: 1, }, }, @@ -1070,9 +1656,11 @@ func TestSetDeprecationStatus(t *testing.T) { }, }, }, + catalogDataProvided: true, + hasCatalogData: true, }, { - name: "deprecated channel specified, ChannelDeprecated and Deprecated status set to true, others set to false", + name: "deprecated channel specified, ChannelDeprecated and Deprecated set to true, PackageDeprecated False, BundleDeprecated Unknown", clusterExtension: &ocv1.ClusterExtension{ ObjectMeta: metav1.ObjectMeta{ Generation: 1, @@ -1107,24 +1695,28 @@ func TestSetDeprecationStatus(t *testing.T) { Type: ocv1.TypeDeprecated, Reason: ocv1.ReasonDeprecated, Status: metav1.ConditionTrue, + Message: "bad channel!", ObservedGeneration: 1, }, { Type: ocv1.TypePackageDeprecated, - Reason: ocv1.ReasonDeprecated, + Reason: ocv1.ReasonNotDeprecated, Status: metav1.ConditionFalse, + Message: "package not deprecated", ObservedGeneration: 1, }, { Type: ocv1.TypeChannelDeprecated, Reason: ocv1.ReasonDeprecated, Status: metav1.ConditionTrue, + Message: "bad channel!", ObservedGeneration: 1, }, { Type: ocv1.TypeBundleDeprecated, - Reason: ocv1.ReasonDeprecated, - Status: metav1.ConditionFalse, + Reason: ocv1.ReasonAbsent, + Status: metav1.ConditionUnknown, + Message: "no bundle installed yet", ObservedGeneration: 1, }, }, @@ -1142,6 +1734,8 @@ func 
TestSetDeprecationStatus(t *testing.T) { }, }, }, + catalogDataProvided: true, + hasCatalogData: true, }, { name: "deprecated package and channel specified, deprecated bundle, all deprecation statuses set to true", @@ -1179,24 +1773,28 @@ func TestSetDeprecationStatus(t *testing.T) { Type: ocv1.TypeDeprecated, Reason: ocv1.ReasonDeprecated, Status: metav1.ConditionTrue, + Message: "bad package!\nbad channel!\nbad bundle!", ObservedGeneration: 1, }, { Type: ocv1.TypePackageDeprecated, Reason: ocv1.ReasonDeprecated, Status: metav1.ConditionTrue, + Message: "bad package!", ObservedGeneration: 1, }, { Type: ocv1.TypeChannelDeprecated, Reason: ocv1.ReasonDeprecated, Status: metav1.ConditionTrue, + Message: "bad channel!", ObservedGeneration: 1, }, { Type: ocv1.TypeBundleDeprecated, Reason: ocv1.ReasonDeprecated, Status: metav1.ConditionTrue, + Message: "bad bundle!", ObservedGeneration: 1, }, }, @@ -1227,9 +1825,11 @@ func TestSetDeprecationStatus(t *testing.T) { }, }, }, + catalogDataProvided: true, + hasCatalogData: true, }, { - name: "deprecated channel specified, deprecated bundle, all deprecation statuses set to true, all deprecation statuses set to true except PackageDeprecated", + name: "deprecated channel and bundle specified, Deprecated/ChannelDeprecated/BundleDeprecated set to true, PackageDeprecated False", clusterExtension: &ocv1.ClusterExtension{ ObjectMeta: metav1.ObjectMeta{ Generation: 1, @@ -1264,24 +1864,28 @@ func TestSetDeprecationStatus(t *testing.T) { Type: ocv1.TypeDeprecated, Reason: ocv1.ReasonDeprecated, Status: metav1.ConditionTrue, + Message: "bad channel!\nbad bundle!", ObservedGeneration: 1, }, { Type: ocv1.TypePackageDeprecated, - Reason: ocv1.ReasonDeprecated, + Reason: ocv1.ReasonNotDeprecated, Status: metav1.ConditionFalse, + Message: "package not deprecated", ObservedGeneration: 1, }, { Type: ocv1.TypeChannelDeprecated, Reason: ocv1.ReasonDeprecated, Status: metav1.ConditionTrue, + Message: "bad channel!", ObservedGeneration: 1, }, { Type: ocv1.TypeBundleDeprecated, Reason: ocv1.ReasonDeprecated, Status: metav1.ConditionTrue, + Message: "bad bundle!", ObservedGeneration: 1, }, }, @@ -1306,9 +1910,11 @@ func TestSetDeprecationStatus(t *testing.T) { }, }, }, + catalogDataProvided: true, + hasCatalogData: true, }, { - name: "deprecated package and channel specified, all deprecation statuses set to true except BundleDeprecated", + name: "deprecated package and channel specified, Deprecated/PackageDeprecated/ChannelDeprecated set to true, BundleDeprecated Unknown/Absent (no bundle installed)", clusterExtension: &ocv1.ClusterExtension{ ObjectMeta: metav1.ObjectMeta{ Generation: 1, @@ -1343,24 +1949,28 @@ func TestSetDeprecationStatus(t *testing.T) { Type: ocv1.TypeDeprecated, Reason: ocv1.ReasonDeprecated, Status: metav1.ConditionTrue, + Message: "bad package!\nbad channel!", ObservedGeneration: 1, }, { Type: ocv1.TypePackageDeprecated, Reason: ocv1.ReasonDeprecated, Status: metav1.ConditionTrue, + Message: "bad package!", ObservedGeneration: 1, }, { Type: ocv1.TypeChannelDeprecated, Reason: ocv1.ReasonDeprecated, Status: metav1.ConditionTrue, + Message: "bad channel!", ObservedGeneration: 1, }, { Type: ocv1.TypeBundleDeprecated, - Reason: ocv1.ReasonDeprecated, - Status: metav1.ConditionFalse, + Reason: ocv1.ReasonAbsent, + Status: metav1.ConditionUnknown, + Message: "no bundle installed yet", ObservedGeneration: 1, }, }, @@ -1384,9 +1994,11 @@ func TestSetDeprecationStatus(t *testing.T) { }, }, }, + catalogDataProvided: true, + hasCatalogData: true, }, { - name: 
"deprecated channels specified, ChannelDeprecated and Deprecated status set to true, others set to false", + name: "deprecated channels specified, ChannelDeprecated and Deprecated set to true, PackageDeprecated False, BundleDeprecated Unknown/Absent", clusterExtension: &ocv1.ClusterExtension{ ObjectMeta: metav1.ObjectMeta{ Generation: 1, @@ -1421,24 +2033,28 @@ func TestSetDeprecationStatus(t *testing.T) { Type: ocv1.TypeDeprecated, Reason: ocv1.ReasonDeprecated, Status: metav1.ConditionTrue, + Message: "bad channel!\nanother bad channel!", ObservedGeneration: 1, }, { Type: ocv1.TypePackageDeprecated, - Reason: ocv1.ReasonDeprecated, + Reason: ocv1.ReasonNotDeprecated, Status: metav1.ConditionFalse, + Message: "package not deprecated", ObservedGeneration: 1, }, { Type: ocv1.TypeChannelDeprecated, Reason: ocv1.ReasonDeprecated, Status: metav1.ConditionTrue, + Message: "bad channel!\nanother bad channel!", ObservedGeneration: 1, }, { Type: ocv1.TypeBundleDeprecated, - Reason: ocv1.ReasonDeprecated, - Status: metav1.ConditionFalse, + Reason: ocv1.ReasonAbsent, + Status: metav1.ConditionUnknown, + Message: "no bundle installed yet", ObservedGeneration: 1, }, }, @@ -1459,21 +2075,232 @@ func TestSetDeprecationStatus(t *testing.T) { Schema: declcfg.SchemaChannel, Name: "anotherbadchannel", }, - Message: "another bad channedl!", + Message: "another bad channel!", }, }, }, + catalogDataProvided: true, + hasCatalogData: true, }, } { t.Run(tc.name, func(t *testing.T) { - controllers.SetDeprecationStatus(tc.clusterExtension, tc.bundle.Name, tc.deprecation) + // When a test provides deprecation data it must also explicitly state that the catalog responded. + // This guard keeps future cases from silently falling back to the "catalog absent" branch. + if tc.deprecation != nil && !tc.catalogDataProvided { + require.Failf(t, "test case must set catalogDataProvided when deprecation is supplied", "test case %q", tc.name) + } + hasCatalogData := tc.catalogDataProvided && tc.hasCatalogData + controllers.SetDeprecationStatus(tc.clusterExtension, tc.bundle.Name, tc.deprecation, hasCatalogData) // TODO: we should test for unexpected changes to lastTransitionTime. We only expect // lastTransitionTime to change when the status of the condition changes. - assert.Empty(t, cmp.Diff(tc.expectedClusterExtension, tc.clusterExtension, cmpopts.IgnoreFields(metav1.Condition{}, "Message", "LastTransitionTime"))) + assert.Empty(t, cmp.Diff(tc.expectedClusterExtension, tc.clusterExtension, cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime"))) + }) + } +} + +// TestSetDeprecationStatus_NoInfiniteReconcileLoop verifies that calling SetDeprecationStatus +// multiple times with the same inputs does not cause infinite reconciliation loops. +// +// The issue: If we always remove and re-add conditions, lastTransitionTime updates every time, +// which causes DeepEqual to fail, triggering another reconcile indefinitely. +// +// The fix: Only remove conditions when we're NOT re-adding them. When setting a condition, +// call SetStatusCondition directly - it preserves lastTransitionTime when status/reason/message +// haven't changed. 
+func TestSetDeprecationStatus_NoInfiniteReconcileLoop(t *testing.T) { + tests := []struct { + name string + installedBundleName string + deprecation *declcfg.Deprecation + hasCatalogData bool + setupConditions func(*ocv1.ClusterExtension) + expectConditionsCount int + description string + }{ + { + name: "deprecated package - should stabilize after first reconcile", + installedBundleName: "test.v1.0.0", + deprecation: &declcfg.Deprecation{ + Entries: []declcfg.DeprecationEntry{ + { + Reference: declcfg.PackageScopedReference{ + Schema: declcfg.SchemaPackage, + }, + Message: "package is deprecated", + }, + }, + }, + hasCatalogData: true, + setupConditions: func(ext *ocv1.ClusterExtension) { + // No conditions initially + }, + expectConditionsCount: 4, // All 4 conditions: Deprecated/PackageDeprecated=True, ChannelDeprecated/BundleDeprecated=False + description: "First call adds conditions, second call preserves lastTransitionTime", + }, + { + name: "not deprecated - conditions always present as False", + installedBundleName: "", // No bundle installed + deprecation: nil, + hasCatalogData: true, + setupConditions: func(ext *ocv1.ClusterExtension) { + // Simulate old behavior: False conditions present with old reason + apimeta.SetStatusCondition(&ext.Status.Conditions, metav1.Condition{ + Type: ocv1.TypeDeprecated, + Status: metav1.ConditionFalse, + Reason: ocv1.ReasonDeprecated, + Message: "", + ObservedGeneration: 1, + }) + apimeta.SetStatusCondition(&ext.Status.Conditions, metav1.Condition{ + Type: ocv1.TypePackageDeprecated, + Status: metav1.ConditionFalse, + Reason: ocv1.ReasonDeprecated, + Message: "", + ObservedGeneration: 1, + }) + }, + expectConditionsCount: 4, // All 4 conditions as False (except BundleDeprecated Unknown when no bundle) + description: "Sets all conditions to False with NotDeprecated reason, then stabilizes", + }, + { + name: "catalog unavailable - should stabilize with Unknown conditions", + installedBundleName: "test.v1.0.0", + deprecation: nil, + hasCatalogData: false, + setupConditions: func(ext *ocv1.ClusterExtension) { + // No conditions initially + }, + expectConditionsCount: 4, // All four Unknown conditions + description: "Sets Unknown conditions, then preserves them", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ext := &ocv1.ClusterExtension{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }, + Status: ocv1.ClusterExtensionStatus{ + Conditions: []metav1.Condition{}, + }, + } + + // Setup initial conditions if specified + if tt.setupConditions != nil { + tt.setupConditions(ext) + } + + // First reconcile: should add/update conditions + controllers.SetDeprecationStatus(ext, tt.installedBundleName, tt.deprecation, tt.hasCatalogData) + + firstReconcileConditions := make([]metav1.Condition, len(ext.Status.Conditions)) + copy(firstReconcileConditions, ext.Status.Conditions) + + // Verify expected number of conditions + deprecationConditions := filterDeprecationConditions(ext.Status.Conditions) + require.Len(t, deprecationConditions, tt.expectConditionsCount, + "First reconcile should have %d deprecation conditions", tt.expectConditionsCount) + + // Second reconcile: should preserve lastTransitionTime (no changes) + controllers.SetDeprecationStatus(ext, tt.installedBundleName, tt.deprecation, tt.hasCatalogData) + + secondReconcileConditions := ext.Status.Conditions + + // Verify conditions are identical (including lastTransitionTime) + require.Len(t, secondReconcileConditions, len(firstReconcileConditions), + "Number of 
conditions should remain the same") + + for i, firstCond := range firstReconcileConditions { + secondCond := secondReconcileConditions[i] + require.Equal(t, firstCond.Type, secondCond.Type, "Condition type should match") + require.Equal(t, firstCond.Status, secondCond.Status, "Condition status should match") + require.Equal(t, firstCond.Reason, secondCond.Reason, "Condition reason should match") + require.Equal(t, firstCond.Message, secondCond.Message, "Condition message should match") + + // This is the critical check: lastTransitionTime should NOT change + require.Equal(t, firstCond.LastTransitionTime, secondCond.LastTransitionTime, + "lastTransitionTime should be preserved (prevents infinite reconcile loop)") + } + + // Third reconcile: verify it remains stable + controllers.SetDeprecationStatus(ext, tt.installedBundleName, tt.deprecation, tt.hasCatalogData) + + thirdReconcileConditions := ext.Status.Conditions + require.Len(t, thirdReconcileConditions, len(secondReconcileConditions), + "Conditions should remain stable after multiple reconciles") + + for i, secondCond := range secondReconcileConditions { + thirdCond := thirdReconcileConditions[i] + require.Equal(t, secondCond.LastTransitionTime, thirdCond.LastTransitionTime, + "lastTransitionTime should remain stable across reconciles") + } }) } } +// TestSetDeprecationStatus_StatusChangesOnlyWhenNeeded verifies that calling SetDeprecationStatus +// only modifies the status when actual deprecation state changes, not on every reconcile. +func TestSetDeprecationStatus_StatusChangesOnlyWhenNeeded(t *testing.T) { + ext := &ocv1.ClusterExtension{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }, + Status: ocv1.ClusterExtensionStatus{ + Conditions: []metav1.Condition{}, + }, + } + + // Scenario 1: Package becomes deprecated + deprecation := &declcfg.Deprecation{ + Entries: []declcfg.DeprecationEntry{ + { + Reference: declcfg.PackageScopedReference{Schema: declcfg.SchemaPackage}, + Message: "package is deprecated", + }, + }, + } + + // First reconcile: add deprecation condition + controllers.SetDeprecationStatus(ext, "test.v1.0.0", deprecation, true) + statusAfterFirstReconcile := ext.Status.DeepCopy() + + // Second reconcile: same deprecation state + controllers.SetDeprecationStatus(ext, "test.v1.0.0", deprecation, true) + statusAfterSecondReconcile := ext.Status.DeepCopy() + + // Status should be semantically equal (DeepEqual would return true) + require.True(t, equality.Semantic.DeepEqual(statusAfterFirstReconcile, statusAfterSecondReconcile), + "Status should not change when deprecation state is unchanged") + + // Scenario 2: Deprecation is resolved (package no longer deprecated) + controllers.SetDeprecationStatus(ext, "test.v1.0.0", nil, true) + statusAfterResolution := ext.Status.DeepCopy() + + // Status should have changed (conditions removed) + require.False(t, equality.Semantic.DeepEqual(statusAfterSecondReconcile, statusAfterResolution), + "Status should change when deprecation is resolved") + + // Scenario 3: Verify resolution is stable + controllers.SetDeprecationStatus(ext, "test.v1.0.0", nil, true) + statusAfterFourthReconcile := ext.Status.DeepCopy() + + require.True(t, equality.Semantic.DeepEqual(statusAfterResolution, statusAfterFourthReconcile), + "Status should remain stable after deprecation is resolved") +} + +// filterDeprecationConditions returns only the deprecation-related conditions +func filterDeprecationConditions(conditions []metav1.Condition) []metav1.Condition { + var result []metav1.Condition + for _, cond 
:= range conditions { + switch cond.Type { + case ocv1.TypeDeprecated, ocv1.TypePackageDeprecated, ocv1.TypeChannelDeprecated, ocv1.TypeBundleDeprecated: + result = append(result, cond) + } + } + return result +} + type MockActionGetter struct { description string rels []*release.Release diff --git a/internal/operator-controller/controllers/clusterextension_reconcile_steps.go b/internal/operator-controller/controllers/clusterextension_reconcile_steps.go index 1e5996414..16d691f8b 100644 --- a/internal/operator-controller/controllers/clusterextension_reconcile_steps.go +++ b/internal/operator-controller/controllers/clusterextension_reconcile_steps.go @@ -95,46 +95,62 @@ func RetrieveRevisionStates(r RevisionStatesGetter) ReconcileStepFunc { func ResolveBundle(r resolve.Resolver, c client.Client) ReconcileStepFunc { return func(ctx context.Context, state *reconcileState, ext *ocv1.ClusterExtension) (*ctrl.Result, error) { l := log.FromContext(ctx) - var resolvedRevisionMetadata *RevisionMetadata - if len(state.revisionStates.RollingOut) == 0 { - l.Info("resolving bundle") - var bm *ocv1.BundleMetadata + + // If already rolling out, use existing revision and set deprecation to Unknown (no catalog check) + if len(state.revisionStates.RollingOut) > 0 { + installedBundleName := "" if state.revisionStates.Installed != nil { - bm = &state.revisionStates.Installed.BundleMetadata - } - resolvedBundle, resolvedBundleVersion, resolvedDeprecation, err := r.Resolve(ctx, ext, bm) - if err != nil { - return handleResolutionError(ctx, c, state, ext, err) + installedBundleName = state.revisionStates.Installed.Name } + SetDeprecationStatus(ext, installedBundleName, nil, false) + state.resolvedRevisionMetadata = state.revisionStates.RollingOut[0] + return nil, nil + } - // set deprecation status after _successful_ resolution - // TODO: - // 1. It seems like deprecation status should reflect the currently installed bundle, not the resolved - // bundle. So perhaps we should set package and channel deprecations directly after resolution, but - // defer setting the bundle deprecation until we successfully install the bundle. - // 2. If resolution fails because it can't find a bundle, that doesn't mean we wouldn't be able to find - // a deprecation for the ClusterExtension's spec.packageName. Perhaps we should check for a non-nil - // resolvedDeprecation even if resolution returns an error. If present, we can still update some of - // our deprecation status. - // - Open question though: what if different catalogs have different opinions of what's deprecated. - // If we can't resolve a bundle, how do we know which catalog to trust for deprecation information? - // Perhaps if the package shows up in multiple catalogs and deprecations don't match, we can set - // the deprecation status to unknown? Or perhaps we somehow combine the deprecation information from - // all catalogs? - SetDeprecationStatus(ext, resolvedBundle.Name, resolvedDeprecation) - resolvedRevisionMetadata = &RevisionMetadata{ - Package: resolvedBundle.Package, - Image: resolvedBundle.Image, - // TODO: Right now, operator-controller only supports registry+v1 bundles and has no concept - // of a "release" field. If/when we add a release field concept or a new bundle format - // we need to re-evaluate use of `AsLegacyRegistryV1Version` so that we avoid propagating - // registry+v1's semver spec violations of treating build metadata as orderable. 
- BundleMetadata: bundleutil.MetadataFor(resolvedBundle.Name, resolvedBundleVersion.AsLegacyRegistryV1Version()), - } - } else { - resolvedRevisionMetadata = state.revisionStates.RollingOut[0] + // Resolve a new bundle from the catalog + l.V(1).Info("resolving bundle") + var bm *ocv1.BundleMetadata + if state.revisionStates.Installed != nil { + bm = &state.revisionStates.Installed.BundleMetadata + } + resolvedBundle, resolvedBundleVersion, resolvedDeprecation, err := r.Resolve(ctx, ext, bm) + + // Get the installed bundle name for deprecation status. + // BundleDeprecated should reflect what's currently running, not what we're trying to install. + installedBundleName := "" + if state.revisionStates.Installed != nil { + installedBundleName = state.revisionStates.Installed.Name + } + + // Set deprecation status based on resolution results: + // - If resolution succeeds: hasCatalogData=true, deprecation shows catalog data (nil=not deprecated) + // - If resolution fails but returns deprecation: hasCatalogData=true, show package/channel deprecation warnings + // - If resolution fails with nil deprecation: hasCatalogData=false, all conditions go Unknown + // + // Note: We DO check for deprecation data even when resolution fails (hasCatalogData = err == nil || resolvedDeprecation != nil). + // This allows us to show package/channel deprecation warnings even when we can't resolve a specific bundle. + // + // TODO: Open question - what if different catalogs have different opinions of what's deprecated? + // If we can't resolve a bundle, how do we know which catalog to trust for deprecation information? + // Perhaps if the package shows up in multiple catalogs and deprecations don't match, we can set + // the deprecation status to unknown? Or perhaps we somehow combine the deprecation information from + // all catalogs? This needs a follow-up discussion and PR. + hasCatalogData := err == nil || resolvedDeprecation != nil + SetDeprecationStatus(ext, installedBundleName, resolvedDeprecation, hasCatalogData) + + if err != nil { + return handleResolutionError(ctx, c, state, ext, err) + } + + state.resolvedRevisionMetadata = &RevisionMetadata{ + Package: resolvedBundle.Package, + Image: resolvedBundle.Image, + // TODO: Right now, operator-controller only supports registry+v1 bundles and has no concept + // of a "release" field. If/when we add a release field concept or a new bundle format + // we need to re-evaluate use of `AsLegacyRegistryV1Version` so that we avoid propagating + // registry+v1's semver spec violations of treating build metadata as orderable. 
+ BundleMetadata: bundleutil.MetadataFor(resolvedBundle.Name, resolvedBundleVersion.AsLegacyRegistryV1Version()), } - state.resolvedRevisionMetadata = resolvedRevisionMetadata return nil, nil } } @@ -160,7 +176,7 @@ func handleResolutionError(ctx context.Context, c client.Client, state *reconcil msg := fmt.Sprintf("failed to resolve bundle: %v", err) setStatusProgressing(ext, err) setInstalledStatusFromRevisionStates(ext, state.revisionStates) - ensureAllConditionsWithReason(ext, ocv1.ReasonRetrying, msg) + ensureFailureConditionsWithReason(ext, ocv1.ReasonRetrying, msg) return nil, err } @@ -179,7 +195,7 @@ func handleResolutionError(ctx context.Context, c client.Client, state *reconcil "installedVersion", installedVersion) setStatusProgressing(ext, err) setInstalledStatusFromRevisionStates(ext, state.revisionStates) - ensureAllConditionsWithReason(ext, ocv1.ReasonRetrying, msg) + ensureFailureConditionsWithReason(ext, ocv1.ReasonRetrying, msg) return nil, err } @@ -198,7 +214,7 @@ func handleResolutionError(ctx context.Context, c client.Client, state *reconcil "catalogName", catalogName) setStatusProgressing(ext, err) setInstalledStatusFromRevisionStates(ext, state.revisionStates) - ensureAllConditionsWithReason(ext, ocv1.ReasonRetrying, msg) + ensureFailureConditionsWithReason(ext, ocv1.ReasonRetrying, msg) return nil, err } @@ -215,7 +231,7 @@ func handleResolutionError(ctx context.Context, c client.Client, state *reconcil "catalogName", catalogName) setStatusProgressing(ext, err) setInstalledStatusFromRevisionStates(ext, state.revisionStates) - ensureAllConditionsWithReason(ext, ocv1.ReasonRetrying, msg) + ensureFailureConditionsWithReason(ext, ocv1.ReasonRetrying, msg) return nil, err } diff --git a/internal/operator-controller/controllers/common_controller_test.go b/internal/operator-controller/controllers/common_controller_test.go index 4d0a0536d..93fad962e 100644 --- a/internal/operator-controller/controllers/common_controller_test.go +++ b/internal/operator-controller/controllers/common_controller_test.go @@ -146,7 +146,7 @@ func TestClusterExtensionDeprecationMessageTruncation(t *testing.T) { deprecationMessages = append(deprecationMessages, fmt.Sprintf("API version 'v1beta1' of resource 'customresources%d.example.com' is deprecated, use 'v1' instead", i)) } - longDeprecationMsg := strings.Join(deprecationMessages, "; ") + longDeprecationMsg := strings.Join(deprecationMessages, "\n") setInstalledStatusConditionUnknown(ext, longDeprecationMsg) cond := meta.FindStatusCondition(ext.Status.Conditions, ocv1.TypeInstalled) diff --git a/internal/operator-controller/rukpak/bundle/README.md b/internal/operator-controller/rukpak/bundle/README.md new file mode 100644 index 000000000..010cf46b7 --- /dev/null +++ b/internal/operator-controller/rukpak/bundle/README.md @@ -0,0 +1,51 @@ +# Registry+v1 Bundle Configuration JSON Schema + +This directory contains the JSON schema for registry+v1 bundle configuration validation. + +## Overview + +The `registryv1bundleconfig.json` schema is used to validate the bundle configuration in the ClusterExtension's inline configuration. This includes: + +- `watchNamespace`: Controls which namespace(s) the operator watches for custom resources +- `deploymentConfig`: Customizes operator deployment (environment variables, resources, volumes, etc.) + +The `deploymentConfig` portion is based on OLM v0's `SubscriptionConfig` struct but excludes the `selector` field which was never used in v0. 
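+
+### Example Configuration
+
+A minimal, hypothetical example of an inline configuration accepted by this schema
+(the field names come from the schema; the values are illustrative only):
+
+```json
+{
+  "watchNamespace": "my-namespace",
+  "deploymentConfig": {
+    "env": [
+      { "name": "LOG_LEVEL", "value": "debug" }
+    ],
+    "nodeSelector": {
+      "kubernetes.io/os": "linux"
+    }
+  }
+}
+```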
+ +## Schema Generation + +The schema in `registryv1bundleconfig.json` is a frozen snapshot that provides stability for validation. It is based on the `v1alpha1.SubscriptionConfig` type from `github.com/operator-framework/api/pkg/operators/v1alpha1/subscription_types.go`. + +### Fields Included + +- `nodeSelector`: Map of node selector labels +- `tolerations`: Array of pod tolerations +- `resources`: Container resource requirements (requests/limits) +- `envFrom`: Environment variables from ConfigMaps/Secrets +- `env`: Individual environment variables +- `volumes`: Pod volumes +- `volumeMounts`: Container volume mounts +- `affinity`: Pod affinity/anti-affinity rules +- `annotations`: Custom annotations for deployments/pods + +### Fields Excluded + +- `selector`: This field exists in v0's `SubscriptionConfig` but is never used by the v0 controller. It has been intentionally excluded from the v1 schema. + +## Regenerating the Schema + +To regenerate the schema when the `github.com/operator-framework/api` dependency is updated: + +```bash +make update-registryv1-bundle-schema +``` + +This will regenerate the schema based on the current module-resolved version of `v1alpha1.SubscriptionConfig` from `github.com/operator-framework/api` (as determined via `go list -m`). + +## Validation + +The schema is used to validate user-provided bundle configuration (including `watchNamespace` and `deploymentConfig`) in ClusterExtension resources. The base schema is loaded and customized at runtime based on the operator's install modes to ensure proper validation of the `watchNamespace` field. Validation happens during: + +1. **Admission**: When a ClusterExtension is created or updated +2. **Runtime**: When extracting configuration from the inline field + +Validation errors provide clear, semantic feedback to users about what fields are invalid and why. diff --git a/internal/operator-controller/rukpak/bundle/registryv1.go b/internal/operator-controller/rukpak/bundle/registryv1.go index 7fc3e3e18..d8412fde6 100644 --- a/internal/operator-controller/rukpak/bundle/registryv1.go +++ b/internal/operator-controller/rukpak/bundle/registryv1.go @@ -1,6 +1,10 @@ package bundle import ( + _ "embed" + "encoding/json" + "fmt" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/sets" @@ -11,7 +15,13 @@ import ( ) const ( - BundleConfigWatchNamespaceKey = "watchNamespace" + BundleConfigWatchNamespaceKey = "watchNamespace" + BundleConfigDeploymentConfigKey = "deploymentConfig" +) + +var ( + //go:embed registryv1bundleconfig.json + bundleConfigSchemaJSON []byte ) type RegistryV1 struct { @@ -31,38 +41,91 @@ func (rv1 *RegistryV1) GetConfigSchema() (map[string]any, error) { return buildBundleConfigSchema(installModes) } -// buildBundleConfigSchema creates validation rules based on what the operator supports. +// buildBundleConfigSchema loads the base bundle config schema and modifies it based on +// the operator's install modes. // -// Examples of how install modes affect validation: -// - AllNamespaces only: user can't set watchNamespace (operator watches everything) -// - OwnNamespace only: user must set watchNamespace to the install namespace -// - SingleNamespace only: user must set watchNamespace to a different namespace -// - AllNamespaces + OwnNamespace: user can optionally set watchNamespace +// The base schema includes +// 1. watchNamespace +// 2. deploymentConfig properties. 
+// The watchNamespace property is modified based on what the operator supports: +// - AllNamespaces only: remove watchNamespace (operator always watches everything) +// - OwnNamespace only: make watchNamespace required, must equal install namespace +// - SingleNamespace only: make watchNamespace required, must differ from install namespace +// - AllNamespaces + OwnNamespace: make watchNamespace optional func buildBundleConfigSchema(installModes sets.Set[v1alpha1.InstallMode]) (map[string]any, error) { - schema := map[string]any{ - "$schema": "http://json-schema.org/draft-07/schema#", - "type": "object", - "additionalProperties": false, // Reject unknown fields (catches typos and misconfigurations) + // Load the base schema + baseSchema, err := getBundleConfigSchemaMap() + if err != nil { + return nil, fmt.Errorf("failed to get base bundle config schema: %w", err) } - properties := map[string]any{} - var required []any + // Get properties map from the schema + properties, ok := baseSchema["properties"].(map[string]any) + if !ok { + return nil, fmt.Errorf("base schema missing properties") + } - // Add watchNamespace property if the bundle supports it + // Modify watchNamespace field based on install modes if isWatchNamespaceConfigurable(installModes) { + // Replace the generic watchNamespace with install-mode-specific version watchNSProperty, isRequired := buildWatchNamespaceProperty(installModes) properties["watchNamespace"] = watchNSProperty + + // Preserve existing required fields, only add/remove watchNamespace if isRequired { - required = append(required, "watchNamespace") + addToRequired(baseSchema, "watchNamespace") + } else { + removeFromRequired(baseSchema, "watchNamespace") + } + } else { + // AllNamespaces only - remove watchNamespace property entirely + // (operator always watches all namespaces, no configuration needed) + delete(properties, "watchNamespace") + removeFromRequired(baseSchema, "watchNamespace") + } + + return baseSchema, nil +} + +// addToRequired adds fieldName to the schema's required array if it's not already present. +// Preserves any existing required fields. +func addToRequired(schema map[string]any, fieldName string) { + var required []any + if existingRequired, ok := schema["required"].([]any); ok { + // Check if field is already required + for _, field := range existingRequired { + if field == fieldName { + return // Already required + } } + required = existingRequired + } + // Add the field to required list + schema["required"] = append(required, fieldName) +} + +// removeFromRequired removes fieldName from the schema's required array if present. +// Preserves all other required fields. +func removeFromRequired(schema map[string]any, fieldName string) { + existingRequired, ok := schema["required"].([]any) + if !ok { + return // No required array } - schema["properties"] = properties - if len(required) > 0 { - schema["required"] = required + // Filter out the field + filtered := make([]any, 0, len(existingRequired)) + for _, field := range existingRequired { + if field != fieldName { + filtered = append(filtered, field) + } } - return schema, nil + // Update or delete the required array + if len(filtered) > 0 { + schema["required"] = filtered + } else { + delete(schema, "required") + } } // buildWatchNamespaceProperty creates the validation rules for the watchNamespace field. 
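+// For illustration only (the exact generated fragment may differ): when OwnNamespace is
+// the only supported install mode, the resulting property pins watchNamespace to the
+// install namespace and the field is added to the schema's required list, roughly:
+//
+//	"watchNamespace": {"type": "string", "const": "<install-namespace>"}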
@@ -151,3 +214,16 @@ func isWatchNamespaceConfigRequired(installModes sets.Set[v1alpha1.InstallMode]) return isWatchNamespaceConfigurable(installModes) && !installModes.Has(v1alpha1.InstallMode{Type: v1alpha1.InstallModeTypeAllNamespaces, Supported: true}) } + +// getBundleConfigSchemaMap returns the complete registry+v1 bundle configuration schema +// as a map[string]any. This includes the following properties: +// 1. watchNamespace +// 2. deploymentConfig +// The schema can be modified at runtime based on operator install modes before validation. +func getBundleConfigSchemaMap() (map[string]any, error) { + var schemaMap map[string]any + if err := json.Unmarshal(bundleConfigSchemaJSON, &schemaMap); err != nil { + return nil, fmt.Errorf("failed to unmarshal bundle config schema: %w", err) + } + return schemaMap, nil +} diff --git a/internal/operator-controller/rukpak/bundle/registryv1_test.go b/internal/operator-controller/rukpak/bundle/registryv1_test.go new file mode 100644 index 000000000..6efcbafa2 --- /dev/null +++ b/internal/operator-controller/rukpak/bundle/registryv1_test.go @@ -0,0 +1,119 @@ +package bundle + +import ( + "testing" + + "github.com/santhosh-tekuri/jsonschema/v6" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGetBundleConfigSchemaMap(t *testing.T) { + schema, err := getBundleConfigSchemaMap() + require.NoError(t, err, "should successfully get bundle config schema") + require.NotNil(t, schema, "schema should not be nil") + + t.Run("schema has correct metadata", func(t *testing.T) { + assert.Equal(t, "http://json-schema.org/draft-07/schema#", schema["$schema"]) + assert.Contains(t, schema["$id"], "registry-v1-bundle-config") + assert.Equal(t, "Registry+v1 Bundle Configuration", schema["title"]) + assert.NotEmpty(t, schema["description"]) + assert.Equal(t, "object", schema["type"]) + assert.Equal(t, false, schema["additionalProperties"]) + }) + + t.Run("schema includes watchNamespace and deploymentConfig properties", func(t *testing.T) { + properties, ok := schema["properties"].(map[string]any) + require.True(t, ok, "schema should have properties") + + assert.Contains(t, properties, "watchNamespace") + assert.Contains(t, properties, "deploymentConfig") + }) + + t.Run("watchNamespace has anyOf with null and string", func(t *testing.T) { + properties, ok := schema["properties"].(map[string]any) + require.True(t, ok) + + watchNamespace, ok := properties["watchNamespace"].(map[string]any) + require.True(t, ok, "watchNamespace should be present") + + anyOf, ok := watchNamespace["anyOf"].([]any) + require.True(t, ok, "watchNamespace should have anyOf") + assert.Len(t, anyOf, 2, "watchNamespace anyOf should have 2 options") + }) + + t.Run("deploymentConfig has expected structure", func(t *testing.T) { + properties, ok := schema["properties"].(map[string]any) + require.True(t, ok) + + deploymentConfig, ok := properties["deploymentConfig"].(map[string]any) + require.True(t, ok, "deploymentConfig should be present") + + assert.Equal(t, "object", deploymentConfig["type"]) + assert.Equal(t, false, deploymentConfig["additionalProperties"]) + assert.NotEmpty(t, deploymentConfig["description"]) + + dcProps, ok := deploymentConfig["properties"].(map[string]any) + require.True(t, ok, "deploymentConfig should have properties") + + // Verify expected fields from SubscriptionConfig + expectedFields := []string{ + "nodeSelector", + "tolerations", + "resources", + "env", + "envFrom", + "volumes", + "volumeMounts", + "affinity", + "annotations", 
+ } + + for _, field := range expectedFields { + assert.Contains(t, dcProps, field, "deploymentConfig should include %s field", field) + } + + // Verify selector is NOT included + assert.NotContains(t, dcProps, "selector", "selector field should be excluded per RFC") + }) + + t.Run("schema includes components/schemas for OpenAPI types", func(t *testing.T) { + components, ok := schema["components"].(map[string]any) + require.True(t, ok, "schema should have components section for $ref resolution") + + schemas, ok := components["schemas"].(map[string]any) + require.True(t, ok, "components should have schemas") + assert.NotEmpty(t, schemas, "components/schemas should not be empty") + + // Verify some expected Kubernetes types are included + expectedTypes := []string{ + "io.k8s.api.core.v1.Toleration", + "io.k8s.api.core.v1.ResourceRequirements", + "io.k8s.api.core.v1.EnvVar", + } + + for _, typeName := range expectedTypes { + assert.Contains(t, schemas, typeName, "components/schemas should include %s", typeName) + } + }) +} + +// TestSchemaCompilation verifies that the generated schema can be compiled +// by a JSON schema validator without errors. This catches broken $ref targets +// and other structural issues. +func TestSchemaCompilation(t *testing.T) { + // Get the schema as a map (same as how config package uses it) + schemaMap, err := getBundleConfigSchemaMap() + require.NoError(t, err, "should successfully get bundle config schema") + + // Compile the schema using the same library used by config package + compiler := jsonschema.NewCompiler() + + // Add the schema resource (using map[string]any, same as config package) + err = compiler.AddResource("schema.json", schemaMap) + require.NoError(t, err, "should add schema resource to compiler") + + compiledSchema, err := compiler.Compile("schema.json") + require.NoError(t, err, "schema should compile without errors - this verifies all $ref targets are resolvable") + require.NotNil(t, compiledSchema, "compiled schema should not be nil") +} diff --git a/internal/operator-controller/rukpak/bundle/registryv1bundleconfig.json b/internal/operator-controller/rukpak/bundle/registryv1bundleconfig.json new file mode 100644 index 000000000..1197f721a --- /dev/null +++ b/internal/operator-controller/rukpak/bundle/registryv1bundleconfig.json @@ -0,0 +1,2657 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "https://operator-framework.io/schemas/registry-v1-bundle-config.json", + "title": "Registry+v1 Bundle Configuration", + "description": "Configuration schema for registry+v1 bundles. Includes watchNamespace for controlling operator scope and deploymentConfig for customizing operator deployment (environment variables, resource scheduling, storage, and pod placement). The deploymentConfig follows the same structure and behavior as OLM v0's SubscriptionConfig. 
Note: The 'selector' field from v0's SubscriptionConfig is not included as it was never used.", + "type": "object", + "properties": { + "deploymentConfig": { + "type": "object", + "description": "Configuration for customizing operator deployment (environment variables, resources, volumes, etc.)", + "properties": { + "affinity": { + "$ref": "#/components/schemas/io.k8s.api.core.v1.Affinity" + }, + "annotations": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "env": { + "type": "array", + "items": { + "$ref": "#/components/schemas/io.k8s.api.core.v1.EnvVar" + } + }, + "envFrom": { + "type": "array", + "items": { + "$ref": "#/components/schemas/io.k8s.api.core.v1.EnvFromSource" + } + }, + "nodeSelector": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "resources": { + "$ref": "#/components/schemas/io.k8s.api.core.v1.ResourceRequirements" + }, + "tolerations": { + "type": "array", + "items": { + "$ref": "#/components/schemas/io.k8s.api.core.v1.Toleration" + } + }, + "volumeMounts": { + "type": "array", + "items": { + "$ref": "#/components/schemas/io.k8s.api.core.v1.VolumeMount" + } + }, + "volumes": { + "type": "array", + "items": { + "$ref": "#/components/schemas/io.k8s.api.core.v1.Volume" + } + } + }, + "additionalProperties": false + }, + "watchNamespace": { + "description": "The namespace that the operator should watch for custom resources. The meaning and validation of this field depends on the operator's install modes. This field may be optional or required, and may have format constraints, based on the operator's supported install modes.", + "anyOf": [ + { + "type": "null" + }, + { + "type": "string" + } + ] + } + }, + "additionalProperties": false, + "components": { + "schemas": { + "io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource": { + "description": "Represents a Persistent Disk resource in AWS.\n\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling.", + "properties": { + "fsType": { + "description": "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "type": "string" + }, + "partition": { + "description": "partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).", + "format": "int32", + "type": "integer" + }, + "readOnly": { + "description": "readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "type": "boolean" + }, + "volumeID": { + "default": "", + "description": "volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "type": "string" + } + }, + "required": [ + "volumeID" + ], + "type": "object" + }, + "io.k8s.api.core.v1.Affinity": { + "description": "Affinity is a group of affinity scheduling rules.", + "properties": { + "nodeAffinity": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.NodeAffinity" + } + ], + "description": "Describes node affinity scheduling rules for the pod." + }, + "podAffinity": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.PodAffinity" + } + ], + "description": "Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s))." + }, + "podAntiAffinity": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.PodAntiAffinity" + } + ], + "description": "Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s))." + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.AzureDiskVolumeSource": { + "description": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + "properties": { + "cachingMode": { + "default": "ReadWrite", + "description": "cachingMode is the Host Caching mode: None, Read Only, Read Write.", + "type": "string" + }, + "diskName": { + "default": "", + "description": "diskName is the Name of the data disk in the blob storage", + "type": "string" + }, + "diskURI": { + "default": "", + "description": "diskURI is the URI of data disk in the blob storage", + "type": "string" + }, + "fsType": { + "default": "ext4", + "description": "fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "type": "string" + }, + "kind": { + "default": "Shared", + "description": "kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared", + "type": "string" + }, + "readOnly": { + "default": false, + "description": "readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "type": "boolean" + } + }, + "required": [ + "diskName", + "diskURI" + ], + "type": "object" + }, + "io.k8s.api.core.v1.AzureFileVolumeSource": { + "description": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "properties": { + "readOnly": { + "description": "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "type": "boolean" + }, + "secretName": { + "default": "", + "description": "secretName is the name of secret that contains Azure Storage Account Name and Key", + "type": "string" + }, + "shareName": { + "default": "", + "description": "shareName is the azure share Name", + "type": "string" + } + }, + "required": [ + "secretName", + "shareName" + ], + "type": "object" + }, + "io.k8s.api.core.v1.CSIVolumeSource": { + "description": "Represents a source location of a volume to mount, managed by an external CSI driver", + "properties": { + "driver": { + "default": "", + "description": "driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.", + "type": "string" + }, + "fsType": { + "description": "fsType to mount. Ex. 
\"ext4\", \"xfs\", \"ntfs\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.", + "type": "string" + }, + "nodePublishSecretRef": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.LocalObjectReference" + } + ], + "description": "nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed." + }, + "readOnly": { + "description": "readOnly specifies a read-only configuration for the volume. Defaults to false (read/write).", + "type": "boolean" + }, + "volumeAttributes": { + "additionalProperties": { + "default": "", + "type": "string" + }, + "description": "volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.", + "type": "object" + } + }, + "required": [ + "driver" + ], + "type": "object" + }, + "io.k8s.api.core.v1.CephFSVolumeSource": { + "description": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", + "properties": { + "monitors": { + "description": "monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "items": { + "default": "", + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "path": { + "description": "path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /", + "type": "string" + }, + "readOnly": { + "description": "readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "type": "boolean" + }, + "secretFile": { + "description": "secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "type": "string" + }, + "secretRef": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.LocalObjectReference" + } + ], + "description": "secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" + }, + "user": { + "description": "user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "type": "string" + } + }, + "required": [ + "monitors" + ], + "type": "object" + }, + "io.k8s.api.core.v1.CinderVolumeSource": { + "description": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", + "properties": { + "fsType": { + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "type": "string" + }, + "readOnly": { + "description": "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "type": "boolean" + }, + "secretRef": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.LocalObjectReference" + } + ], + "description": "secretRef is optional: points to a secret object containing parameters used to connect to OpenStack." + }, + "volumeID": { + "default": "", + "description": "volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "type": "string" + } + }, + "required": [ + "volumeID" + ], + "type": "object" + }, + "io.k8s.api.core.v1.ClusterTrustBundleProjection": { + "description": "ClusterTrustBundleProjection describes how to select a set of ClusterTrustBundle objects and project their contents into the pod filesystem.", + "properties": { + "labelSelector": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + } + ], + "description": "Select all ClusterTrustBundles that match this label selector. Only has effect if signerName is set. Mutually-exclusive with name. If unset, interpreted as \"match nothing\". If set but empty, interpreted as \"match everything\"." + }, + "name": { + "description": "Select a single ClusterTrustBundle by object name. Mutually-exclusive with signerName and labelSelector.", + "type": "string" + }, + "optional": { + "description": "If true, don't block pod startup if the referenced ClusterTrustBundle(s) aren't available. If using name, then the named ClusterTrustBundle is allowed not to exist. If using signerName, then the combination of signerName and labelSelector is allowed to match zero ClusterTrustBundles.", + "type": "boolean" + }, + "path": { + "default": "", + "description": "Relative path from the volume root to write the bundle.", + "type": "string" + }, + "signerName": { + "description": "Select all ClusterTrustBundles that match this signer name. Mutually-exclusive with name. The contents of all selected ClusterTrustBundles will be unified and deduplicated.", + "type": "string" + } + }, + "required": [ + "path" + ], + "type": "object" + }, + "io.k8s.api.core.v1.ConfigMapEnvSource": { + "description": "ConfigMapEnvSource selects a ConfigMap to populate the environment variables with.\n\nThe contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.", + "properties": { + "name": { + "default": "", + "description": "Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + }, + "optional": { + "description": "Specify whether the ConfigMap must be defined", + "type": "boolean" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.ConfigMapKeySelector": { + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "default": "", + "description": "The key to select.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. 
Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + }, + "optional": { + "description": "Specify whether the ConfigMap or its key must be defined", + "type": "boolean" + } + }, + "required": [ + "key" + ], + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.ConfigMapProjection": { + "description": "Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.", + "properties": { + "items": { + "description": "items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "items": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.KeyToPath" + } + ], + "default": {} + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "name": { + "default": "", + "description": "Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + }, + "optional": { + "description": "optional specify whether the ConfigMap or its keys must be defined", + "type": "boolean" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.ConfigMapVolumeSource": { + "description": "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.", + "properties": { + "defaultMode": { + "description": "defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "format": "int32", + "type": "integer" + }, + "items": { + "description": "items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' 
path or start with '..'.", + "items": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.KeyToPath" + } + ], + "default": {} + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "name": { + "default": "", + "description": "Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + }, + "optional": { + "description": "optional specify whether the ConfigMap or its keys must be defined", + "type": "boolean" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.DownwardAPIProjection": { + "description": "Represents downward API info for projecting into a projected volume. Note that this is identical to a downwardAPI volume source without the default mode.", + "properties": { + "items": { + "description": "Items is a list of DownwardAPIVolume file", + "items": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.DownwardAPIVolumeFile" + } + ], + "default": {} + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.DownwardAPIVolumeFile": { + "description": "DownwardAPIVolumeFile represents information to create the file containing the pod field", + "properties": { + "fieldRef": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.ObjectFieldSelector" + } + ], + "description": "Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported." + }, + "mode": { + "description": "Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "format": "int32", + "type": "integer" + }, + "path": { + "default": "", + "description": "Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'", + "type": "string" + }, + "resourceFieldRef": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.ResourceFieldSelector" + } + ], + "description": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported." + } + }, + "required": [ + "path" + ], + "type": "object" + }, + "io.k8s.api.core.v1.DownwardAPIVolumeSource": { + "description": "DownwardAPIVolumeSource represents a volume containing downward API info. Downward API volumes support ownership management and SELinux relabeling.", + "properties": { + "defaultMode": { + "description": "Optional: mode bits to use on created files by default. Must be a Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "format": "int32", + "type": "integer" + }, + "items": { + "description": "Items is a list of downward API volume file", + "items": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.DownwardAPIVolumeFile" + } + ], + "default": {} + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.EmptyDirVolumeSource": { + "description": "Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling.", + "properties": { + "medium": { + "description": "medium represents what type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + "type": "string" + }, + "sizeLimit": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" + } + ], + "description": "sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.EnvFromSource": { + "description": "EnvFromSource represents the source of a set of ConfigMaps or Secrets", + "properties": { + "configMapRef": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.ConfigMapEnvSource" + } + ], + "description": "The ConfigMap to select from" + }, + "prefix": { + "description": "Optional text to prepend to the name of each environment variable. May consist of any printable ASCII characters except '='.", + "type": "string" + }, + "secretRef": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.SecretEnvSource" + } + ], + "description": "The Secret to select from" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.EnvVar": { + "description": "EnvVar represents an environment variable present in a Container.", + "properties": { + "name": { + "default": "", + "description": "Name of the environment variable. May consist of any printable ASCII characters except '='.", + "type": "string" + }, + "value": { + "description": "Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".", + "type": "string" + }, + "valueFrom": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.EnvVarSource" + } + ], + "description": "Source for the environment variable's value. Cannot be used if value is not empty." 
+ } + }, + "required": [ + "name" + ], + "type": "object" + }, + "io.k8s.api.core.v1.EnvVarSource": { + "description": "EnvVarSource represents a source for the value of an EnvVar.", + "properties": { + "configMapKeyRef": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.ConfigMapKeySelector" + } + ], + "description": "Selects a key of a ConfigMap." + }, + "fieldRef": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.ObjectFieldSelector" + } + ], + "description": "Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['\u003cKEY\u003e']`, `metadata.annotations['\u003cKEY\u003e']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs." + }, + "fileKeyRef": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.FileKeySelector" + } + ], + "description": "FileKeyRef selects a key of the env file. Requires the EnvFiles feature gate to be enabled." + }, + "resourceFieldRef": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.ResourceFieldSelector" + } + ], + "description": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported." + }, + "secretKeyRef": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.SecretKeySelector" + } + ], + "description": "Selects a key of a secret in the pod's namespace" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.EphemeralVolumeSource": { + "description": "Represents an ephemeral volume that is handled by a normal storage driver.", + "properties": { + "volumeClaimTemplate": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.PersistentVolumeClaimTemplate" + } + ], + "description": "Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `\u003cpod name\u003e-\u003cvolume name\u003e` where `\u003cvolume name\u003e` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long).\n\nAn existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster.\n\nThis field is read-only and no changes will be made by Kubernetes to the PVC after it has been created.\n\nRequired, must not be nil." + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.FCVolumeSource": { + "description": "Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling.", + "properties": { + "fsType": { + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.", + "type": "string" + }, + "lun": { + "description": "lun is Optional: FC target lun number", + "format": "int32", + "type": "integer" + }, + "readOnly": { + "description": "readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "type": "boolean" + }, + "targetWWNs": { + "description": "targetWWNs is Optional: FC target worldwide names (WWNs)", + "items": { + "default": "", + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "wwids": { + "description": "wwids Optional: FC volume world wide identifiers (wwids). Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.", + "items": { + "default": "", + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.FileKeySelector": { + "description": "FileKeySelector selects a key of the env file.", + "properties": { + "key": { + "default": "", + "description": "The key within the env file. An invalid key will prevent the pod from starting. The keys defined within a source may consist of any printable ASCII characters except '='. During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.", + "type": "string" + }, + "optional": { + "default": false, + "description": "Specify whether the file or its key must be defined. If the file or key does not exist, then the env var is not published. If optional is set to true and the specified key does not exist, the environment variable will not be set in the Pod's containers.\n\nIf optional is set to false and the specified key does not exist, an error will be returned during Pod creation.", + "type": "boolean" + }, + "path": { + "default": "", + "description": "The path within the volume from which to select the file. Must be relative and may not contain the '..' path or start with '..'.", + "type": "string" + }, + "volumeName": { + "default": "", + "description": "The name of the volume mount containing the env file.", + "type": "string" + } + }, + "required": [ + "volumeName", + "path", + "key" + ], + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.FlexVolumeSource": { + "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + "properties": { + "driver": { + "default": "", + "description": "driver is the name of the driver to use for this volume.", + "type": "string" + }, + "fsType": { + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.", + "type": "string" + }, + "options": { + "additionalProperties": { + "default": "", + "type": "string" + }, + "description": "options is Optional: this field holds extra command options if any.", + "type": "object" + }, + "readOnly": { + "description": "readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "type": "boolean" + }, + "secretRef": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.LocalObjectReference" + } + ], + "description": "secretRef is Optional: reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified.
If the secret object contains more than one secret, all secrets are passed to the plugin scripts." + } + }, + "required": [ + "driver" + ], + "type": "object" + }, + "io.k8s.api.core.v1.FlockerVolumeSource": { + "description": "Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling.", + "properties": { + "datasetName": { + "description": "datasetName is the name of the dataset, stored as metadata -\u003e name on the dataset for Flocker. It should be considered deprecated.", + "type": "string" + }, + "datasetUUID": { + "description": "datasetUUID is the UUID of the dataset. This is the unique identifier of a Flocker dataset.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.GCEPersistentDiskVolumeSource": { + "description": "Represents a Persistent Disk resource in Google Compute Engine.\n\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling.", + "properties": { + "fsType": { + "description": "fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "type": "string" + }, + "partition": { + "description": "partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "format": "int32", + "type": "integer" + }, + "pdName": { + "default": "", + "description": "pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "type": "string" + }, + "readOnly": { + "description": "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "type": "boolean" + } + }, + "required": [ + "pdName" + ], + "type": "object" + }, + "io.k8s.api.core.v1.GitRepoVolumeSource": { + "description": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", + "properties": { + "directory": { + "description": "directory is the target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository.
Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.", + "type": "string" + }, + "repository": { + "default": "", + "description": "repository is the URL", + "type": "string" + }, + "revision": { + "description": "revision is the commit hash for the specified revision.", + "type": "string" + } + }, + "required": [ + "repository" + ], + "type": "object" + }, + "io.k8s.api.core.v1.GlusterfsVolumeSource": { + "description": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", + "properties": { + "endpoints": { + "default": "", + "description": "endpoints is the endpoint name that details Glusterfs topology.", + "type": "string" + }, + "path": { + "default": "", + "description": "path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "type": "string" + }, + "readOnly": { + "description": "readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "type": "boolean" + } + }, + "required": [ + "endpoints", + "path" + ], + "type": "object" + }, + "io.k8s.api.core.v1.HostPathVolumeSource": { + "description": "Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling.", + "properties": { + "path": { + "default": "", + "description": "path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + "type": "string" + }, + "type": { + "description": "type for HostPath volume. Defaults to \"\". More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + "type": "string" + } + }, + "required": [ + "path" + ], + "type": "object" + }, + "io.k8s.api.core.v1.ISCSIVolumeSource": { + "description": "Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.", + "properties": { + "chapAuthDiscovery": { + "description": "chapAuthDiscovery defines whether iSCSI Discovery CHAP authentication is supported", + "type": "boolean" + }, + "chapAuthSession": { + "description": "chapAuthSession defines whether iSCSI Session CHAP authentication is supported", + "type": "boolean" + }, + "fsType": { + "description": "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi", + "type": "string" + }, + "initiatorName": { + "description": "initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface \u003ctarget portal\u003e:\u003cvolume name\u003e will be created for the connection.", + "type": "string" + }, + "iqn": { + "default": "", + "description": "iqn is the target iSCSI Qualified Name.", + "type": "string" + }, + "iscsiInterface": { + "default": "default", + "description": "iscsiInterface is the interface Name that uses an iSCSI transport.
Defaults to 'default' (tcp).", + "type": "string" + }, + "lun": { + "default": 0, + "description": "lun represents iSCSI Target Lun number.", + "format": "int32", + "type": "integer" + }, + "portals": { + "description": "portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "items": { + "default": "", + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "readOnly": { + "description": "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", + "type": "boolean" + }, + "secretRef": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.LocalObjectReference" + } + ], + "description": "secretRef is the CHAP Secret for iSCSI target and initiator authentication" + }, + "targetPortal": { + "default": "", + "description": "targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "type": "string" + } + }, + "required": [ + "targetPortal", + "iqn", + "lun" + ], + "type": "object" + }, + "io.k8s.api.core.v1.ImageVolumeSource": { + "description": "ImageVolumeSource represents a image volume resource.", + "properties": { + "pullPolicy": { + "description": "Policy for pulling OCI objects. Possible values are: Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.", + "type": "string" + }, + "reference": { + "description": "Required: Image or artifact reference to be used. Behaves in the same way as pod.spec.containers[*].image. Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.KeyToPath": { + "description": "Maps a string key to a path within a volume.", + "properties": { + "key": { + "default": "", + "description": "key is the key to project.", + "type": "string" + }, + "mode": { + "description": "mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "format": "int32", + "type": "integer" + }, + "path": { + "default": "", + "description": "path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. 
May not start with the string '..'.", + "type": "string" + } + }, + "required": [ + "key", + "path" + ], + "type": "object" + }, + "io.k8s.api.core.v1.LocalObjectReference": { + "description": "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.", + "properties": { + "name": { + "default": "", + "description": "Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + } + }, + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.NFSVolumeSource": { + "description": "Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.", + "properties": { + "path": { + "default": "", + "description": "path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "type": "string" + }, + "readOnly": { + "description": "readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "type": "boolean" + }, + "server": { + "default": "", + "description": "server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "type": "string" + } + }, + "required": [ + "server", + "path" + ], + "type": "object" + }, + "io.k8s.api.core.v1.NodeAffinity": { + "description": "Node affinity is a group of node affinity scheduling rules.", + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "description": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.", + "items": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.PreferredSchedulingTerm" + } + ], + "default": {} + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.NodeSelector" + } + ], + "description": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node." + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.NodeSelector": { + "description": "A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.", + "properties": { + "nodeSelectorTerms": { + "description": "Required. A list of node selector terms. 
The terms are ORed.", + "items": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.NodeSelectorTerm" + } + ], + "default": {} + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "required": [ + "nodeSelectorTerms" + ], + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.NodeSelectorRequirement": { + "description": "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", + "properties": { + "key": { + "default": "", + "description": "The label key that the selector applies to.", + "type": "string" + }, + "operator": { + "default": "", + "description": "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.", + "type": "string" + }, + "values": { + "description": "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.", + "items": { + "default": "", + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "io.k8s.api.core.v1.NodeSelectorTerm": { + "description": "A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.", + "properties": { + "matchExpressions": { + "description": "A list of node selector requirements by node's labels.", + "items": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.NodeSelectorRequirement" + } + ], + "default": {} + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "matchFields": { + "description": "A list of node selector requirements by node's fields.", + "items": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.NodeSelectorRequirement" + } + ], + "default": {} + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.ObjectFieldSelector": { + "description": "ObjectFieldSelector selects an APIVersioned field of an object.", + "properties": { + "apiVersion": { + "description": "Version of the schema the FieldPath is written in terms of, defaults to \"v1\".", + "type": "string" + }, + "fieldPath": { + "default": "", + "description": "Path of the field to select in the specified API version.", + "type": "string" + } + }, + "required": [ + "fieldPath" + ], + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.PersistentVolumeClaimSpec": { + "description": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes", + "properties": { + "accessModes": { + "description": "accessModes contains the desired access modes the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", + "items": { + "default": "", + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "dataSource": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.TypedLocalObjectReference" + } + ], + "description": "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource." + }, + "dataSourceRef": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.TypedObjectReference" + } + ], + "description": "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled." + }, + "resources": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.VolumeResourceRequirements" + } + ], + "default": {}, + "description": "resources represents the minimum resources the volume should have. Users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources" + }, + "selector": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + } + ], + "description": "selector is a label query over volumes to consider for binding." + }, + "storageClassName": { + "description": "storageClassName is the name of the StorageClass required by the claim. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", + "type": "string" + }, + "volumeAttributesClassName": { + "description": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string or nil value indicates that no VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/", + "type": "string" + }, + "volumeMode": { + "description": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.", + "type": "string" + }, + "volumeName": { + "description": "volumeName is the binding reference to the PersistentVolume backing this claim.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.PersistentVolumeClaimTemplate": { + "description": "PersistentVolumeClaimTemplate is used to produce PersistentVolumeClaim objects as part of an EphemeralVolumeSource.", + "properties": { + "metadata": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + } + ], + "default": {}, + "description": "May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation." + }, + "spec": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.PersistentVolumeClaimSpec" + } + ], + "default": {}, + "description": "The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here." + } + }, + "required": [ + "spec" + ], + "type": "object" + }, + "io.k8s.api.core.v1.PersistentVolumeClaimVolumeSource": { + "description": "PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).", + "properties": { + "claimName": { + "default": "", + "description": "claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + "type": "string" + }, + "readOnly": { + "description": "readOnly will force the ReadOnly setting in VolumeMounts. Default false.", + "type": "boolean" + } + }, + "required": [ + "claimName" + ], + "type": "object" + }, + "io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource": { + "description": "Represents a Photon Controller persistent disk resource.", + "properties": { + "fsType": { + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\".
Implicitly inferred to be \"ext4\" if unspecified.", + "type": "string" + }, + "pdID": { + "default": "", + "description": "pdID is the ID that identifies Photon Controller persistent disk", + "type": "string" + } + }, + "required": [ + "pdID" + ], + "type": "object" + }, + "io.k8s.api.core.v1.PodAffinity": { + "description": "Pod affinity is a group of inter pod affinity scheduling rules.", + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "description": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", + "items": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.WeightedPodAffinityTerm" + } + ], + "default": {} + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "description": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", + "items": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.PodAffinityTerm" + } + ], + "default": {} + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.PodAffinityTerm": { + "description": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key \u003ctopologyKey\u003e matches that of any node on which a pod of the set of pods is running", + "properties": { + "labelSelector": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + } + ], + "description": "A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods." + }, + "matchLabelKeys": { + "description": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. 
Also, matchLabelKeys cannot be set when labelSelector isn't set.", + "items": { + "default": "", + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "mismatchLabelKeys": { + "description": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set.", + "items": { + "default": "", + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "namespaceSelector": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + } + ], + "description": "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces." + }, + "namespaces": { + "description": "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\".", + "items": { + "default": "", + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "topologyKey": { + "default": "", + "description": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.", + "type": "string" + } + }, + "required": [ + "topologyKey" + ], + "type": "object" + }, + "io.k8s.api.core.v1.PodAntiAffinity": { + "description": "Pod anti affinity is a group of inter pod anti affinity scheduling rules.", + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "description": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and subtracting \"weight\" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", + "items": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.WeightedPodAffinityTerm" + } + ], + "default": {} + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "description": "If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", + "items": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.PodAffinityTerm" + } + ], + "default": {} + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.PodCertificateProjection": { + "description": "PodCertificateProjection provides a private key and X.509 certificate in the pod filesystem.", + "properties": { + "certificateChainPath": { + "description": "Write the certificate chain at this path in the projected volume.\n\nMost applications should use credentialBundlePath. When using keyPath and certificateChainPath, your application needs to check that the key and leaf certificate are consistent, because it is possible to read the files mid-rotation.", + "type": "string" + }, + "credentialBundlePath": { + "description": "Write the credential bundle at this path in the projected volume.\n\nThe credential bundle is a single file that contains multiple PEM blocks. The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private key.\n\nThe remaining blocks are CERTIFICATE blocks, containing the issued certificate chain from the signer (leaf and any intermediates).\n\nUsing credentialBundlePath lets your Pod's application code make a single atomic read that retrieves a consistent key and certificate chain. If you project them to separate files, your application code will need to additionally check that the leaf certificate was issued to the key.", + "type": "string" + }, + "keyPath": { + "description": "Write the key at this path in the projected volume.\n\nMost applications should use credentialBundlePath. When using keyPath and certificateChainPath, your application needs to check that the key and leaf certificate are consistent, because it is possible to read the files mid-rotation.", + "type": "string" + }, + "keyType": { + "description": "The type of keypair Kubelet will generate for the pod.\n\nValid values are \"RSA3072\", \"RSA4096\", \"ECDSAP256\", \"ECDSAP384\", \"ECDSAP521\", and \"ED25519\".", + "type": "string" + }, + "maxExpirationSeconds": { + "description": "maxExpirationSeconds is the maximum lifetime permitted for the certificate.\n\nKubelet copies this value verbatim into the PodCertificateRequests it generates for this projection.\n\nIf omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver will reject values shorter than 3600 (1 hour).
The maximum allowable value is 7862400 (91 days).\n\nThe signer implementation is then free to issue a certificate with any lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 seconds (1 hour). This constraint is enforced by kube-apiserver. `kubernetes.io` signers will never issue certificates with a lifetime longer than 24 hours.", + "format": "int32", + "type": "integer" + }, + "signerName": { + "description": "Kubelet's generated CSRs will be addressed to this signer.", + "type": "string" + }, + "userAnnotations": { + "additionalProperties": { + "default": "", + "type": "string" + }, + "description": "userAnnotations allow pod authors to pass additional information to the signer implementation. Kubernetes does not restrict or validate this metadata in any way.\n\nThese values are copied verbatim into the `spec.unverifiedUserAnnotations` field of the PodCertificateRequest objects that Kubelet creates.\n\nEntries are subject to the same validation as object metadata annotations, with the addition that all keys must be domain-prefixed. No restrictions are placed on values, except an overall size limitation on the entire field.\n\nSigners should document the keys and values they support. Signers should deny requests that contain keys they do not recognize.", + "type": "object" + } + }, + "required": [ + "signerName", + "keyType" + ], + "type": "object" + }, + "io.k8s.api.core.v1.PortworxVolumeSource": { + "description": "PortworxVolumeSource represents a Portworx volume resource.", + "properties": { + "fsType": { + "description": "fSType represents the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "type": "string" + }, + "readOnly": { + "description": "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "type": "boolean" + }, + "volumeID": { + "default": "", + "description": "volumeID uniquely identifies a Portworx volume", + "type": "string" + } + }, + "required": [ + "volumeID" + ], + "type": "object" + }, + "io.k8s.api.core.v1.PreferredSchedulingTerm": { + "description": "An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).", + "properties": { + "preference": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.NodeSelectorTerm" + } + ], + "default": {}, + "description": "A node selector term, associated with the corresponding weight." + }, + "weight": { + "default": 0, + "description": "Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.", + "format": "int32", + "type": "integer" + } + }, + "required": [ + "weight", + "preference" + ], + "type": "object" + }, + "io.k8s.api.core.v1.ProjectedVolumeSource": { + "description": "Represents a projected volume source", + "properties": { + "defaultMode": { + "description": "defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting.
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "format": "int32", + "type": "integer" + }, + "sources": { + "description": "sources is the list of volume projections. Each entry in this list handles one source.", + "items": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.VolumeProjection" + } + ], + "default": {} + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.QuobyteVolumeSource": { + "description": "Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling.", + "properties": { + "group": { + "description": "group to map volume access to. Default is no group.", + "type": "string" + }, + "readOnly": { + "description": "readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.", + "type": "boolean" + }, + "registry": { + "default": "", + "description": "registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes", + "type": "string" + }, + "tenant": { + "description": "tenant owning the given Quobyte volume in the Backend. Used with dynamically provisioned Quobyte volumes; value is set by the plugin.", + "type": "string" + }, + "user": { + "description": "user to map volume access to. Defaults to serviceaccount user.", + "type": "string" + }, + "volume": { + "default": "", + "description": "volume is a string that references an already created Quobyte volume by name.", + "type": "string" + } + }, + "required": [ + "registry", + "volume" + ], + "type": "object" + }, + "io.k8s.api.core.v1.RBDVolumeSource": { + "description": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", + "properties": { + "fsType": { + "description": "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", + "type": "string" + }, + "image": { + "default": "", + "description": "image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "type": "string" + }, + "keyring": { + "default": "/etc/ceph/keyring", + "description": "keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "type": "string" + }, + "monitors": { + "description": "monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "items": { + "default": "", + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "pool": { + "default": "rbd", + "description": "pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "type": "string" + }, + "readOnly": { + "description": "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "type": "boolean" + }, + "secretRef": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.LocalObjectReference" + } + ], + "description": "secretRef is the name of the authentication secret for RBDUser. If provided, it overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + }, + "user": { + "default": "admin", + "description": "user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "type": "string" + } + }, + "required": [ + "monitors", + "image" + ], + "type": "object" + }, + "io.k8s.api.core.v1.ResourceClaim": { + "description": "ResourceClaim references one entry in PodSpec.ResourceClaims.", + "properties": { + "name": { + "default": "", + "description": "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.", + "type": "string" + }, + "request": { + "description": "Request is the name chosen for a request in the referenced claim. If empty, everything from the claim is made available, otherwise only the result of this request.", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "io.k8s.api.core.v1.ResourceFieldSelector": { + "description": "ResourceFieldSelector represents container resources (cpu, memory) and their output format", + "properties": { + "containerName": { + "description": "Container name: required for volumes, optional for env vars", + "type": "string" + }, + "divisor": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" + } + ], + "description": "Specifies the output format of the exposed resources, defaults to \"1\"" + }, + "resource": { + "default": "", + "description": "Required: resource to select", + "type": "string" + } + }, + "required": [ + "resource" + ], + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.ResourceRequirements": { + "description": "ResourceRequirements describes the compute resource requirements.", + "properties": { + "claims": { + "description": "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\n\nThis field depends on the DynamicResourceAllocation feature gate.\n\nThis field is immutable. It can only be set for containers.", + "items": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.ResourceClaim" + } + ], + "default": {} + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + }, + "limits": { + "additionalProperties": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "description": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "type": "object" + }, + "requests": { + "additionalProperties": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "description": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits.
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "type": "object" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.ScaleIOVolumeSource": { + "description": "ScaleIOVolumeSource represents a persistent ScaleIO volume", + "properties": { + "fsType": { + "default": "xfs", + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\".", + "type": "string" + }, + "gateway": { + "default": "", + "description": "gateway is the host address of the ScaleIO API Gateway.", + "type": "string" + }, + "protectionDomain": { + "description": "protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.", + "type": "string" + }, + "readOnly": { + "description": "readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "type": "boolean" + }, + "secretRef": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.LocalObjectReference" + } + ], + "description": "secretRef references the secret for ScaleIO user and other sensitive information. If this is not provided, the Login operation will fail." + }, + "sslEnabled": { + "description": "sslEnabled is a flag to enable/disable SSL communication with the Gateway. Default is false.", + "type": "boolean" + }, + "storageMode": { + "default": "ThinProvisioned", + "description": "storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.", + "type": "string" + }, + "storagePool": { + "description": "storagePool is the ScaleIO Storage Pool associated with the protection domain.", + "type": "string" + }, + "system": { + "default": "", + "description": "system is the name of the storage system as configured in ScaleIO.", + "type": "string" + }, + "volumeName": { + "description": "volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source.", + "type": "string" + } + }, + "required": [ + "gateway", + "system", + "secretRef" + ], + "type": "object" + }, + "io.k8s.api.core.v1.SecretEnvSource": { + "description": "SecretEnvSource selects a Secret to populate the environment variables with.\n\nThe contents of the target Secret's Data field will represent the key-value pairs as environment variables.", + "properties": { + "name": { + "default": "", + "description": "Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + }, + "optional": { + "description": "Specify whether the Secret must be defined", + "type": "boolean" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.SecretKeySelector": { + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "default": "", + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + }, + "optional": { + "description": "Specify whether the Secret or its key must be defined", + "type": "boolean" + } + }, + "required": [ + "key" + ], + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.SecretProjection": { + "description": "Adapts a secret into a projected volume.\n\nThe contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.", + "properties": { + "items": { + "description": "items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "items": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.KeyToPath" + } + ], + "default": {} + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "name": { + "default": "", + "description": "Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + }, + "optional": { + "description": "optional field specify whether the Secret or its key must be defined", + "type": "boolean" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.SecretVolumeSource": { + "description": "Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.", + "properties": { + "defaultMode": { + "description": "defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "format": "int32", + "type": "integer" + }, + "items": { + "description": "items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' 
path or start with '..'.", + "items": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.KeyToPath" + } + ], + "default": {} + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "optional": { + "description": "optional field specify whether the Secret or its keys must be defined", + "type": "boolean" + }, + "secretName": { + "description": "secretName is the name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.ServiceAccountTokenProjection": { + "description": "ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pods runtime filesystem for use against APIs (Kubernetes API Server or otherwise).", + "properties": { + "audience": { + "description": "audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.", + "type": "string" + }, + "expirationSeconds": { + "description": "expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes.", + "format": "int64", + "type": "integer" + }, + "path": { + "default": "", + "description": "path is the path relative to the mount point of the file to project the token into.", + "type": "string" + } + }, + "required": [ + "path" + ], + "type": "object" + }, + "io.k8s.api.core.v1.StorageOSVolumeSource": { + "description": "Represents a StorageOS persistent volume resource.", + "properties": { + "fsType": { + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "type": "string" + }, + "readOnly": { + "description": "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "type": "boolean" + }, + "secretRef": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.LocalObjectReference" + } + ], + "description": "secretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted." + }, + "volumeName": { + "description": "volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.", + "type": "string" + }, + "volumeNamespace": { + "description": "volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. 
Namespaces that do not pre-exist within StorageOS will be created.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.Toleration": { + "description": "The pod this Toleration is attached to tolerates any taint that matches the triple \u003ckey,value,effect\u003e using the matching operator \u003coperator\u003e.", + "properties": { + "effect": { + "description": "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.", + "type": "string" + }, + "key": { + "description": "Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.", + "type": "string" + }, + "operator": { + "description": "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.", + "type": "string" + }, + "tolerationSeconds": { + "description": "TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.", + "format": "int64", + "type": "integer" + }, + "value": { + "description": "Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.TypedLocalObjectReference": { + "description": "TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace.", + "properties": { + "apiGroup": { + "description": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", + "type": "string" + }, + "kind": { + "default": "", + "description": "Kind is the type of resource being referenced", + "type": "string" + }, + "name": { + "default": "", + "description": "Name is the name of resource being referenced", + "type": "string" + } + }, + "required": [ + "kind", + "name" + ], + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.TypedObjectReference": { + "description": "TypedObjectReference contains enough information to let you locate the typed referenced object", + "properties": { + "apiGroup": { + "description": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required.", + "type": "string" + }, + "kind": { + "default": "", + "description": "Kind is the type of resource being referenced", + "type": "string" + }, + "name": { + "default": "", + "description": "Name is the name of resource being referenced", + "type": "string" + }, + "namespace": { + "description": "Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.", + "type": "string" + } + }, + "required": [ + "kind", + "name" + ], + "type": "object" + }, + "io.k8s.api.core.v1.Volume": { + "description": "Volume represents a named volume in a pod that may be accessed by any container in the pod.", + "properties": { + "awsElasticBlockStore": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource" + } + ], + "description": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore" + }, + "azureDisk": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.AzureDiskVolumeSource" + } + ], + "description": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type are redirected to the disk.csi.azure.com CSI driver." + }, + "azureFile": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.AzureFileVolumeSource" + } + ], + "description": "azureFile represents an Azure File Service mount on the host and bind mount to the pod. Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type are redirected to the file.csi.azure.com CSI driver." + }, + "cephfs": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.CephFSVolumeSource" + } + ], + "description": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported." + }, + "cinder": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.CinderVolumeSource" + } + ], + "description": "cinder represents a cinder volume attached and mounted on kubelets host machine. Deprecated: Cinder is deprecated. All operations for the in-tree cinder type are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md" + }, + "configMap": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.ConfigMapVolumeSource" + } + ], + "description": "configMap represents a configMap that should populate this volume" + }, + "csi": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.CSIVolumeSource" + } + ], + "description": "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers." 
+ }, + "downwardAPI": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.DownwardAPIVolumeSource" + } + ], + "description": "downwardAPI represents downward API about the pod that should populate this volume" + }, + "emptyDir": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.EmptyDirVolumeSource" + } + ], + "description": "emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir" + }, + "ephemeral": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.EphemeralVolumeSource" + } + ], + "description": "ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time." + }, + "fc": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.FCVolumeSource" + } + ], + "description": "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod." + }, + "flexVolume": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.FlexVolumeSource" + } + ], + "description": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead." + }, + "flocker": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.FlockerVolumeSource" + } + ], + "description": "flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported." + }, + "gcePersistentDisk": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.GCEPersistentDiskVolumeSource" + } + ], + "description": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" + }, + "gitRepo": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.GitRepoVolumeSource" + } + ], + "description": "gitRepo represents a git repository at a particular revision. Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container." 
+ }, + "glusterfs": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.GlusterfsVolumeSource" + } + ], + "description": "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported." + }, + "hostPath": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.HostPathVolumeSource" + } + ], + "description": "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath" + }, + "image": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.ImageVolumeSource" + } + ], + "description": "image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. The volume is resolved at pod startup depending on which PullPolicy value is provided:\n\n- Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.\n\nThe volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type." + }, + "iscsi": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.ISCSIVolumeSource" + } + ], + "description": "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi" + }, + "name": { + "default": "", + "description": "name of the volume. Must be a DNS_LABEL and unique within the pod. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + }, + "nfs": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.NFSVolumeSource" + } + ], + "description": "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" + }, + "persistentVolumeClaim": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.PersistentVolumeClaimVolumeSource" + } + ], + "description": "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims" + }, + "photonPersistentDisk": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource" + } + ], + "description": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported." + }, + "portworxVolume": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.PortworxVolumeSource" + } + ], + "description": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine. Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate is on." + }, + "projected": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.ProjectedVolumeSource" + } + ], + "description": "projected items for all in one resources secrets, configmaps, and downward API" + }, + "quobyte": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.QuobyteVolumeSource" + } + ], + "description": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime. Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported." + }, + "rbd": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.RBDVolumeSource" + } + ], + "description": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported." + }, + "scaleIO": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.ScaleIOVolumeSource" + } + ], + "description": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported." + }, + "secret": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.SecretVolumeSource" + } + ], + "description": "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret" + }, + "storageos": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.StorageOSVolumeSource" + } + ], + "description": "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported." + }, + "vsphereVolume": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource" + } + ], + "description": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. Deprecated: VsphereVolume is deprecated. 
All operations for the in-tree vsphereVolume type are redirected to the csi.vsphere.vmware.com CSI driver." + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "io.k8s.api.core.v1.VolumeMount": { + "description": "VolumeMount describes a mounting of a Volume within a container.", + "properties": { + "mountPath": { + "default": "", + "description": "Path within the container at which the volume should be mounted. Must not contain ':'.", + "type": "string" + }, + "mountPropagation": { + "description": "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified (which defaults to None).", + "type": "string" + }, + "name": { + "default": "", + "description": "This must match the Name of a Volume.", + "type": "string" + }, + "readOnly": { + "description": "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.", + "type": "boolean" + }, + "recursiveReadOnly": { + "description": "RecursiveReadOnly specifies whether read-only mounts should be handled recursively.\n\nIf ReadOnly is false, this field has no meaning and must be unspecified.\n\nIf ReadOnly is true, and this field is set to Disabled, the mount is not made recursively read-only. If this field is set to IfPossible, the mount is made recursively read-only, if it is supported by the container runtime. If this field is set to Enabled, the mount is made recursively read-only if it is supported by the container runtime, otherwise the pod will not be started and an error will be generated to indicate the reason.\n\nIf this field is set to IfPossible or Enabled, MountPropagation must be set to None (or be unspecified, which defaults to None).\n\nIf this field is not specified, it is treated as an equivalent of Disabled.", + "type": "string" + }, + "subPath": { + "description": "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).", + "type": "string" + }, + "subPathExpr": { + "description": "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive.", + "type": "string" + } + }, + "required": [ + "name", + "mountPath" + ], + "type": "object" + }, + "io.k8s.api.core.v1.VolumeProjection": { + "description": "Projection that may be projected along with other supported volume types. Exactly one of these fields must be set.", + "properties": { + "clusterTrustBundle": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.ClusterTrustBundleProjection" + } + ], + "description": "ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field of ClusterTrustBundle objects in an auto-updating file.\n\nAlpha, gated by the ClusterTrustBundleProjection feature gate.\n\nClusterTrustBundle objects can either be selected by name, or by the combination of signer name and a label selector.\n\nKubelet performs aggressive normalization of the PEM contents written into the pod filesystem. Esoteric PEM features such as inter-block comments and block headers are stripped. Certificates are deduplicated. 
The ordering of certificates within the file is arbitrary, and Kubelet may change the order over time." + }, + "configMap": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.ConfigMapProjection" + } + ], + "description": "configMap information about the configMap data to project" + }, + "downwardAPI": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.DownwardAPIProjection" + } + ], + "description": "downwardAPI information about the downwardAPI data to project" + }, + "podCertificate": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.PodCertificateProjection" + } + ], + "description": "Projects an auto-rotating credential bundle (private key and certificate chain) that the pod can use either as a TLS client or server.\n\nKubelet generates a private key and uses it to send a PodCertificateRequest to the named signer. Once the signer approves the request and issues a certificate chain, Kubelet writes the key and certificate chain to the pod filesystem. The pod does not start until certificates have been issued for each podCertificate projected volume source in its spec.\n\nKubelet will begin trying to rotate the certificate at the time indicated by the signer using the PodCertificateRequest.Status.BeginRefreshAt timestamp.\n\nKubelet can write a single file, indicated by the credentialBundlePath field, or separate files, indicated by the keyPath and certificateChainPath fields.\n\nThe credential bundle is a single file in PEM format. The first PEM entry is the private key (in PKCS#8 format), and the remaining PEM entries are the certificate chain issued by the signer (typically, signers will return their certificate chain in leaf-to-root order).\n\nPrefer using the credential bundle format, since your application code can read it atomically. If you use keyPath and certificateChainPath, your application must make two separate file reads. If these coincide with a certificate rotation, it is possible that the private key and leaf certificate you read may not correspond to each other. Your application will need to check for this condition, and re-read until they are consistent.\n\nThe named signer controls chooses the format of the certificate it issues; consult the signer implementation's documentation to learn how to use the certificates it issues." + }, + "secret": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.SecretProjection" + } + ], + "description": "secret information about the secret data to project" + }, + "serviceAccountToken": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.ServiceAccountTokenProjection" + } + ], + "description": "serviceAccountToken is information about the serviceAccountToken data to project" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.VolumeResourceRequirements": { + "description": "VolumeResourceRequirements describes the storage resource requirements for a volume.", + "properties": { + "limits": { + "additionalProperties": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "description": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "type": "object" + }, + "requests": { + "additionalProperties": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "description": "Requests describes the minimum amount of compute resources required. 
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "type": "object" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource": { + "description": "Represents a vSphere volume resource.", + "properties": { + "fsType": { + "description": "fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "type": "string" + }, + "storagePolicyID": { + "description": "storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.", + "type": "string" + }, + "storagePolicyName": { + "description": "storagePolicyName is the storage Policy Based Management (SPBM) profile name.", + "type": "string" + }, + "volumePath": { + "default": "", + "description": "volumePath is the path that identifies vSphere volume vmdk", + "type": "string" + } + }, + "required": [ + "volumePath" + ], + "type": "object" + }, + "io.k8s.api.core.v1.WeightedPodAffinityTerm": { + "description": "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)", + "properties": { + "podAffinityTerm": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.PodAffinityTerm" + } + ], + "default": {}, + "description": "Required. A pod affinity term, associated with the corresponding weight." + }, + "weight": { + "default": 0, + "description": "weight associated with matching the corresponding podAffinityTerm, in the range 1-100.", + "format": "int32", + "type": "integer" + } + }, + "required": [ + "weight", + "podAffinityTerm" + ], + "type": "object" + }, + "io.k8s.apimachinery.pkg.api.resource.Quantity": { + "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n\n\t(Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \"+\" | \"-\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) 
This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", + "oneOf": [ + { + "type": "string" + }, + { + "type": "number" + } + ] + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1": { + "description": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:\u003cname\u003e', where \u003cname\u003e is the name of a field in a struct, or key in a map 'v:\u003cvalue\u003e', where \u003cvalue\u003e is the exact json formatted value of a list item 'i:\u003cindex\u003e', where \u003cindex\u003e is position of a item in a list 'k:\u003ckeys\u003e', where \u003ckeys\u003e is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff", + "type": "object" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector": { + "description": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", + "properties": { + "matchExpressions": { + "description": "matchExpressions is a list of label selector requirements. The requirements are ANDed.", + "items": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement" + } + ], + "default": {} + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "matchLabels": { + "additionalProperties": { + "default": "", + "type": "string" + }, + "description": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". 
The requirements are ANDed.", + "type": "object" + } + }, + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement": { + "description": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", + "properties": { + "key": { + "default": "", + "description": "key is the label key that the selector applies to.", + "type": "string" + }, + "operator": { + "default": "", + "description": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.", + "type": "string" + }, + "values": { + "description": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", + "items": { + "default": "", + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry": { + "description": "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.", + "type": "string" + }, + "fieldsType": { + "description": "FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: \"FieldsV1\"", + "type": "string" + }, + "fieldsV1": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1" + } + ], + "description": "FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type." + }, + "manager": { + "description": "Manager is an identifier of the workflow managing these fields.", + "type": "string" + }, + "operation": { + "description": "Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.", + "type": "string" + }, + "subresource": { + "description": "Subresource is the name of the subresource used to update that object, or empty string if the object was updated through the main resource. The value of this field is used to distinguish between managers, even if they share the same name. For example, a status update will be distinct from a regular update using the same manager name. Note that the APIVersion field is not related to the Subresource field and it always corresponds to the version of the main resource.", + "type": "string" + }, + "time": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + } + ], + "description": "Time is the timestamp of when the ManagedFields entry was added. The timestamp will also be updated if a field is added, the manager changes any of the owned fields value or removes a field. The timestamp does not update when a field is removed from the entry because another manager took it over." 
+ } + }, + "type": "object" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta": { + "description": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", + "properties": { + "annotations": { + "additionalProperties": { + "default": "", + "type": "string" + }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "type": "object" + }, + "creationTimestamp": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + } + ], + "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "deletionGracePeriodSeconds": { + "description": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.", + "format": "int64", + "type": "integer" + }, + "deletionTimestamp": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + } + ], + "description": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "finalizers": { + "description": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.", + "items": { + "default": "", + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "set", + "x-kubernetes-patch-strategy": "merge" + }, + "generateName": { + "description": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will return a 409.\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", + "type": "string" + }, + "generation": { + "description": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", + "format": "int64", + "type": "integer" + }, + "labels": { + "additionalProperties": { + "default": "", + "type": "string" + }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "type": "object" + }, + "managedFields": { + "description": "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.", + "items": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry" + } + ], + "default": {} + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "name": { + "description": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names", + "type": "string" + }, + "namespace": { + "description": "Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces", + "type": "string" + }, + "ownerReferences": { + "description": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", + "items": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference" + } + ], + "default": {} + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "uid" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "uid", + "x-kubernetes-patch-strategy": "merge" + }, + "resourceVersion": { + "description": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", + "type": "string" + }, + "selfLink": { + "description": "Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.", + "type": "string" + }, + "uid": { + "description": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference": { + "description": "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.", + "properties": { + "apiVersion": { + "default": "", + "description": "API version of the referent.", + "type": "string" + }, + "blockOwnerDeletion": { + "description": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.", + "type": "boolean" + }, + "controller": { + "description": "If true, this reference points to the managing controller.", + "type": "boolean" + }, + "kind": { + "default": "", + "description": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names", + "type": "string" + }, + "uid": { + "default": "", + "description": "UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids", + "type": "string" + } + }, + "required": [ + "apiVersion", + "kind", + "name", + "uid" + ], + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.Time": { + "description": "Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.", + "format": "date-time", + "type": "string" + } + } + } +} \ No newline at end of file diff --git a/manifests/experimental-e2e.yaml b/manifests/experimental-e2e.yaml index edd046e6a..6536baf93 100644 --- a/manifests/experimental-e2e.yaml +++ b/manifests/experimental-e2e.yaml @@ -1498,12 +1498,12 @@ spec: When Progressing is True and Reason is RollingOut, the ClusterExtension has one or more ClusterExtensionRevisions in active roll out. - When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition. + When the ClusterExtension is sourced from a catalog, it surfaces deprecation conditions based on catalog metadata. These are indications from a package owner to guide users away from a particular package, channel, or bundle: - - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. - - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. - - PackageDeprecated is set if the requested package is marked deprecated in the catalog. - - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. + - BundleDeprecated is True if the installed bundle is marked deprecated, False if not deprecated, or Unknown if no bundle is installed yet or if catalog data is unavailable. + - ChannelDeprecated is True if any requested channel is marked deprecated, False if not deprecated, or Unknown if catalog data is unavailable. + - PackageDeprecated is True if the requested package is marked deprecated, False if not deprecated, or Unknown if catalog data is unavailable. + - Deprecated is a rollup condition that is True when any deprecation exists, False when none exist, or Unknown when catalog data is unavailable. items: description: Condition contains details for one aspect of the current state of this API Resource. diff --git a/manifests/experimental.yaml b/manifests/experimental.yaml index 877552395..0b750139d 100644 --- a/manifests/experimental.yaml +++ b/manifests/experimental.yaml @@ -1459,12 +1459,12 @@ spec: When Progressing is True and Reason is RollingOut, the ClusterExtension has one or more ClusterExtensionRevisions in active roll out. - When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition. + When the ClusterExtension is sourced from a catalog, it surfaces deprecation conditions based on catalog metadata. These are indications from a package owner to guide users away from a particular package, channel, or bundle: - - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. - - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. - - PackageDeprecated is set if the requested package is marked deprecated in the catalog. 
- - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. + - BundleDeprecated is True if the installed bundle is marked deprecated, False if not deprecated, or Unknown if no bundle is installed yet or if catalog data is unavailable. + - ChannelDeprecated is True if any requested channel is marked deprecated, False if not deprecated, or Unknown if catalog data is unavailable. + - PackageDeprecated is True if the requested package is marked deprecated, False if not deprecated, or Unknown if catalog data is unavailable. + - Deprecated is a rollup condition that is True when any deprecation exists, False when none exist, or Unknown when catalog data is unavailable. items: description: Condition contains details for one aspect of the current state of this API Resource. diff --git a/manifests/standard-e2e.yaml b/manifests/standard-e2e.yaml index 51c3b412c..3ae2938d8 100644 --- a/manifests/standard-e2e.yaml +++ b/manifests/standard-e2e.yaml @@ -1119,12 +1119,12 @@ spec: When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts. When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery. - When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition. + When the ClusterExtension is sourced from a catalog, it surfaces deprecation conditions based on catalog metadata. These are indications from a package owner to guide users away from a particular package, channel, or bundle: - - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. - - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. - - PackageDeprecated is set if the requested package is marked deprecated in the catalog. - - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. + - BundleDeprecated is True if the installed bundle is marked deprecated, False if not deprecated, or Unknown if no bundle is installed yet or if catalog data is unavailable. + - ChannelDeprecated is True if any requested channel is marked deprecated, False if not deprecated, or Unknown if catalog data is unavailable. + - PackageDeprecated is True if the requested package is marked deprecated, False if not deprecated, or Unknown if catalog data is unavailable. + - Deprecated is a rollup condition that is True when any deprecation exists, False when none exist, or Unknown when catalog data is unavailable. items: description: Condition contains details for one aspect of the current state of this API Resource. diff --git a/manifests/standard.yaml b/manifests/standard.yaml index 16c489f76..2fc75569c 100644 --- a/manifests/standard.yaml +++ b/manifests/standard.yaml @@ -1080,12 +1080,12 @@ spec: When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts. When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery. - When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition. + When the ClusterExtension is sourced from a catalog, it surfaces deprecation conditions based on catalog metadata. 
These are indications from a package owner to guide users away from a particular package, channel, or bundle: - - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. - - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. - - PackageDeprecated is set if the requested package is marked deprecated in the catalog. - - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. + - BundleDeprecated is True if the installed bundle is marked deprecated, False if not deprecated, or Unknown if no bundle is installed yet or if catalog data is unavailable. + - ChannelDeprecated is True if any requested channel is marked deprecated, False if not deprecated, or Unknown if catalog data is unavailable. + - PackageDeprecated is True if the requested package is marked deprecated, False if not deprecated, or Unknown if catalog data is unavailable. + - Deprecated is a rollup condition that is True when any deprecation exists, False when none exist, or Unknown when catalog data is unavailable. items: description: Condition contains details for one aspect of the current state of this API Resource. diff --git a/openshift/operator-controller/manifests-experimental.yaml b/openshift/operator-controller/manifests-experimental.yaml index 4a8151592..5e5ce11d6 100644 --- a/openshift/operator-controller/manifests-experimental.yaml +++ b/openshift/operator-controller/manifests-experimental.yaml @@ -671,12 +671,12 @@ spec: When Progressing is True and Reason is RollingOut, the ClusterExtension has one or more ClusterExtensionRevisions in active roll out. - When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition. + When the ClusterExtension is sourced from a catalog, it surfaces deprecation conditions based on catalog metadata. These are indications from a package owner to guide users away from a particular package, channel, or bundle: - - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. - - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. - - PackageDeprecated is set if the requested package is marked deprecated in the catalog. - - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. + - BundleDeprecated is True if the installed bundle is marked deprecated, False if not deprecated, or Unknown if no bundle is installed yet or if catalog data is unavailable. + - ChannelDeprecated is True if any requested channel is marked deprecated, False if not deprecated, or Unknown if catalog data is unavailable. + - PackageDeprecated is True if the requested package is marked deprecated, False if not deprecated, or Unknown if catalog data is unavailable. + - Deprecated is a rollup condition that is True when any deprecation exists, False when none exist, or Unknown when catalog data is unavailable. items: description: Condition contains details for one aspect of the current state of this API Resource. properties: diff --git a/openshift/operator-controller/manifests.yaml b/openshift/operator-controller/manifests.yaml index 529c0f648..015d42889 100644 --- a/openshift/operator-controller/manifests.yaml +++ b/openshift/operator-controller/manifests.yaml @@ -579,12 +579,12 @@ spec: When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts. 
When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery. - When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition. + When the ClusterExtension is sourced from a catalog, it surfaces deprecation conditions based on catalog metadata. These are indications from a package owner to guide users away from a particular package, channel, or bundle: - - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. - - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. - - PackageDeprecated is set if the requested package is marked deprecated in the catalog. - - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. + - BundleDeprecated is True if the installed bundle is marked deprecated, False if not deprecated, or Unknown if no bundle is installed yet or if catalog data is unavailable. + - ChannelDeprecated is True if any requested channel is marked deprecated, False if not deprecated, or Unknown if catalog data is unavailable. + - PackageDeprecated is True if the requested package is marked deprecated, False if not deprecated, or Unknown if catalog data is unavailable. + - Deprecated is a rollup condition that is True when any deprecation exists, False when none exist, or Unknown when catalog data is unavailable. items: description: Condition contains details for one aspect of the current state of this API Resource. properties: diff --git a/requirements.txt b/requirements.txt index 140b20bb4..19dcbbfd5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -18,7 +18,7 @@ mkdocs-material==9.7.1 mkdocs-material-extensions==1.3.1 packaging==25.0 paginate==0.5.7 -pathspec==1.0.1 +pathspec==1.0.3 platformdirs==4.5.1 Pygments==2.19.2 pymdown-extensions==10.20 @@ -27,7 +27,7 @@ python-dateutil==2.9.0.post0 PyYAML==6.0.3 pyyaml_env_tag==1.1 readtime==3.0.0 -regex==2025.11.3 +regex==2026.1.15 requests==2.32.5 six==1.17.0 soupsieve==2.8.1 diff --git a/test/e2e/features/uninstall.feature b/test/e2e/features/uninstall.feature new file mode 100644 index 000000000..e14b8494f --- /dev/null +++ b/test/e2e/features/uninstall.feature @@ -0,0 +1,42 @@ +Feature: Uninstall ClusterExtension + + As an OLM user I would like to uninstall a cluster extension, + removing all resources previously installed/updated through the extension. 
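A minimal sketch of consuming the deprecation conditions documented in the CRD manifests above (illustrative only; the helper name is hypothetical, and it assumes apimachinery's condition utilities and the condition type names shown in those descriptions):

package example

import (
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// deprecationSummary interprets the rollup condition as documented above:
// True means some deprecation exists, False means none, Unknown means the
// catalog data was unavailable.
func deprecationSummary(conds []metav1.Condition) string {
	c := meta.FindStatusCondition(conds, "Deprecated")
	switch {
	case c == nil, c.Status == metav1.ConditionUnknown:
		return "deprecation status unknown"
	case c.Status == metav1.ConditionTrue:
		return "deprecated: " + c.Message
	default:
		return "not deprecated"
	}
}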
+
+  Background:
+    Given OLM is available
+    And ClusterCatalog "test" serves bundles
+    And ServiceAccount "olm-sa" with needed permissions is available in ${TEST_NAMESPACE}
+    And ClusterExtension is applied
+      """
+      apiVersion: olm.operatorframework.io/v1
+      kind: ClusterExtension
+      metadata:
+        name: ${NAME}
+      spec:
+        namespace: ${TEST_NAMESPACE}
+        serviceAccount:
+          name: olm-sa
+        source:
+          sourceType: Catalog
+          catalog:
+            packageName: test
+            selector:
+              matchLabels:
+                "olm.operatorframework.io/metadata.name": test-catalog
+      """
+    And bundle "test-operator.1.2.0" is installed in version "1.2.0"
+    And ClusterExtension is rolled out
+    And ClusterExtension resources are created and labeled
+
+  Scenario: Removing ClusterExtension triggers the extension uninstall, eventually removing all installed resources
+    When ClusterExtension is removed
+    Then the ClusterExtension's constituent resources are removed
+
+  Scenario: Removing ClusterExtension resources leads to all installed resources being removed even if the service account is no longer present
+    When resource "serviceaccount/olm-sa" is removed
+    # Wait for the service account to be gone before removing the extension, so that cleanup is exercised
+    # without the service account and its permissions being present on the cluster
+    And resource "serviceaccount/olm-sa" is eventually not found
+    And ClusterExtension is removed
+    Then the ClusterExtension's constituent resources are removed
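The steps in this feature assert that constituent resources carry OLM's owner labels; a small illustrative helper (hypothetical, not part of the test suite) that builds the matching label selector from the labels checked in steps.go below:

package example

import "k8s.io/apimachinery/pkg/labels"

// constituentSelector matches the resources stamped for a given
// ClusterExtension, per the owner labels asserted by the e2e steps.
func constituentSelector(extName string) labels.Selector {
	return labels.Set{
		"olm.operatorframework.io/owner-kind": "ClusterExtension",
		"olm.operatorframework.io/owner-name": extName,
	}.AsSelector()
}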
diff --git a/test/e2e/steps/hooks.go b/test/e2e/steps/hooks.go
index c91072947..7bea8d230 100644
--- a/test/e2e/steps/hooks.go
+++ b/test/e2e/steps/hooks.go
@@ -16,6 +16,7 @@ import (
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/component-base/featuregate"
 	"k8s.io/klog/v2/textlogger"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	"github.com/operator-framework/operator-controller/internal/operator-controller/features"
 )
@@ -32,6 +33,26 @@ type scenarioContext struct {
 	removedResources []unstructured.Unstructured
 	backGroundCmds   []*exec.Cmd
 	metricsResponse  map[string]string
+
+	extensionObjects []client.Object
+}
+
+// GatherClusterExtensionObjects collects all resources related to the ClusterExtension contained in
+// either their Helm release Secret or ClusterExtensionRevision, depending on the applier being used,
+// and saves them into the context.
+func (s *scenarioContext) GatherClusterExtensionObjects() error {
+	objs, err := listExtensionResources(s.clusterExtensionName)
+	if err != nil {
+		return fmt.Errorf("failed to load extension resources into context: %w", err)
+	}
+	s.extensionObjects = objs
+	return nil
+}
+
+// GetClusterExtensionObjects returns the ClusterExtension objects currently saved into the context.
+// It always returns nil until GatherClusterExtensionObjects has been called.
+func (s *scenarioContext) GetClusterExtensionObjects() []client.Object {
+	return s.extensionObjects
 }
 
 type contextKey string
diff --git a/test/e2e/steps/steps.go b/test/e2e/steps/steps.go
index a0a371485..9bcffa159 100644
--- a/test/e2e/steps/steps.go
+++ b/test/e2e/steps/steps.go
@@ -2,6 +2,7 @@ package steps
 
 import (
 	"bytes"
+	"compress/gzip"
 	"context"
 	"crypto/tls"
 	"encoding/json"
@@ -24,15 +25,19 @@ import (
 	"github.com/prometheus/common/model"
 	"github.com/spf13/pflag"
 	"github.com/stretchr/testify/require"
+	"helm.sh/helm/v3/pkg/release"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/util/sets"
+	k8sresource "k8s.io/cli-runtime/pkg/resource"
 	"k8s.io/utils/ptr"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/yaml"
 
 	ocv1 "github.com/operator-framework/operator-controller/api/v1"
+	"github.com/operator-framework/operator-controller/internal/operator-controller/features"
 )
 
 const (
@@ -56,7 +61,10 @@ func RegisterSteps(sc *godog.ScenarioContext) {
 	sc.Step(`^(?i)ClusterExtension is updated(?:\s+.*)?$`, ResourceIsApplied)
 	sc.Step(`^(?i)ClusterExtension is available$`, ClusterExtensionIsAvailable)
 	sc.Step(`^(?i)ClusterExtension is rolled out$`, ClusterExtensionIsRolledOut)
+	sc.Step(`^(?i)ClusterExtension resources are created and labeled$`, ClusterExtensionResourcesCreatedAndAreLabeled)
+	sc.Step(`^(?i)ClusterExtension is removed$`, ClusterExtensionIsRemoved)
 	sc.Step(`^(?i)ClusterExtension (?:latest generation )?has (?:been )?reconciled(?: the latest generation)?$`, ClusterExtensionReconciledLatestGeneration)
+	sc.Step(`^(?i)the ClusterExtension's constituent resources are removed$`, ClusterExtensionResourcesRemoved)
 	sc.Step(`^(?i)ClusterExtension reports "([^"]+)" as active revision(s?)$`, ClusterExtensionReportsActiveRevisions)
 	sc.Step(`^(?i)ClusterExtension reports ([[:alnum:]]+) as ([[:alnum:]]+) with Reason ([[:alnum:]]+) and Message:$`, ClusterExtensionReportsCondition)
 	sc.Step(`^(?i)ClusterExtension reports ([[:alnum:]]+) as ([[:alnum:]]+) with Reason ([[:alnum:]]+) and Message includes:$`, ClusterExtensionReportsConditionWithMessageFragment)
@@ -69,6 +77,7 @@ func RegisterSteps(sc *godog.ScenarioContext) {
 	sc.Step(`^(?i)resource "([^"]+)" is installed$`, ResourceAvailable)
 	sc.Step(`^(?i)resource "([^"]+)" is available$`, ResourceAvailable)
 	sc.Step(`^(?i)resource "([^"]+)" is removed$`, ResourceRemoved)
+	sc.Step(`^(?i)resource "([^"]+)" is eventually not found$`, ResourceEventuallyNotFound)
 	sc.Step(`^(?i)resource "([^"]+)" exists$`, ResourceAvailable)
 	sc.Step(`^(?i)resource is applied$`, ResourceIsApplied)
 	sc.Step(`^(?i)resource "deployment/test-operator" reports as (not ready|ready)$`, MarkTestOperatorNotReady)
@@ -279,8 +288,68 @@ func ClusterExtensionIsRolledOut(ctx context.Context) error {
 		if err := json.Unmarshal([]byte(v), &condition); err != nil {
 			return false
 		}
+
 		return condition["status"] == "True" && condition["reason"] == "Succeeded" && condition["type"] == "Progressing"
 	}, timeout, tick)
+
+	// Save ClusterExtension resources to the test context for subsequent checks
+	if err := sc.GatherClusterExtensionObjects(); err != nil {
+		return err
+	}
+	return nil
+}
+
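ClusterExtensionIsRolledOut above checks the Progressing condition by unmarshalling kubectl output into generic maps; with typed conditions the same check could use apimachinery's helpers (a sketch under that assumption, not the step's actual implementation):

package example

import (
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// rolledOut reports whether Progressing is True with Reason Succeeded,
// mirroring the string comparisons in the step above.
func rolledOut(conds []metav1.Condition) bool {
	c := meta.FindStatusCondition(conds, "Progressing")
	return c != nil && c.Status == metav1.ConditionTrue && c.Reason == "Succeeded"
}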
context") + } + + for _, obj := range sc.extensionObjects { + waitFor(ctx, func() bool { + kind := obj.GetObjectKind().GroupVersionKind().Kind + clusterObj, err := getResource(kind, obj.GetName(), obj.GetNamespace()) + if err != nil { + logger.V(1).Error(err, "error getting resource", "name", obj.GetName(), "namespace", obj.GetNamespace(), "kind", kind) + return false + } + + labels := clusterObj.GetLabels() + if labels == nil { + logger.V(1).Info("no labels found for resource", "name", obj.GetName(), "namespace", obj.GetNamespace(), "kind", kind) + return false + } + + for key, expectedValue := range map[string]string{ + "olm.operatorframework.io/owner-kind": "ClusterExtension", + "olm.operatorframework.io/owner-name": sc.clusterExtensionName, + } { + if labels[key] != expectedValue { + logger.V(1).Info("invalid resource label value", "name", obj.GetName(), "namespace", obj.GetNamespace(), "kind", kind, "label", key, "expected", expectedValue, "actual", labels["olm.operatorframework.io/owner-kind"]) + return false + } + } + return true + }) + } + return nil +} + +func ClusterExtensionIsRemoved(ctx context.Context) error { + sc := scenarioCtx(ctx) + return ResourceRemoved(ctx, fmt.Sprintf("clusterextension/%s", sc.clusterExtensionName)) +} + +func ClusterExtensionResourcesRemoved(ctx context.Context) error { + sc := scenarioCtx(ctx) + if len(sc.GetClusterExtensionObjects()) == 0 { + return fmt.Errorf("extension objects not found in context") + } + for _, obj := range sc.extensionObjects { + if err := ResourceEventuallyNotFound(ctx, fmt.Sprintf("%s/%s", obj.GetObjectKind().GroupVersionKind().Kind, obj.GetName())); err != nil { + return err + } + } return nil } @@ -419,12 +488,12 @@ func ClusterExtensionRevisionIsArchived(ctx context.Context, revisionName string func ResourceAvailable(ctx context.Context, resource string) error { sc := scenarioCtx(ctx) resource = substituteScenarioVars(resource, sc) - rtype, name, found := strings.Cut(resource, "/") + kind, name, found := strings.Cut(resource, "/") if !found { - return fmt.Errorf("resource %s is not in the format /", resource) + return fmt.Errorf("resource %s is not in the format /", resource) } waitFor(ctx, func() bool { - _, err := k8sClient("get", rtype, name, "-n", sc.namespace) + _, err := k8sClient("get", kind, name, "-n", sc.namespace) return err == nil }) return nil @@ -432,11 +501,12 @@ func ResourceAvailable(ctx context.Context, resource string) error { func ResourceRemoved(ctx context.Context, resource string) error { sc := scenarioCtx(ctx) - rtype, name, found := strings.Cut(resource, "/") + resource = substituteScenarioVars(resource, sc) + kind, name, found := strings.Cut(resource, "/") if !found { - return fmt.Errorf("resource %s is not in the format /", resource) + return fmt.Errorf("resource %s is not in the format /", resource) } - yaml, err := k8sClient("get", rtype, name, "-n", sc.namespace, "-o", "yaml") + yaml, err := k8sClient("get", kind, name, "-n", sc.namespace, "-o", "yaml") if err != nil { return err } @@ -445,23 +515,38 @@ func ResourceRemoved(ctx context.Context, resource string) error { return err } sc.removedResources = append(sc.removedResources, *obj) - _, err = k8sClient("delete", rtype, name, "-n", sc.namespace) + _, err = k8sClient("delete", kind, name, "-n", sc.namespace) return err } +func ResourceEventuallyNotFound(ctx context.Context, resource string) error { + sc := scenarioCtx(ctx) + resource = substituteScenarioVars(resource, sc) + kind, name, found := strings.Cut(resource, "/") + if !found { + 
+func ResourceEventuallyNotFound(ctx context.Context, resource string) error {
+	sc := scenarioCtx(ctx)
+	resource = substituteScenarioVars(resource, sc)
+	kind, name, found := strings.Cut(resource, "/")
+	if !found {
+		return fmt.Errorf("resource %s is not in the format <kind>/<name>", resource)
+	}
+
+	waitFor(ctx, func() bool {
+		obj, err := k8sClient("get", kind, name, "-n", sc.namespace, "--ignore-not-found", "-o", "yaml")
+		return err == nil && strings.TrimSpace(obj) == ""
+	})
+	return nil
+}
+
 func ResourceMatches(ctx context.Context, resource string, requiredContentTemplate *godog.DocString) error {
 	sc := scenarioCtx(ctx)
 	resource = substituteScenarioVars(resource, sc)
-	rtype, name, found := strings.Cut(resource, "/")
+	kind, name, found := strings.Cut(resource, "/")
 	if !found {
-		return fmt.Errorf("resource %s is not in the format <type>/<name>", resource)
+		return fmt.Errorf("resource %s is not in the format <kind>/<name>", resource)
 	}
 	requiredContent, err := toUnstructured(substituteScenarioVars(requiredContentTemplate.Content, sc))
 	if err != nil {
 		return fmt.Errorf("failed to parse required resource yaml: %v", err)
 	}
 	waitFor(ctx, func() bool {
-		objJson, err := k8sClient("get", rtype, name, "-n", sc.namespace, "-o", "json")
+		objJson, err := k8sClient("get", kind, name, "-n", sc.namespace, "-o", "json")
 		if err != nil {
 			return false
 		}
@@ -489,12 +574,13 @@ func ResourceMatches(ctx context.Context, resource string, requiredContentTempla
 
 func ResourceRestored(ctx context.Context, resource string) error {
 	sc := scenarioCtx(ctx)
-	rtype, name, found := strings.Cut(resource, "/")
+	resource = substituteScenarioVars(resource, sc)
+	kind, name, found := strings.Cut(resource, "/")
 	if !found {
-		return fmt.Errorf("resource %s is not in the format <type>/<name>", resource)
+		return fmt.Errorf("resource %s is not in the format <kind>/<name>", resource)
 	}
 	waitFor(ctx, func() bool {
-		yaml, err := k8sClient("get", rtype, name, "-n", sc.namespace, "-o", "yaml")
+		yaml, err := k8sClient("get", kind, name, "-n", sc.namespace, "-o", "yaml")
 		if err != nil {
 			return false
 		}
@@ -507,7 +593,7 @@ func ResourceRestored(ctx context.Context, resource string) error {
 	for i, removed := range sc.removedResources {
 		rct := removed.GetCreationTimestamp()
 		if removed.GetName() == obj.GetName() && removed.GetKind() == obj.GetKind() && rct.Before(&ct) {
-			switch rtype {
+			switch kind {
 			case "configmap":
 				if !reflect.DeepEqual(removed.Object["data"], obj.Object["data"]) {
 					return false
@@ -883,3 +969,161 @@ func extendMap(m map[string]string, keyValue ...string) map[string]string {
 	}
 	return m
 }
+
+func getResource(kind string, name string, namespace string) (*unstructured.Unstructured, error) {
+	out, err := k8sClient("get", kind, name, "-n", namespace, "-o", "yaml")
+	if err != nil {
+		return nil, err
+	}
+	obj, err := toUnstructured(out)
+	if err != nil {
+		return nil, err
+	}
+	return obj, nil
+}
+
+// listExtensionResources returns a slice of client.Object containing all resources for a ClusterExtension.
+// This function is best called after the extension has been installed successfully. An error is returned if there was
+// any issue in determining the extension's resources.
+func listExtensionResources(extName string) ([]client.Object, error) {
+	if enabled, found := featureGates[features.BoxcutterRuntime]; found && enabled {
+		return listExtensionRevisionResources(extName)
+	}
+	return listHelmReleaseResources(extName)
+}
+
+// listHelmReleaseResources returns a slice of client.Object containing all resources for a ClusterExtension's
+// Helm release. Note: The current implementation does not support release secrets chunked across multiple Secrets.
+func listHelmReleaseResources(extName string) ([]client.Object, error) {
+	secret, err := helmReleaseSecretForExtension(extName)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get helm release secret for extension %s: %w", extName, err)
+	}
+
+	rel, err := helmReleaseFromSecret(secret)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get helm release from secret for cluster extension '%s': %w", extName, err)
+	}
+
+	objs, err := collectHelmReleaseObjects(rel)
+	if err != nil {
+		return nil, fmt.Errorf("failed to collect helm release objects for cluster extension '%s': %w", extName, err)
+	}
+	return objs, nil
+}
+
+// helmReleaseSecretForExtension returns the Helm release secret for the extension with name extName
+func helmReleaseSecretForExtension(extName string) (*corev1.Secret, error) {
+	out, err := k8sClient("get", "secrets", "-n", olmNamespace,
+		"-l", fmt.Sprintf("name=%s,status=deployed", extName),
+		"--field-selector", "type=operatorframework.io/index.v1", "-o", "json")
+	if err != nil {
+		return nil, err
+	}
+	if strings.TrimSpace(out) == "" {
+		return nil, fmt.Errorf("no helm release secret found for extension %s", extName)
+	}
+
+	var secretList corev1.SecretList
+	if err = json.Unmarshal([]byte(out), &secretList); err != nil {
+		return nil, err
+	}
+	if len(secretList.Items) != 1 {
+		return nil, fmt.Errorf("expected exactly one helm release secret for extension %s, got %d", extName, len(secretList.Items))
+	}
+	return &secretList.Items[0], nil
+}
+
+// helmReleaseFromSecret returns the Helm Release object encoded in the secret. Note: this function does not yet support
+// releases chunked over multiple Secrets
+func helmReleaseFromSecret(secret *corev1.Secret) (*release.Release, error) {
+	// OLM uses a custom release backend that compresses the release data
+	gzReader, err := gzip.NewReader(strings.NewReader(string(secret.Data["chunk"])))
+	if err != nil {
+		return nil, err
+	}
+	defer gzReader.Close()
+
+	releaseJsonBytes, err := io.ReadAll(gzReader)
+	if err != nil {
+		return nil, err
+	}
+
+	var rel release.Release
+	if err = json.Unmarshal(releaseJsonBytes, &rel); err != nil {
+		return nil, err
+	}
+	return &rel, nil
+}
+
+// collectHelmReleaseObjects returns a slice of client.Object containing the manifests in rel
+func collectHelmReleaseObjects(rel *release.Release) ([]client.Object, error) {
+	result := k8sresource.NewLocalBuilder().Flatten().Unstructured().Stream(strings.NewReader(rel.Manifest), rel.Name).Do()
+	if err := result.Err(); err != nil {
+		return nil, err
+	}
+	infos, err := result.Infos()
+	if err != nil {
+		return nil, err
+	}
+
+	objs := make([]client.Object, 0, len(infos))
+	for _, info := range infos {
+		clientObject, ok := info.Object.(client.Object)
+		if !ok {
+			return nil, fmt.Errorf("object of type %T does not implement client.Object", info.Object)
+		}
+		objs = append(objs, clientObject)
+	}
+	return objs, nil
+}
+
+// listExtensionRevisionResources lists objects in the phases of the latest active revision
+func listExtensionRevisionResources(extName string) ([]client.Object, error) {
+	rev, err := latestActiveRevisionForExtension(extName)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get latest active revision for extension %s: %w", extName, err)
+	}
+
+	var objs []client.Object
+	for i := range rev.Spec.Phases {
+		phase := &rev.Spec.Phases[i]
+		for j := range phase.Objects {
+			objs = append(objs, &phase.Objects[j].Object)
+		}
+	}

+	return objs, nil
+}
+
+// latestActiveRevisionForExtension returns the latest active revision for the extension called extName
+func 
latestActiveRevisionForExtension(extName string) (*ocv1.ClusterExtensionRevision, error) { + out, err := k8sClient("get", "clusterextensionrevisions", "-l", fmt.Sprintf("olm.operatorframework.io/owner-name=%s", extName), "-o", "json") + if err != nil { + return nil, fmt.Errorf("error listing revisions for extension '%s': %w", extName, err) + } + if strings.TrimSpace(out) == "" { + return nil, fmt.Errorf("no revisions found for extension '%s'", extName) + } + var revisionList ocv1.ClusterExtensionRevisionList + if err := json.Unmarshal([]byte(out), &revisionList); err != nil { + return nil, fmt.Errorf("error unmarshalling revisions for extension '%s': %w", extName, err) + } + + var latest *ocv1.ClusterExtensionRevision + for i := range revisionList.Items { + rev := &revisionList.Items[i] + if rev.Spec.LifecycleState != ocv1.ClusterExtensionRevisionLifecycleStateActive { + continue + } + if latest == nil || rev.Spec.Revision > latest.Spec.Revision { + latest = rev + } + } + + if latest == nil { + return nil, fmt.Errorf("no active revisions found for extension '%s'", extName) + } + + return latest, nil +} diff --git a/vendor/github.com/google/renameio/v2/option.go b/vendor/github.com/google/renameio/v2/option.go index a86906f4c..39bfe0b85 100644 --- a/vendor/github.com/google/renameio/v2/option.go +++ b/vendor/github.com/google/renameio/v2/option.go @@ -13,7 +13,6 @@ // limitations under the License. //go:build !windows -// +build !windows package renameio @@ -86,3 +85,14 @@ func WithReplaceOnClose() Option { c.renameOnClose = true }) } + +// WithRoot specifies a root directory to use when working with files. +// See [os.Root] and https://go.dev/blog/osroot for more details. +// +// When WithRoot is used, WithTempDir (and the $TMPDIR environment variable) are +// ignored, as temporary files must be created in the specified root directory. +func WithRoot(root *os.Root) Option { + return optionFunc(func(c *config) { + c.root = root + }) +} diff --git a/vendor/github.com/google/renameio/v2/tempfile.go b/vendor/github.com/google/renameio/v2/tempfile.go index 98114e539..e101e4e70 100644 --- a/vendor/github.com/google/renameio/v2/tempfile.go +++ b/vendor/github.com/google/renameio/v2/tempfile.go @@ -13,13 +13,11 @@ // limitations under the License. //go:build !windows -// +build !windows package renameio import ( - "io/ioutil" - "math/rand" + "math/rand/v2" "os" "path/filepath" "strconv" @@ -29,10 +27,10 @@ import ( const defaultPerm os.FileMode = 0o600 // nextrandom is a function generating a random number. -var nextrandom = rand.Int63 +var nextrandom = rand.Int64 // openTempFile creates a randomly named file and returns an open handle. It is -// similar to ioutil.TempFile except that the directory must be given, the file +// similar to os.CreateTemp except that the directory must be given, the file // permissions can be controlled and patterns in the name are not supported. // The name is always suffixed with a random number. func openTempFile(dir, name string, perm os.FileMode) (*os.File, error) { @@ -58,6 +56,33 @@ func openTempFile(dir, name string, perm os.FileMode) (*os.File, error) { } } +// openTempFileRoot creates a randomly named file in root and returns an open +// handle. It is similar to os.CreateTemp except that the directory must be +// given, the file permissions can be controlled and patterns in the name are +// not supported. The name is always suffixed with a random number. 
+func openTempFileRoot(root *os.Root, name string, perm os.FileMode) (string, *os.File, error) { + prefix := name + + for attempt := 0; ; { + // Generate a reasonably random name which is unlikely to already + // exist. O_EXCL ensures that existing files generate an error. + name := prefix + strconv.FormatInt(nextrandom(), 10) + + f, err := root.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, perm) + if !os.IsExist(err) { + return name, f, err + } + + if attempt++; attempt > 10000 { + return "", nil, &os.PathError{ + Op: "tempfile", + Path: name, + Err: os.ErrExist, + } + } + } +} + // TempDir checks whether os.TempDir() can be used as a temporary directory for // later atomically replacing files within dest. If no (os.TempDir() resides on // a different mount point), dest is returned. @@ -83,7 +108,7 @@ func tempDir(dir, dest string) string { // the TMPDIR environment variable. tmpdir := os.TempDir() - testsrc, err := ioutil.TempFile(tmpdir, "."+filepath.Base(dest)) + testsrc, err := os.CreateTemp(tmpdir, "."+filepath.Base(dest)) if err != nil { return fallback } @@ -95,7 +120,7 @@ func tempDir(dir, dest string) string { }() testsrc.Close() - testdest, err := ioutil.TempFile(filepath.Dir(dest), "."+filepath.Base(dest)) + testdest, err := os.CreateTemp(filepath.Dir(dest), "."+filepath.Base(dest)) if err != nil { return fallback } @@ -118,6 +143,8 @@ type PendingFile struct { done bool closed bool replaceOnClose bool + root *os.Root + tmpname string } // Cleanup is a no-op if CloseAtomicallyReplace succeeded, and otherwise closes @@ -134,8 +161,14 @@ func (t *PendingFile) Cleanup() error { if !t.closed { closeErr = t.File.Close() } - if err := os.Remove(t.Name()); err != nil { - return err + if t.root != nil { + if err := t.root.Remove(t.tmpname); err != nil { + return err + } + } else { + if err := os.Remove(t.Name()); err != nil { + return err + } } t.done = true return closeErr @@ -163,8 +196,14 @@ func (t *PendingFile) CloseAtomicallyReplace() error { if err := t.File.Close(); err != nil { return err } - if err := os.Rename(t.Name(), t.path); err != nil { - return err + if t.root != nil { + if err := t.root.Rename(t.tmpname, t.path); err != nil { + return err + } + } else { + if err := os.Rename(t.Name(), t.path); err != nil { + return err + } } t.done = true return nil @@ -200,6 +239,7 @@ type config struct { ignoreUmask bool chmod *os.FileMode renameOnClose bool + root *os.Root } // NewPendingFile creates a temporary file destined to atomically creating or @@ -227,8 +267,15 @@ func NewPendingFile(path string, opts ...Option) (*PendingFile, error) { } if cfg.attemptPermCopy { + var existing os.FileInfo + var err error + if cfg.root != nil { + existing, err = cfg.root.Lstat(cfg.path) + } else { + existing, err = os.Lstat(cfg.path) + } // Try to determine permissions from an existing file. 
- if existing, err := os.Lstat(cfg.path); err == nil && existing.Mode().IsRegular() { + if err == nil && existing.Mode().IsRegular() { perm := existing.Mode() & os.ModePerm cfg.chmod = &perm @@ -240,7 +287,14 @@ func NewPendingFile(path string, opts ...Option) (*PendingFile, error) { } } - f, err := openTempFile(tempDir(cfg.dir, cfg.path), "."+filepath.Base(cfg.path), cfg.createPerm) + var f *os.File + var err error + var tmpname string + if cfg.root != nil { + tmpname, f, err = openTempFileRoot(cfg.root, "."+filepath.Base(cfg.path), cfg.createPerm) + } else { + f, err = openTempFile(tempDir(cfg.dir, cfg.path), "."+filepath.Base(cfg.path), cfg.createPerm) + } if err != nil { return nil, err } @@ -255,7 +309,13 @@ func NewPendingFile(path string, opts ...Option) (*PendingFile, error) { } } - return &PendingFile{File: f, path: cfg.path, replaceOnClose: cfg.renameOnClose}, nil + return &PendingFile{ + File: f, + path: cfg.path, + replaceOnClose: cfg.renameOnClose, + root: cfg.root, + tmpname: tmpname, + }, nil } // Symlink wraps os.Symlink, replacing an existing symlink with the same name @@ -267,9 +327,9 @@ func Symlink(oldname, newname string) error { return err } - // We need to use ioutil.TempDir, as we cannot overwrite a ioutil.TempFile, + // We need to use os.MkdirTemp, as we cannot overwrite a os.CreateTemp file, // and removing+symlinking creates a TOCTOU race. - d, err := ioutil.TempDir(filepath.Dir(newname), "."+filepath.Base(newname)) + d, err := os.MkdirTemp(filepath.Dir(newname), "."+filepath.Base(newname)) if err != nil { return err } @@ -292,3 +352,41 @@ func Symlink(oldname, newname string) error { cleanup = false return os.RemoveAll(d) } + +// SymlinkRoot wraps os.Symlink, replacing an existing symlink with the same +// name atomically (os.Symlink fails when newname already exists, at least on +// Linux). +func SymlinkRoot(root *os.Root, oldname, newname string) error { + // Fast path: if newname does not exist yet, we can skip the whole dance + // below. + if err := root.Symlink(oldname, newname); err == nil || !os.IsExist(err) { + return err + } + + // We need to use os.MkdirTemp, as we cannot overwrite a os.CreateTemp file, + // and removing+symlinking creates a TOCTOU race. + // + // There is no os.Root-compatible os.MkdirTemp, so we use the path directly. + d, err := os.MkdirTemp(root.Name(), "."+filepath.Base(newname)) + if err != nil { + return err + } + cleanup := true + defer func() { + if cleanup { + os.RemoveAll(d) + } + }() + + symlink := filepath.Join(filepath.Base(d), "tmp.symlink") + if err := root.Symlink(oldname, symlink); err != nil { + return err + } + + if err := root.Rename(symlink, newname); err != nil { + return err + } + + cleanup = false + return os.RemoveAll(d) +} diff --git a/vendor/github.com/google/renameio/v2/writefile.go b/vendor/github.com/google/renameio/v2/writefile.go index 545042102..097817f0e 100644 --- a/vendor/github.com/google/renameio/v2/writefile.go +++ b/vendor/github.com/google/renameio/v2/writefile.go @@ -13,13 +13,12 @@ // limitations under the License. //go:build !windows -// +build !windows package renameio import "os" -// WriteFile mirrors ioutil.WriteFile, replacing an existing file with the same +// WriteFile mirrors os.WriteFile, replacing an existing file with the same // name atomically. 
func WriteFile(filename string, data []byte, perm os.FileMode, opts ...Option) error { opts = append([]Option{ diff --git a/vendor/github.com/rubenv/sql-migrate/migrate.go b/vendor/github.com/rubenv/sql-migrate/migrate.go index c9cb4a48b..d08e22b42 100644 --- a/vendor/github.com/rubenv/sql-migrate/migrate.go +++ b/vendor/github.com/rubenv/sql-migrate/migrate.go @@ -640,7 +640,7 @@ func (ms MigrationSet) planMigrationCommon(db *sql.DB, dialect string, m Migrati } // Sort migrations that have been run by Id. - var existingMigrations []*Migration + existingMigrations := make([]*Migration, 0, len(migrationRecords)) for _, migrationRecord := range migrationRecords { existingMigrations = append(existingMigrations, &Migration{ Id: migrationRecord.Id, diff --git a/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go index cb4cadc32..dfbfc1eb3 100644 --- a/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go +++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go @@ -37,6 +37,15 @@ type priorityWriteSchedulerRFC9218 struct { // incremental streams or not, when urgency is the same in a given Pop() // call. prioritizeIncremental bool + + // priorityUpdateBuf is used to buffer the most recent PRIORITY_UPDATE we + // receive per https://www.rfc-editor.org/rfc/rfc9218.html#name-the-priority_update-frame. + priorityUpdateBuf struct { + // streamID being 0 means that the buffer is empty. This is a safe + // assumption as PRIORITY_UPDATE for stream 0 is a PROTOCOL_ERROR. + streamID uint32 + priority PriorityParam + } } func newPriorityWriteSchedulerRFC9218() WriteScheduler { @@ -50,6 +59,10 @@ func (ws *priorityWriteSchedulerRFC9218) OpenStream(streamID uint32, opt OpenStr if ws.streams[streamID].location != nil { panic(fmt.Errorf("stream %d already opened", streamID)) } + if streamID == ws.priorityUpdateBuf.streamID { + ws.priorityUpdateBuf.streamID = 0 + opt.priority = ws.priorityUpdateBuf.priority + } q := ws.queuePool.get() ws.streams[streamID] = streamMetadata{ location: q, @@ -95,6 +108,8 @@ func (ws *priorityWriteSchedulerRFC9218) AdjustStream(streamID uint32, priority metadata := ws.streams[streamID] q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental if q == nil { + ws.priorityUpdateBuf.streamID = streamID + ws.priorityUpdateBuf.priority = priority return } diff --git a/vendor/golang.org/x/net/websocket/hybi.go b/vendor/golang.org/x/net/websocket/hybi.go index dda743466..c7e76cd91 100644 --- a/vendor/golang.org/x/net/websocket/hybi.go +++ b/vendor/golang.org/x/net/websocket/hybi.go @@ -440,6 +440,7 @@ func hybiClientHandshake(config *Config, br *bufio.Reader, bw *bufio.Writer) (er if err != nil { return err } + defer resp.Body.Close() if resp.StatusCode != 101 { return ErrBadStatus } diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go index 1e642f330..f5723d4f7 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -64,6 +64,80 @@ func initOptions() { func archInit() { + // From internal/cpu + const ( + // eax bits + cpuid_AVXVNNI = 1 << 4 + + // ecx bits + cpuid_SSE3 = 1 << 0 + cpuid_PCLMULQDQ = 1 << 1 + cpuid_AVX512VBMI = 1 << 1 + cpuid_AVX512VBMI2 = 1 << 6 + cpuid_SSSE3 = 1 << 9 + cpuid_AVX512GFNI = 1 << 8 + cpuid_AVX512VAES = 1 << 9 + cpuid_AVX512VNNI = 1 << 11 + cpuid_AVX512BITALG = 1 << 12 + cpuid_FMA = 1 << 12 + cpuid_AVX512VPOPCNTDQ = 1 << 14 + cpuid_SSE41 = 1 << 19 + cpuid_SSE42 = 
1 << 20 + cpuid_POPCNT = 1 << 23 + cpuid_AES = 1 << 25 + cpuid_OSXSAVE = 1 << 27 + cpuid_AVX = 1 << 28 + + // "Extended Feature Flag" bits returned in EBX for CPUID EAX=0x7 ECX=0x0 + cpuid_BMI1 = 1 << 3 + cpuid_AVX2 = 1 << 5 + cpuid_BMI2 = 1 << 8 + cpuid_ERMS = 1 << 9 + cpuid_AVX512F = 1 << 16 + cpuid_AVX512DQ = 1 << 17 + cpuid_ADX = 1 << 19 + cpuid_AVX512CD = 1 << 28 + cpuid_SHA = 1 << 29 + cpuid_AVX512BW = 1 << 30 + cpuid_AVX512VL = 1 << 31 + + // "Extended Feature Flag" bits returned in ECX for CPUID EAX=0x7 ECX=0x0 + cpuid_AVX512_VBMI = 1 << 1 + cpuid_AVX512_VBMI2 = 1 << 6 + cpuid_GFNI = 1 << 8 + cpuid_AVX512VPCLMULQDQ = 1 << 10 + cpuid_AVX512_BITALG = 1 << 12 + + // edx bits + cpuid_FSRM = 1 << 4 + // edx bits for CPUID 0x80000001 + cpuid_RDTSCP = 1 << 27 + ) + // Additional constants not in internal/cpu + const ( + // eax=1: edx + cpuid_SSE2 = 1 << 26 + // eax=1: ecx + cpuid_CX16 = 1 << 13 + cpuid_RDRAND = 1 << 30 + // eax=7,ecx=0: ebx + cpuid_RDSEED = 1 << 18 + cpuid_AVX512IFMA = 1 << 21 + cpuid_AVX512PF = 1 << 26 + cpuid_AVX512ER = 1 << 27 + // eax=7,ecx=0: edx + cpuid_AVX5124VNNIW = 1 << 2 + cpuid_AVX5124FMAPS = 1 << 3 + cpuid_AMXBF16 = 1 << 22 + cpuid_AMXTile = 1 << 24 + cpuid_AMXInt8 = 1 << 25 + // eax=7,ecx=1: eax + cpuid_AVX512BF16 = 1 << 5 + cpuid_AVXIFMA = 1 << 23 + // eax=7,ecx=1: edx + cpuid_AVXVNNIInt8 = 1 << 4 + ) + Initialized = true maxID, _, _, _ := cpuid(0, 0) @@ -73,90 +147,90 @@ func archInit() { } _, _, ecx1, edx1 := cpuid(1, 0) - X86.HasSSE2 = isSet(26, edx1) - - X86.HasSSE3 = isSet(0, ecx1) - X86.HasPCLMULQDQ = isSet(1, ecx1) - X86.HasSSSE3 = isSet(9, ecx1) - X86.HasFMA = isSet(12, ecx1) - X86.HasCX16 = isSet(13, ecx1) - X86.HasSSE41 = isSet(19, ecx1) - X86.HasSSE42 = isSet(20, ecx1) - X86.HasPOPCNT = isSet(23, ecx1) - X86.HasAES = isSet(25, ecx1) - X86.HasOSXSAVE = isSet(27, ecx1) - X86.HasRDRAND = isSet(30, ecx1) + X86.HasSSE2 = isSet(edx1, cpuid_SSE2) + + X86.HasSSE3 = isSet(ecx1, cpuid_SSE3) + X86.HasPCLMULQDQ = isSet(ecx1, cpuid_PCLMULQDQ) + X86.HasSSSE3 = isSet(ecx1, cpuid_SSSE3) + X86.HasFMA = isSet(ecx1, cpuid_FMA) + X86.HasCX16 = isSet(ecx1, cpuid_CX16) + X86.HasSSE41 = isSet(ecx1, cpuid_SSE41) + X86.HasSSE42 = isSet(ecx1, cpuid_SSE42) + X86.HasPOPCNT = isSet(ecx1, cpuid_POPCNT) + X86.HasAES = isSet(ecx1, cpuid_AES) + X86.HasOSXSAVE = isSet(ecx1, cpuid_OSXSAVE) + X86.HasRDRAND = isSet(ecx1, cpuid_RDRAND) var osSupportsAVX, osSupportsAVX512 bool // For XGETBV, OSXSAVE bit is required and sufficient. if X86.HasOSXSAVE { eax, _ := xgetbv() // Check if XMM and YMM registers have OS support. - osSupportsAVX = isSet(1, eax) && isSet(2, eax) + osSupportsAVX = isSet(eax, 1<<1) && isSet(eax, 1<<2) if runtime.GOOS == "darwin" { // Darwin requires special AVX512 checks, see cpu_darwin_x86.go osSupportsAVX512 = osSupportsAVX && darwinSupportsAVX512() } else { // Check if OPMASK and ZMM registers have OS support. 
- osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax) + osSupportsAVX512 = osSupportsAVX && isSet(eax, 1<<5) && isSet(eax, 1<<6) && isSet(eax, 1<<7) } } - X86.HasAVX = isSet(28, ecx1) && osSupportsAVX + X86.HasAVX = isSet(ecx1, cpuid_AVX) && osSupportsAVX if maxID < 7 { return } eax7, ebx7, ecx7, edx7 := cpuid(7, 0) - X86.HasBMI1 = isSet(3, ebx7) - X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX - X86.HasBMI2 = isSet(8, ebx7) - X86.HasERMS = isSet(9, ebx7) - X86.HasRDSEED = isSet(18, ebx7) - X86.HasADX = isSet(19, ebx7) - - X86.HasAVX512 = isSet(16, ebx7) && osSupportsAVX512 // Because avx-512 foundation is the core required extension + X86.HasBMI1 = isSet(ebx7, cpuid_BMI1) + X86.HasAVX2 = isSet(ebx7, cpuid_AVX2) && osSupportsAVX + X86.HasBMI2 = isSet(ebx7, cpuid_BMI2) + X86.HasERMS = isSet(ebx7, cpuid_ERMS) + X86.HasRDSEED = isSet(ebx7, cpuid_RDSEED) + X86.HasADX = isSet(ebx7, cpuid_ADX) + + X86.HasAVX512 = isSet(ebx7, cpuid_AVX512F) && osSupportsAVX512 // Because avx-512 foundation is the core required extension if X86.HasAVX512 { X86.HasAVX512F = true - X86.HasAVX512CD = isSet(28, ebx7) - X86.HasAVX512ER = isSet(27, ebx7) - X86.HasAVX512PF = isSet(26, ebx7) - X86.HasAVX512VL = isSet(31, ebx7) - X86.HasAVX512BW = isSet(30, ebx7) - X86.HasAVX512DQ = isSet(17, ebx7) - X86.HasAVX512IFMA = isSet(21, ebx7) - X86.HasAVX512VBMI = isSet(1, ecx7) - X86.HasAVX5124VNNIW = isSet(2, edx7) - X86.HasAVX5124FMAPS = isSet(3, edx7) - X86.HasAVX512VPOPCNTDQ = isSet(14, ecx7) - X86.HasAVX512VPCLMULQDQ = isSet(10, ecx7) - X86.HasAVX512VNNI = isSet(11, ecx7) - X86.HasAVX512GFNI = isSet(8, ecx7) - X86.HasAVX512VAES = isSet(9, ecx7) - X86.HasAVX512VBMI2 = isSet(6, ecx7) - X86.HasAVX512BITALG = isSet(12, ecx7) + X86.HasAVX512CD = isSet(ebx7, cpuid_AVX512CD) + X86.HasAVX512ER = isSet(ebx7, cpuid_AVX512ER) + X86.HasAVX512PF = isSet(ebx7, cpuid_AVX512PF) + X86.HasAVX512VL = isSet(ebx7, cpuid_AVX512VL) + X86.HasAVX512BW = isSet(ebx7, cpuid_AVX512BW) + X86.HasAVX512DQ = isSet(ebx7, cpuid_AVX512DQ) + X86.HasAVX512IFMA = isSet(ebx7, cpuid_AVX512IFMA) + X86.HasAVX512VBMI = isSet(ecx7, cpuid_AVX512_VBMI) + X86.HasAVX5124VNNIW = isSet(edx7, cpuid_AVX5124VNNIW) + X86.HasAVX5124FMAPS = isSet(edx7, cpuid_AVX5124FMAPS) + X86.HasAVX512VPOPCNTDQ = isSet(ecx7, cpuid_AVX512VPOPCNTDQ) + X86.HasAVX512VPCLMULQDQ = isSet(ecx7, cpuid_AVX512VPCLMULQDQ) + X86.HasAVX512VNNI = isSet(ecx7, cpuid_AVX512VNNI) + X86.HasAVX512GFNI = isSet(ecx7, cpuid_AVX512GFNI) + X86.HasAVX512VAES = isSet(ecx7, cpuid_AVX512VAES) + X86.HasAVX512VBMI2 = isSet(ecx7, cpuid_AVX512VBMI2) + X86.HasAVX512BITALG = isSet(ecx7, cpuid_AVX512BITALG) } - X86.HasAMXTile = isSet(24, edx7) - X86.HasAMXInt8 = isSet(25, edx7) - X86.HasAMXBF16 = isSet(22, edx7) + X86.HasAMXTile = isSet(edx7, cpuid_AMXTile) + X86.HasAMXInt8 = isSet(edx7, cpuid_AMXInt8) + X86.HasAMXBF16 = isSet(edx7, cpuid_AMXBF16) // These features depend on the second level of extended features. 
 	if eax7 >= 1 {
 		eax71, _, _, edx71 := cpuid(7, 1)
 		if X86.HasAVX512 {
-			X86.HasAVX512BF16 = isSet(5, eax71)
+			X86.HasAVX512BF16 = isSet(eax71, cpuid_AVX512BF16)
 		}
 		if X86.HasAVX {
-			X86.HasAVXIFMA = isSet(23, eax71)
-			X86.HasAVXVNNI = isSet(4, eax71)
-			X86.HasAVXVNNIInt8 = isSet(4, edx71)
+			X86.HasAVXIFMA = isSet(eax71, cpuid_AVXIFMA)
+			X86.HasAVXVNNI = isSet(eax71, cpuid_AVXVNNI)
+			X86.HasAVXVNNIInt8 = isSet(edx71, cpuid_AVXVNNIInt8)
 		}
 	}
 }
 
-func isSet(bitpos uint, value uint32) bool {
-	return value&(1<<bitpos) != 0
+func isSet(value, mask uint32) bool {
+	return value&mask != 0
 }
diff --git a/vendor/golang.org/x/term/terminal.go b/vendor/golang.org/x/term/terminal.go
--- a/vendor/golang.org/x/term/terminal.go
+++ b/vendor/golang.org/x/term/terminal.go
+	if !pasteActive && len(b) >= 4 && b[0] == keyEscape && b[1] == '[' && b[2] == '3' && b[3] == '~' {
+		return keyDelete, b[4:]
+	}
+
 	if !pasteActive && len(b) >= 6 && b[0] == keyEscape && b[1] == '[' && b[2] == '1' && b[3] == ';' && b[4] == '3' {
 		switch b[5] {
 		case 'C':
@@ -590,7 +598,7 @@ func (t *Terminal) handleKey(key rune) (line string, ok bool) {
 		}
 		t.line = t.line[:t.pos]
 		t.moveCursorToPos(t.pos)
-	case keyCtrlD:
+	case keyCtrlD, keyDelete:
 		// Erase the character under the current position.
 		// The EOF case when the line is empty is handled in
 		// readLine().
@@ -600,6 +608,24 @@ func (t *Terminal) handleKey(key rune) (line string, ok bool) {
 		}
 	case keyCtrlU:
 		t.eraseNPreviousChars(t.pos)
+	case keyTranspose:
+		// This transposes the two characters around the cursor and advances the cursor. Best-effort.
+		if len(t.line) < 2 || t.pos < 1 {
+			return
+		}
+		swap := t.pos
+		if swap == len(t.line) {
+			swap-- // special: at end of line, swap previous two chars
+		}
+		t.line[swap-1], t.line[swap] = t.line[swap], t.line[swap-1]
+		if t.pos < len(t.line) {
+			t.pos++
+		}
+		if t.echo {
+			t.moveCursorToPos(swap - 1)
+			t.writeLine(t.line[swap-1:])
+			t.moveCursorToPos(t.pos)
+		}
 	case keyClearScreen:
 		// Erases the screen and moves the cursor to the home position.
 		t.queue([]rune("\x1b[2J\x1b[H"))
diff --git a/vendor/golang.org/x/tools/go/ast/inspector/cursor.go b/vendor/golang.org/x/tools/go/ast/inspector/cursor.go
index fc9bbc714..60ad425f3 100644
--- a/vendor/golang.org/x/tools/go/ast/inspector/cursor.go
+++ b/vendor/golang.org/x/tools/go/ast/inspector/cursor.go
@@ -453,6 +453,9 @@ func (c Cursor) FindNode(n ast.Node) (Cursor, bool) {
 // rooted at c such that n.Pos() <= start && end <= n.End().
 // (For an *ast.File, it uses the bounds n.FileStart-n.FileEnd.)
 //
+// An empty range (start == end) between two adjacent nodes is
+// considered to belong to the first node.
+//
 // It returns zero if none is found.
 // Precondition: start <= end.
 //
@@ -501,10 +504,17 @@ func (c Cursor) FindByPos(start, end token.Pos) (Cursor, bool) {
 				break // disjoint, after; stop
 			}
 		}
+		// Inv: node.{Pos,FileStart} <= start
 		if end <= nodeEnd {
 			// node fully contains target range
 			best = i
+
+			// Don't search beyond end of the first match.
+			// This is important only for an empty range (start=end)
+			// between two adjoining nodes, which would otherwise
+			// match both nodes; we want to match only the first.
+			limit = ev.index
 		} else if nodeEnd < start {
 			i = ev.index // disjoint, before; skip forward
 		}
diff --git a/vendor/golang.org/x/tools/internal/diff/lcs/old.go b/vendor/golang.org/x/tools/internal/diff/lcs/old.go
index 5acc68e1d..d6265c8c7 100644
--- a/vendor/golang.org/x/tools/internal/diff/lcs/old.go
+++ b/vendor/golang.org/x/tools/internal/diff/lcs/old.go
@@ -23,9 +23,13 @@ func DiffBytes(a, b []byte) []Diff { return diff(bytesSeqs{a, b}) }
 
 // DiffRunes returns the differences between two rune sequences.
 func DiffRunes(a, b []rune) []Diff { return diff(runesSeqs{a, b}) }
 
+// DiffLines returns the differences between two string sequences. 
+func DiffLines(a, b []string) []Diff { return diff(linesSeqs{a, b}) } + +// A limit on how deeply the LCS algorithm should search. The value is just a guess. +var maxDiffs = 100 + func diff(seqs sequences) []Diff { - // A limit on how deeply the LCS algorithm should search. The value is just a guess. - const maxDiffs = 100 diff, _ := compute(seqs, twosided, maxDiffs/2) return diff } diff --git a/vendor/golang.org/x/tools/internal/diff/lcs/sequence.go b/vendor/golang.org/x/tools/internal/diff/lcs/sequence.go index 811bb216e..429e8c619 100644 --- a/vendor/golang.org/x/tools/internal/diff/lcs/sequence.go +++ b/vendor/golang.org/x/tools/internal/diff/lcs/sequence.go @@ -13,63 +13,44 @@ type sequences interface { commonSuffixLen(ai, aj, bi, bj int) int // len(commonSuffix(A[ai:aj], B[bi:bj])) } -type stringSeqs struct{ a, b string } - -func (s stringSeqs) lengths() (int, int) { return len(s.a), len(s.b) } -func (s stringSeqs) commonPrefixLen(ai, aj, bi, bj int) int { - return commonPrefixLenString(s.a[ai:aj], s.b[bi:bj]) -} -func (s stringSeqs) commonSuffixLen(ai, aj, bi, bj int) int { - return commonSuffixLenString(s.a[ai:aj], s.b[bi:bj]) -} - // The explicit capacity in s[i:j:j] leads to more efficient code. type bytesSeqs struct{ a, b []byte } func (s bytesSeqs) lengths() (int, int) { return len(s.a), len(s.b) } func (s bytesSeqs) commonPrefixLen(ai, aj, bi, bj int) int { - return commonPrefixLenBytes(s.a[ai:aj:aj], s.b[bi:bj:bj]) + return commonPrefixLen(s.a[ai:aj:aj], s.b[bi:bj:bj]) } func (s bytesSeqs) commonSuffixLen(ai, aj, bi, bj int) int { - return commonSuffixLenBytes(s.a[ai:aj:aj], s.b[bi:bj:bj]) + return commonSuffixLen(s.a[ai:aj:aj], s.b[bi:bj:bj]) } type runesSeqs struct{ a, b []rune } func (s runesSeqs) lengths() (int, int) { return len(s.a), len(s.b) } func (s runesSeqs) commonPrefixLen(ai, aj, bi, bj int) int { - return commonPrefixLenRunes(s.a[ai:aj:aj], s.b[bi:bj:bj]) + return commonPrefixLen(s.a[ai:aj:aj], s.b[bi:bj:bj]) } func (s runesSeqs) commonSuffixLen(ai, aj, bi, bj int) int { - return commonSuffixLenRunes(s.a[ai:aj:aj], s.b[bi:bj:bj]) + return commonSuffixLen(s.a[ai:aj:aj], s.b[bi:bj:bj]) +} + +type linesSeqs struct{ a, b []string } + +func (s linesSeqs) lengths() (int, int) { return len(s.a), len(s.b) } +func (s linesSeqs) commonPrefixLen(ai, aj, bi, bj int) int { + return commonPrefixLen(s.a[ai:aj], s.b[bi:bj]) +} +func (s linesSeqs) commonSuffixLen(ai, aj, bi, bj int) int { + return commonSuffixLen(s.a[ai:aj], s.b[bi:bj]) } // TODO(adonovan): optimize these functions using ideas from: // - https://go.dev/cl/408116 common.go // - https://go.dev/cl/421435 xor_generic.go -// TODO(adonovan): factor using generics when available, -// but measure performance impact. - -// commonPrefixLen* returns the length of the common prefix of a[ai:aj] and b[bi:bj]. -func commonPrefixLenBytes(a, b []byte) int { - n := min(len(a), len(b)) - i := 0 - for i < n && a[i] == b[i] { - i++ - } - return i -} -func commonPrefixLenRunes(a, b []rune) int { - n := min(len(a), len(b)) - i := 0 - for i < n && a[i] == b[i] { - i++ - } - return i -} -func commonPrefixLenString(a, b string) int { +// commonPrefixLen returns the length of the common prefix of a[ai:aj] and b[bi:bj]. +func commonPrefixLen[T comparable](a, b []T) int { n := min(len(a), len(b)) i := 0 for i < n && a[i] == b[i] { @@ -78,24 +59,8 @@ func commonPrefixLenString(a, b string) int { return i } -// commonSuffixLen* returns the length of the common suffix of a[ai:aj] and b[bi:bj]. 
-func commonSuffixLenBytes(a, b []byte) int { - n := min(len(a), len(b)) - i := 0 - for i < n && a[len(a)-1-i] == b[len(b)-1-i] { - i++ - } - return i -} -func commonSuffixLenRunes(a, b []rune) int { - n := min(len(a), len(b)) - i := 0 - for i < n && a[len(a)-1-i] == b[len(b)-1-i] { - i++ - } - return i -} -func commonSuffixLenString(a, b string) int { +// commonSuffixLen returns the length of the common suffix of a[ai:aj] and b[bi:bj]. +func commonSuffixLen[T comparable](a, b []T) int { n := min(len(a), len(b)) i := 0 for i < n && a[len(a)-1-i] == b[len(b)-1-i] { diff --git a/vendor/golang.org/x/tools/internal/diff/ndiff.go b/vendor/golang.org/x/tools/internal/diff/ndiff.go index a2eef26ac..448c8ce65 100644 --- a/vendor/golang.org/x/tools/internal/diff/ndiff.go +++ b/vendor/golang.org/x/tools/internal/diff/ndiff.go @@ -6,11 +6,30 @@ package diff import ( "bytes" + "strings" "unicode/utf8" "golang.org/x/tools/internal/diff/lcs" ) +// Lines computes differences between two strings. All edits are at line boundaries. +func Lines(before, after string) []Edit { + beforeLines, bOffsets := splitLines(before) + afterLines, _ := splitLines(after) + diffs := lcs.DiffLines(beforeLines, afterLines) + + // Convert from LCS diffs to Edits + res := make([]Edit, len(diffs)) + for i, d := range diffs { + res[i] = Edit{ + Start: bOffsets[d.Start], + End: bOffsets[d.End], + New: strings.Join(afterLines[d.ReplStart:d.ReplEnd], ""), + } + } + return res +} + // Strings computes the differences between two strings. // The resulting edits respect rune boundaries. func Strings(before, after string) []Edit { diff --git a/vendor/golang.org/x/tools/internal/diff/unified.go b/vendor/golang.org/x/tools/internal/diff/unified.go index cfbda6102..a6ebe9f95 100644 --- a/vendor/golang.org/x/tools/internal/diff/unified.go +++ b/vendor/golang.org/x/tools/internal/diff/unified.go @@ -7,6 +7,8 @@ package diff import ( "fmt" "log" + "regexp" + "strconv" "strings" ) @@ -114,7 +116,7 @@ func toUnified(fromName, toName string, content string, edits []Edit, contextLin if err != nil { return u, err } - lines := splitLines(content) + lines, _ := splitLines(content) var h *hunk last := 0 toLine := 0 @@ -156,7 +158,8 @@ func toUnified(fromName, toName string, content string, edits []Edit, contextLin last++ } if edit.New != "" { - for _, content := range splitLines(edit.New) { + v, _ := splitLines(edit.New) + for _, content := range v { h.lines = append(h.lines, line{kind: opInsert, content: content}) toLine++ } @@ -170,12 +173,24 @@ func toUnified(fromName, toName string, content string, edits []Edit, contextLin return u, nil } -func splitLines(text string) []string { - lines := strings.SplitAfter(text, "\n") - if lines[len(lines)-1] == "" { - lines = lines[:len(lines)-1] +// split into lines removing a final empty line, +// and also return the offsets of the line beginnings. +func splitLines(text string) ([]string, []int) { + var lines []string + offsets := []int{0} + start := 0 + for i, r := range text { + if r == '\n' { + lines = append(lines, text[start:i+1]) + start = i + 1 + offsets = append(offsets, start) + } + } + if start < len(text) { + lines = append(lines, text[start:]) + offsets = append(offsets, len(text)) } - return lines + return lines, offsets } func addEqualLines(h *hunk, lines []string, start, end int) int { @@ -249,3 +264,51 @@ func (u unified) String() string { } return b.String() } + +// ApplyUnified applies the unified diffs. 
+func ApplyUnified(udiffs, bef string) (string, error) { + before := strings.Split(bef, "\n") + unif := strings.Split(udiffs, "\n") + var got []string + left := 0 + // parse and apply the unified diffs + for _, l := range unif { + if len(l) == 0 { + continue // probably the last line (from Split) + } + switch l[0] { + case '@': // The @@ line + m := atregexp.FindStringSubmatch(l) + fromLine, err := strconv.Atoi(m[1]) + if err != nil { + return "", fmt.Errorf("missing line number in %q", l) + } + // before is a slice, so0-based; fromLine is 1-based + for ; left < fromLine-1; left++ { + got = append(got, before[left]) + } + case '+': // add this line + if strings.HasPrefix(l, "+++ ") { + continue + } + got = append(got, l[1:]) + case '-': // delete this line + if strings.HasPrefix(l, "--- ") { + continue + } + left++ + case ' ': + return "", fmt.Errorf("unexpected line %q", l) + default: + return "", fmt.Errorf("impossible unified %q", udiffs) + } + } + // copy any remaining lines + for ; left < len(before); left++ { + got = append(got, before[left]) + } + return strings.Join(got, "\n"), nil +} + +// The first number in the @@ lines is the line number in the 'before' data +var atregexp = regexp.MustCompile(`@@ -(\d+).* @@`) diff --git a/vendor/golang.org/x/tools/internal/stdlib/deps.go b/vendor/golang.org/x/tools/internal/stdlib/deps.go index f7b9c1286..f41431c94 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/deps.go +++ b/vendor/golang.org/x/tools/internal/stdlib/deps.go @@ -12,364 +12,366 @@ type pkginfo struct { } var deps = [...]pkginfo{ - {"archive/tar", "\x03p\x03F=\x01\n\x01$\x01\x01\x02\x05\b\x02\x01\x02\x02\f"}, - {"archive/zip", "\x02\x04f\a\x03\x13\x021=\x01+\x05\x01\x0f\x03\x02\x0e\x04"}, - {"bufio", "\x03p\x86\x01D\x14"}, - {"bytes", "s+[\x03\fG\x02\x02"}, + {"archive/tar", "\x03q\x03F=\x01\n\x01$\x01\x01\x02\x05\b\x02\x01\x02\x02\r"}, + {"archive/zip", "\x02\x04g\a\x03\x13\x021=\x01+\x05\x01\x0f\x03\x02\x0f\x04"}, + {"bufio", "\x03q\x86\x01D\x15"}, + {"bytes", "t+[\x03\fH\x02\x02"}, {"cmp", ""}, - {"compress/bzip2", "\x02\x02\xf5\x01A"}, - {"compress/flate", "\x02q\x03\x83\x01\f\x033\x01\x03"}, - {"compress/gzip", "\x02\x04f\a\x03\x15nT"}, - {"compress/lzw", "\x02q\x03\x83\x01"}, - {"compress/zlib", "\x02\x04f\a\x03\x13\x01o"}, - {"container/heap", "\xbb\x02"}, + {"compress/bzip2", "\x02\x02\xf6\x01A"}, + {"compress/flate", "\x02r\x03\x83\x01\f\x033\x01\x03"}, + {"compress/gzip", "\x02\x04g\a\x03\x15nU"}, + {"compress/lzw", "\x02r\x03\x83\x01"}, + {"compress/zlib", "\x02\x04g\a\x03\x13\x01o"}, + {"container/heap", "\xbc\x02"}, {"container/list", ""}, {"container/ring", ""}, - {"context", "s\\p\x01\r"}, - {"crypto", "\x89\x01pC"}, - {"crypto/aes", "\x10\n\t\x99\x02"}, - {"crypto/cipher", "\x03 \x01\x01 \x12\x1c,Z"}, - {"crypto/des", "\x10\x15 .,\x9d\x01\x03"}, - {"crypto/dsa", "E\x04*\x86\x01\r"}, - {"crypto/ecdh", "\x03\v\f\x10\x04\x17\x04\x0e\x1c\x86\x01"}, - {"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x10\b\v\x06\x01\x04\r\x01\x1c\x86\x01\r\x05K\x01"}, - {"crypto/ed25519", "\x0e\x1e\x12\a\v\a\x1c\x86\x01C"}, - {"crypto/elliptic", "3@\x86\x01\r9"}, - {"crypto/fips140", "\"\x05"}, - {"crypto/hkdf", "/\x15\x01.\x16"}, - {"crypto/hmac", "\x1a\x16\x14\x01\x122"}, - {"crypto/internal/boring", "\x0e\x02\rl"}, - {"crypto/internal/boring/bbig", "\x1a\xec\x01M"}, - {"crypto/internal/boring/bcache", "\xc0\x02\x13"}, + {"context", "t\\p\x01\x0e"}, + {"crypto", "\x8a\x01pC"}, + {"crypto/aes", "\x10\v\t\x99\x02"}, + {"crypto/cipher", "\x03!\x01\x01 \x12\x1c,Z"}, + {"crypto/des", 
"\x10\x16 .,\x9d\x01\x03"}, + {"crypto/dsa", "F\x03+\x86\x01\r"}, + {"crypto/ecdh", "\x03\v\r\x10\x04\x17\x03\x0f\x1c\x86\x01"}, + {"crypto/ecdsa", "\x0e\x05\x03\x05\x01\x10\b\v\x06\x01\x03\x0e\x01\x1c\x86\x01\r\x05L\x01"}, + {"crypto/ed25519", "\x0e\x1f\x12\a\x03\b\a\x1cI=C"}, + {"crypto/elliptic", "4@\x86\x01\r9"}, + {"crypto/fips140", "#\x05\x95\x01\x98\x01"}, + {"crypto/hkdf", "0\x15\x01.\x16"}, + {"crypto/hmac", "\x1b\x16\x14\x01\x122"}, + {"crypto/hpke", "\x03\v\x02\x03\x04\x01\f\x01\x05\x1f\x05\a\x01\x01\x1d\x03\x13\x16\x9b\x01\x1c"}, + {"crypto/internal/boring", "\x0e\x02\x0el"}, + {"crypto/internal/boring/bbig", "\x1b\xec\x01N"}, + {"crypto/internal/boring/bcache", "\xc1\x02\x14"}, {"crypto/internal/boring/sig", ""}, {"crypto/internal/constanttime", ""}, - {"crypto/internal/cryptotest", "\x03\r\n\b&\x0f\x19\x06\x13\x12 \x04\x06\t\x19\x01\x11\x11\x1b\x01\a\x05\b\x03\x05\v"}, - {"crypto/internal/entropy", "J"}, - {"crypto/internal/entropy/v1.0.0", "C0\x95\x018\x13"}, - {"crypto/internal/fips140", "B1\xbf\x01\v\x16"}, - {"crypto/internal/fips140/aes", "\x03\x1f\x03\x02\x14\x05\x01\x01\x06+\x95\x014"}, - {"crypto/internal/fips140/aes/gcm", "\"\x01\x02\x02\x02\x12\x05\x01\a+\x92\x01"}, - {"crypto/internal/fips140/alias", "\xd3\x02"}, - {"crypto/internal/fips140/bigmod", "'\x19\x01\a+\x95\x01"}, - {"crypto/internal/fips140/check", "\"\x0e\a\t\x02\xb7\x01Z"}, - {"crypto/internal/fips140/check/checktest", "'\x8b\x02!"}, - {"crypto/internal/fips140/drbg", "\x03\x1e\x01\x01\x04\x14\x05\t\x01)\x86\x01\x0f7\x01"}, - {"crypto/internal/fips140/ecdh", "\x03\x1f\x05\x02\n\r3\x86\x01\x0f7"}, - {"crypto/internal/fips140/ecdsa", "\x03\x1f\x04\x01\x02\a\x03\x06:\x16pF"}, - {"crypto/internal/fips140/ed25519", "\x03\x1f\x05\x02\x04\f:\xc9\x01\x03"}, - {"crypto/internal/fips140/edwards25519", "\x1e\t\a\x123\x95\x017"}, - {"crypto/internal/fips140/edwards25519/field", "'\x14\x053\x95\x01"}, - {"crypto/internal/fips140/hkdf", "\x03\x1f\x05\t\a<\x16"}, - {"crypto/internal/fips140/hmac", "\x03\x1f\x15\x01\x01:\x16"}, - {"crypto/internal/fips140/mldsa", "\x03\x1b\x04\x05\x02\x0e\x01\x03\x053\x95\x017"}, - {"crypto/internal/fips140/mlkem", "\x03\x1f\x05\x02\x0f\x03\x053\xcc\x01"}, - {"crypto/internal/fips140/nistec", "\x1e\t\r\f3\x95\x01*\r\x14"}, - {"crypto/internal/fips140/nistec/fiat", "'\x148\x95\x01"}, - {"crypto/internal/fips140/pbkdf2", "\x03\x1f\x05\t\a<\x16"}, - {"crypto/internal/fips140/rsa", "\x03\x1b\x04\x04\x01\x02\x0e\x01\x01\x028\x16pF"}, - {"crypto/internal/fips140/sha256", "\x03\x1f\x1e\x01\a+\x16\x7f"}, - {"crypto/internal/fips140/sha3", "\x03\x1f\x19\x05\x012\x95\x01K"}, - {"crypto/internal/fips140/sha512", "\x03\x1f\x1e\x01\a+\x16\x7f"}, - {"crypto/internal/fips140/ssh", "'b"}, - {"crypto/internal/fips140/subtle", "\x1e\a\x1b\xc8\x01"}, - {"crypto/internal/fips140/tls12", "\x03\x1f\x05\t\a\x02:\x16"}, - {"crypto/internal/fips140/tls13", "\x03\x1f\x05\b\b\t3\x16"}, - {"crypto/internal/fips140cache", "\xb2\x02\r&"}, + {"crypto/internal/cryptotest", "\x03\r\v\b%\x10\x19\x06\x13\x12 \x04\x06\t\x19\x01\x11\x11\x1b\x01\a\x05\b\x03\x05\f"}, + {"crypto/internal/entropy", "K"}, + {"crypto/internal/entropy/v1.0.0", "D0\x95\x018\x14"}, + {"crypto/internal/fips140", "C1\xbf\x01\v\x17"}, + {"crypto/internal/fips140/aes", "\x03 \x03\x02\x14\x05\x01\x01\x05,\x95\x014"}, + {"crypto/internal/fips140/aes/gcm", "#\x01\x02\x02\x02\x12\x05\x01\x06,\x92\x01"}, + {"crypto/internal/fips140/alias", "\xd5\x02"}, + {"crypto/internal/fips140/bigmod", "(\x19\x01\x06,\x95\x01"}, + {"crypto/internal/fips140/check", 
"#\x0e\a\t\x02\xb7\x01["}, + {"crypto/internal/fips140/check/checktest", "(\x8b\x02\""}, + {"crypto/internal/fips140/drbg", "\x03\x1f\x01\x01\x04\x14\x05\n)\x86\x01\x0f7\x01"}, + {"crypto/internal/fips140/ecdh", "\x03 \x05\x02\n\r3\x86\x01\x0f7"}, + {"crypto/internal/fips140/ecdsa", "\x03 \x04\x01\x02\a\x03\x06:\x16pF"}, + {"crypto/internal/fips140/ed25519", "\x03 \x05\x02\x04\f:\xc9\x01\x03"}, + {"crypto/internal/fips140/edwards25519", "\x1f\t\a\x123\x95\x017"}, + {"crypto/internal/fips140/edwards25519/field", "(\x14\x053\x95\x01"}, + {"crypto/internal/fips140/hkdf", "\x03 \x05\t\a<\x16"}, + {"crypto/internal/fips140/hmac", "\x03 \x15\x01\x01:\x16"}, + {"crypto/internal/fips140/mldsa", "\x03\x1c\x04\x05\x02\x0e\x01\x03\x053\x95\x017"}, + {"crypto/internal/fips140/mlkem", "\x03 \x05\x02\x0f\x03\x053\xcc\x01"}, + {"crypto/internal/fips140/nistec", "\x1f\t\r\f3\x95\x01*\r\x15"}, + {"crypto/internal/fips140/nistec/fiat", "(\x148\x95\x01"}, + {"crypto/internal/fips140/pbkdf2", "\x03 \x05\t\a<\x16"}, + {"crypto/internal/fips140/rsa", "\x03\x1c\x04\x04\x01\x02\x0e\x01\x01\x028\x16pF"}, + {"crypto/internal/fips140/sha256", "\x03 \x1e\x01\x06,\x16\x7f"}, + {"crypto/internal/fips140/sha3", "\x03 \x19\x05\x012\x95\x01L"}, + {"crypto/internal/fips140/sha512", "\x03 \x1e\x01\x06,\x16\x7f"}, + {"crypto/internal/fips140/ssh", "(b"}, + {"crypto/internal/fips140/subtle", "\x1f\a\x1b\xc8\x01"}, + {"crypto/internal/fips140/tls12", "\x03 \x05\t\a\x02:\x16"}, + {"crypto/internal/fips140/tls13", "\x03 \x05\b\b\t3\x16"}, + {"crypto/internal/fips140cache", "\xb3\x02\r'"}, {"crypto/internal/fips140deps", ""}, - {"crypto/internal/fips140deps/byteorder", "\x9f\x01"}, - {"crypto/internal/fips140deps/cpu", "\xb4\x01\a"}, - {"crypto/internal/fips140deps/godebug", "\xbc\x01"}, - {"crypto/internal/fips140deps/time", "\xcd\x02"}, - {"crypto/internal/fips140hash", "8\x1d4\xca\x01"}, - {"crypto/internal/fips140only", ")\x0e\x01\x01P3="}, + {"crypto/internal/fips140deps/byteorder", "\xa0\x01"}, + {"crypto/internal/fips140deps/cpu", "\xb5\x01\a"}, + {"crypto/internal/fips140deps/godebug", "\xbd\x01"}, + {"crypto/internal/fips140deps/time", "\xcf\x02"}, + {"crypto/internal/fips140hash", "9\x1d4\xcb\x01"}, + {"crypto/internal/fips140only", "\x17\x13\x0e\x01\x01Pp"}, {"crypto/internal/fips140test", ""}, - {"crypto/internal/hpke", "\x03\v\x01\x01\x03\x055\x03\x04\x01\x01\x16\a\x03\x13\xcc\x01"}, - {"crypto/internal/impl", "\xbd\x02"}, - {"crypto/internal/randutil", "\xf9\x01\x12"}, - {"crypto/internal/sysrand", "sq! 
\r\r\x01\x01\f\x06"}, - {"crypto/internal/sysrand/internal/seccomp", "s"}, - {"crypto/md5", "\x0e7.\x16\x16i"}, - {"crypto/mlkem", "\x0e$"}, - {"crypto/mlkem/mlkemtest", "2\x1b&"}, - {"crypto/pbkdf2", "5\x0f\x01.\x16"}, - {"crypto/rand", "\x1a\b\a\x1c\x04\x01)\x86\x01\rM"}, - {"crypto/rc4", "% .\xc9\x01"}, - {"crypto/rsa", "\x0e\f\x01\v\x10\x0e\x01\x04\a\a\x1c\x03\x133=\f\x01"}, - {"crypto/sha1", "\x0e\f+\x03+\x16\x16\x15T"}, - {"crypto/sha256", "\x0e\f\x1dR"}, - {"crypto/sha3", "\x0e*Q\xca\x01"}, - {"crypto/sha512", "\x0e\f\x1fP"}, - {"crypto/subtle", "\x1e\x1d\x9f\x01X"}, - {"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x02\x01\x01\t\x01\x0e\n\x01\n\x05\x04\x01\x01\x01\x01\x02\x01\x02\x01\x17\x02\x03\x13\x16\x15\b=\x16\x16\r\b\x01\x01\x01\x02\x01\r\x06\x02\x01\x0f"}, - {"crypto/tls/internal/fips140tls", "\x17\xa9\x02"}, - {"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x016\x06\x01\x01\x02\x05\x0e\x06\x02\x02\x03F\x03:\x01\x02\b\x01\x01\x02\a\x10\x05\x01\x06\a\b\x02\x01\x02\x0e\x02\x01\x01\x02\x03\x01"}, - {"crypto/x509/pkix", "i\x06\a\x90\x01G"}, - {"database/sql", "\x03\nP\x16\x03\x83\x01\v\a\"\x05\b\x02\x03\x01\r\x02\x02\x02"}, - {"database/sql/driver", "\rf\x03\xb7\x01\x0f\x11"}, - {"debug/buildinfo", "\x03]\x02\x01\x01\b\a\x03g\x1a\x02\x01+\x0f\x1f"}, - {"debug/dwarf", "\x03i\a\x03\x83\x011\x11\x01\x01"}, - {"debug/elf", "\x03\x06V\r\a\x03g\x1b\x01\f \x17\x01\x16"}, - {"debug/gosym", "\x03i\n\xc5\x01\x01\x01\x02"}, - {"debug/macho", "\x03\x06V\r\ng\x1c,\x17\x01"}, - {"debug/pe", "\x03\x06V\r\a\x03g\x1c,\x17\x01\x16"}, - {"debug/plan9obj", "l\a\x03g\x1c,"}, - {"embed", "s+B\x19\x01S"}, + {"crypto/internal/impl", "\xbe\x02"}, + {"crypto/internal/rand", "\x1b\x0f s=["}, + {"crypto/internal/randutil", "\xfa\x01\x12"}, + {"crypto/internal/sysrand", "tq! 
\r\r\x01\x01\r\x06"}, + {"crypto/internal/sysrand/internal/seccomp", "t"}, + {"crypto/md5", "\x0e8.\x16\x16i"}, + {"crypto/mlkem", "\x0e%"}, + {"crypto/mlkem/mlkemtest", "3\x13\b&"}, + {"crypto/pbkdf2", "6\x0f\x01.\x16"}, + {"crypto/rand", "\x1b\x0f\x1c\x03+\x86\x01\rN"}, + {"crypto/rc4", "& .\xc9\x01"}, + {"crypto/rsa", "\x0e\r\x01\v\x10\x0e\x01\x03\b\a\x1c\x03\x133=\f\x01"}, + {"crypto/sha1", "\x0e\r+\x02,\x16\x16\x15T"}, + {"crypto/sha256", "\x0e\r\x1dR"}, + {"crypto/sha3", "\x0e+Q\xcb\x01"}, + {"crypto/sha512", "\x0e\r\x1fP"}, + {"crypto/subtle", "\x1f\x1d\x9f\x01z"}, + {"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x02\x01\x01\x01\t\x01\x18\x01\x0f\x01\x03\x01\x01\x01\x01\x02\x01\x02\x01\x17\x02\x03\x13\x16\x15\b=\x16\x16\r\b\x01\x01\x01\x02\x01\x0e\x06\x02\x01\x0f"}, + {"crypto/tls/internal/fips140tls", "\x17\xaa\x02"}, + {"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x017\x06\x01\x01\x02\x05\x0e\x06\x02\x02\x03F\x03:\x01\x02\b\x01\x01\x02\a\x10\x05\x01\x06\a\b\x02\x01\x02\x0f\x02\x01\x01\x02\x03\x01"}, + {"crypto/x509/pkix", "j\x06\a\x90\x01H"}, + {"database/sql", "\x03\nQ\x16\x03\x83\x01\v\a\"\x05\b\x02\x03\x01\x0e\x02\x02\x02"}, + {"database/sql/driver", "\rg\x03\xb7\x01\x0f\x12"}, + {"debug/buildinfo", "\x03^\x02\x01\x01\b\a\x03g\x1a\x02\x01+\x0f "}, + {"debug/dwarf", "\x03j\a\x03\x83\x011\x11\x01\x01"}, + {"debug/elf", "\x03\x06W\r\a\x03g\x1b\x01\f \x17\x01\x17"}, + {"debug/gosym", "\x03j\n$\xa1\x01\x01\x01\x02"}, + {"debug/macho", "\x03\x06W\r\ng\x1c,\x17\x01"}, + {"debug/pe", "\x03\x06W\r\a\x03g\x1c,\x17\x01\x17"}, + {"debug/plan9obj", "m\a\x03g\x1c,"}, + {"embed", "t+B\x19\x01T"}, {"embed/internal/embedtest", ""}, {"encoding", ""}, - {"encoding/ascii85", "\xf9\x01C"}, - {"encoding/asn1", "\x03p\x03g(\x01'\r\x02\x01\x10\x03\x01"}, - {"encoding/base32", "\xf9\x01A\x02"}, - {"encoding/base64", "\x9f\x01ZA\x02"}, - {"encoding/binary", "s\x86\x01\f(\r\x05"}, - {"encoding/csv", "\x02\x01p\x03\x83\x01D\x12\x02"}, - {"encoding/gob", "\x02e\x05\a\x03g\x1c\v\x01\x03\x1d\b\x12\x01\x0f\x02"}, - {"encoding/hex", "s\x03\x83\x01A\x03"}, - {"encoding/json", "\x03\x01c\x04\b\x03\x83\x01\f(\r\x02\x01\x02\x10\x01\x01\x02"}, - {"encoding/pem", "\x03h\b\x86\x01A\x03"}, - {"encoding/xml", "\x02\x01d\f\x03\x83\x014\x05\n\x01\x02\x10\x02"}, - {"errors", "\xcf\x01\x84\x01"}, - {"expvar", "pLA\b\v\x15\r\b\x02\x03\x01\x11"}, - {"flag", "g\f\x03\x83\x01,\b\x05\b\x02\x01\x10"}, - {"fmt", "sF'\x19\f \b\r\x02\x03\x12"}, - {"go/ast", "\x03\x01r\x0f\x01s\x03)\b\r\x02\x01\x12\x02"}, - {"go/build", "\x02\x01p\x03\x01\x02\x02\b\x02\x01\x17\x1f\x04\x02\b\x1c\x13\x01+\x01\x04\x01\a\b\x02\x01\x12\x02\x02"}, - {"go/build/constraint", "s\xc9\x01\x01\x12\x02"}, - {"go/constant", "v\x10\x7f\x01\x024\x01\x02\x12"}, - {"go/doc", "\x04r\x01\x05\n=61\x10\x02\x01\x12\x02"}, - {"go/doc/comment", "\x03s\xc4\x01\x01\x01\x01\x12\x02"}, - {"go/format", "\x03s\x01\f\x01\x02sD"}, - {"go/importer", "x\a\x01\x02\x04\x01r9"}, - {"go/internal/gccgoimporter", "\x02\x01]\x13\x03\x04\f\x01p\x02,\x01\x05\x11\x01\f\b"}, - {"go/internal/gcimporter", "\x02t\x10\x010\x05\r0,\x15\x03\x02"}, - {"go/internal/scannerhooks", "\x86\x01"}, - {"go/internal/srcimporter", "v\x01\x01\v\x03\x01r,\x01\x05\x12\x02\x14"}, - {"go/parser", "\x03p\x03\x01\x02\b\x04\x01s\x01+\x06\x12"}, - {"go/printer", "v\x01\x02\x03\ns\f \x15\x02\x01\x02\v\x05\x02"}, - {"go/scanner", "\x03s\v\x05s2\x10\x01\x13\x02"}, - {"go/token", "\x04r\x86\x01>\x02\x03\x01\x0f\x02"}, - {"go/types", "\x03\x01\x06i\x03\x01\x03\t\x03\x024\x063\x04\x03\t 
\x06\a\b\x01\x01\x01\x02\x01\x0f\x02\x02"}, - {"go/version", "\xc1\x01|"}, - {"hash", "\xf9\x01"}, - {"hash/adler32", "s\x16\x16"}, - {"hash/crc32", "s\x16\x16\x15\x8b\x01\x01\x13"}, - {"hash/crc64", "s\x16\x16\xa0\x01"}, - {"hash/fnv", "s\x16\x16i"}, - {"hash/maphash", "\x89\x01\x11<}"}, - {"html", "\xbd\x02\x02\x12"}, - {"html/template", "\x03m\x06\x19-=\x01\n!\x05\x01\x02\x03\f\x01\x02\f\x01\x03\x02"}, - {"image", "\x02q\x1fg\x0f4\x03\x01"}, + {"encoding/ascii85", "\xfa\x01C"}, + {"encoding/asn1", "\x03q\x03g(\x01'\r\x02\x01\x11\x03\x01"}, + {"encoding/base32", "\xfa\x01A\x02"}, + {"encoding/base64", "\xa0\x01ZA\x02"}, + {"encoding/binary", "t\x86\x01\f(\r\x05"}, + {"encoding/csv", "\x02\x01q\x03\x83\x01D\x13\x02"}, + {"encoding/gob", "\x02f\x05\a\x03g\x1c\v\x01\x03\x1d\b\x12\x01\x10\x02"}, + {"encoding/hex", "t\x03\x83\x01A\x03"}, + {"encoding/json", "\x03\x01d\x04\b\x03\x83\x01\f(\r\x02\x01\x02\x11\x01\x01\x02"}, + {"encoding/pem", "\x03i\b\x86\x01A\x03"}, + {"encoding/xml", "\x02\x01e\f\x03\x83\x014\x05\n\x01\x02\x11\x02"}, + {"errors", "\xd0\x01\x85\x01"}, + {"expvar", "qLA\b\v\x15\r\b\x02\x03\x01\x12"}, + {"flag", "h\f\x03\x83\x01,\b\x05\b\x02\x01\x11"}, + {"fmt", "tF'\x19\f \b\r\x02\x03\x13"}, + {"go/ast", "\x03\x01s\x0f\x01s\x03)\b\r\x02\x01\x13\x02"}, + {"go/build", "\x02\x01q\x03\x01\x02\x02\b\x02\x01\x17\x1f\x04\x02\b\x1c\x13\x01+\x01\x04\x01\a\b\x02\x01\x13\x02\x02"}, + {"go/build/constraint", "t\xc9\x01\x01\x13\x02"}, + {"go/constant", "w\x10\x7f\x01\x024\x01\x02\x13"}, + {"go/doc", "\x04s\x01\x05\n=61\x10\x02\x01\x13\x02"}, + {"go/doc/comment", "\x03t\xc4\x01\x01\x01\x01\x13\x02"}, + {"go/format", "\x03t\x01\f\x01\x02sD"}, + {"go/importer", "y\a\x01\x02\x04\x01r9"}, + {"go/internal/gccgoimporter", "\x02\x01^\x13\x03\x04\f\x01p\x02,\x01\x05\x11\x01\r\b"}, + {"go/internal/gcimporter", "\x02u\x10\x010\x05\r0,\x15\x03\x02"}, + {"go/internal/scannerhooks", "\x87\x01"}, + {"go/internal/srcimporter", "w\x01\x01\v\x03\x01r,\x01\x05\x12\x02\x15"}, + {"go/parser", "\x03q\x03\x01\x02\b\x04\x01s\x01+\x06\x12"}, + {"go/printer", "w\x01\x02\x03\ns\f \x15\x02\x01\x02\f\x05\x02"}, + {"go/scanner", "\x03t\v\x05s2\x10\x01\x14\x02"}, + {"go/token", "\x04s\x86\x01>\x02\x03\x01\x10\x02"}, + {"go/types", "\x03\x01\x06j\x03\x01\x03\t\x03\x024\x063\x04\x03\t \x06\a\b\x01\x01\x01\x02\x01\x10\x02\x02"}, + {"go/version", "\xc2\x01|"}, + {"hash", "\xfa\x01"}, + {"hash/adler32", "t\x16\x16"}, + {"hash/crc32", "t\x16\x16\x15\x8b\x01\x01\x14"}, + {"hash/crc64", "t\x16\x16\xa0\x01"}, + {"hash/fnv", "t\x16\x16i"}, + {"hash/maphash", "\x8a\x01\x11<~"}, + {"html", "\xbe\x02\x02\x13"}, + {"html/template", "\x03n\x06\x19-=\x01\n!\x05\x01\x02\x03\f\x01\x02\r\x01\x03\x02"}, + {"image", "\x02r\x1fg\x0f4\x03\x01"}, {"image/color", ""}, - {"image/color/palette", "\x92\x01"}, - {"image/draw", "\x91\x01\x01\x04"}, - {"image/gif", "\x02\x01\x05k\x03\x1b\x01\x01\x01\vZ\x0f"}, - {"image/internal/imageutil", "\x91\x01"}, - {"image/jpeg", "\x02q\x1e\x01\x04c"}, - {"image/png", "\x02\ac\n\x13\x02\x06\x01gC"}, - {"index/suffixarray", "\x03i\a\x86\x01\f+\n\x01"}, - {"internal/abi", "\xbb\x01\x98\x01"}, - {"internal/asan", "\xd3\x02"}, - {"internal/bisect", "\xb2\x02\r\x01"}, - {"internal/buildcfg", "vHg\x06\x02\x05\n\x01"}, - {"internal/bytealg", "\xb4\x01\x9f\x01"}, + {"image/color/palette", "\x93\x01"}, + {"image/draw", "\x92\x01\x01\x04"}, + {"image/gif", "\x02\x01\x05l\x03\x1b\x01\x01\x01\vZ\x0f"}, + {"image/internal/imageutil", "\x92\x01"}, + {"image/jpeg", "\x02r\x1e\x01\x04c"}, + {"image/png", 
"\x02\ad\n\x13\x02\x06\x01gC"}, + {"index/suffixarray", "\x03j\a\x86\x01\f+\n\x01"}, + {"internal/abi", "\xbc\x01\x99\x01"}, + {"internal/asan", "\xd5\x02"}, + {"internal/bisect", "\xb3\x02\r\x01"}, + {"internal/buildcfg", "wHg\x06\x02\x05\n\x01"}, + {"internal/bytealg", "\xb5\x01\xa0\x01"}, {"internal/byteorder", ""}, {"internal/cfg", ""}, - {"internal/cgrouptest", "v[T\x06\x0f\x02\x01\x04\x01"}, - {"internal/chacha8rand", "\x9f\x01\x15\a\x98\x01"}, + {"internal/cgrouptest", "w[T\x06\x0f\x02\x01\x04\x01"}, + {"internal/chacha8rand", "\xa0\x01\x15\a\x99\x01"}, {"internal/copyright", ""}, {"internal/coverage", ""}, {"internal/coverage/calloc", ""}, - {"internal/coverage/cfile", "p\x06\x17\x17\x01\x02\x01\x01\x01\x01\x01\x01\x01\"\x02',\x06\a\n\x01\x03\r\x06"}, - {"internal/coverage/cformat", "\x04r.\x04Q\v6\x01\x02\r"}, - {"internal/coverage/cmerge", "v.a"}, - {"internal/coverage/decodecounter", "l\n.\v\x02H,\x17\x17"}, - {"internal/coverage/decodemeta", "\x02j\n\x17\x17\v\x02H,"}, - {"internal/coverage/encodecounter", "\x02j\n.\f\x01\x02F\v!\x15"}, - {"internal/coverage/encodemeta", "\x02\x01i\n\x13\x04\x17\r\x02F,."}, - {"internal/coverage/pods", "\x04r.\x81\x01\x06\x05\n\x02\x01"}, - {"internal/coverage/rtcov", "\xd3\x02"}, - {"internal/coverage/slicereader", "l\n\x83\x01Z"}, - {"internal/coverage/slicewriter", "v\x83\x01"}, - {"internal/coverage/stringtab", "v9\x04F"}, + {"internal/coverage/cfile", "q\x06\x17\x17\x01\x02\x01\x01\x01\x01\x01\x01\x01\"\x02',\x06\a\n\x01\x03\x0e\x06"}, + {"internal/coverage/cformat", "\x04s.\x04Q\v6\x01\x02\x0e"}, + {"internal/coverage/cmerge", "w.a"}, + {"internal/coverage/decodecounter", "m\n.\v\x02H,\x17\x18"}, + {"internal/coverage/decodemeta", "\x02k\n\x17\x17\v\x02H,"}, + {"internal/coverage/encodecounter", "\x02k\n.\f\x01\x02F\v!\x15"}, + {"internal/coverage/encodemeta", "\x02\x01j\n\x13\x04\x17\r\x02F,/"}, + {"internal/coverage/pods", "\x04s.\x81\x01\x06\x05\n\x02\x01"}, + {"internal/coverage/rtcov", "\xd5\x02"}, + {"internal/coverage/slicereader", "m\n\x83\x01["}, + {"internal/coverage/slicewriter", "w\x83\x01"}, + {"internal/coverage/stringtab", "w9\x04F"}, {"internal/coverage/test", ""}, {"internal/coverage/uleb128", ""}, - {"internal/cpu", "\xd3\x02"}, - {"internal/dag", "\x04r\xc4\x01\x03"}, - {"internal/diff", "\x03s\xc5\x01\x02"}, - {"internal/exportdata", "\x02\x01p\x03\x02e\x1c,\x01\x05\x11\x01\x02"}, - {"internal/filepathlite", "s+B\x1a@"}, - {"internal/fmtsort", "\x04\xa9\x02\r"}, - {"internal/fuzz", "\x03\nG\x18\x04\x03\x03\x01\f\x036=\f\x03\x1d\x01\x05\x02\x05\n\x01\x02\x01\x01\f\x04\x02"}, + {"internal/cpu", "\xd5\x02"}, + {"internal/dag", "\x04s\xc4\x01\x03"}, + {"internal/diff", "\x03t\xc5\x01\x02"}, + {"internal/exportdata", "\x02\x01q\x03\x02e\x1c,\x01\x05\x11\x01\x02"}, + {"internal/filepathlite", "t+B\x1a@"}, + {"internal/fmtsort", "\x04\xaa\x02\r"}, + {"internal/fuzz", "\x03\nH\x18\x04\x03\x03\x01\f\x036=\f\x03\x1d\x01\x05\x02\x05\n\x01\x02\x01\x01\r\x04\x02"}, {"internal/goarch", ""}, - {"internal/godebug", "\x9c\x01!\x82\x01\x01\x13"}, + {"internal/godebug", "\x9d\x01!\x82\x01\x01\x14"}, {"internal/godebugs", ""}, {"internal/goexperiment", ""}, {"internal/goos", ""}, - {"internal/goroot", "\xa5\x02\x01\x05\x12\x02"}, + {"internal/goroot", "\xa6\x02\x01\x05\x12\x02"}, {"internal/gover", "\x04"}, {"internal/goversion", ""}, - {"internal/lazyregexp", "\xa5\x02\v\r\x02"}, - {"internal/lazytemplate", "\xf9\x01,\x18\x02\f"}, - {"internal/msan", "\xd3\x02"}, + {"internal/lazyregexp", "\xa6\x02\v\r\x02"}, + {"internal/lazytemplate", 
"\xfa\x01,\x18\x02\r"}, + {"internal/msan", "\xd5\x02"}, {"internal/nettrace", ""}, - {"internal/obscuretestdata", "k\x8e\x01,"}, - {"internal/oserror", "s"}, - {"internal/pkgbits", "\x03Q\x18\a\x03\x04\fs\r\x1f\r\n\x01"}, + {"internal/obscuretestdata", "l\x8e\x01,"}, + {"internal/oserror", "t"}, + {"internal/pkgbits", "\x03R\x18\a\x03\x04\fs\r\x1f\r\n\x01"}, {"internal/platform", ""}, - {"internal/poll", "sl\x05\x159\r\x01\x01\f\x06"}, - {"internal/profile", "\x03\x04l\x03\x83\x017\n\x01\x01\x01\x10"}, + {"internal/poll", "tl\x05\x159\r\x01\x01\r\x06"}, + {"internal/profile", "\x03\x04m\x03\x83\x017\n\x01\x01\x01\x11"}, {"internal/profilerecord", ""}, - {"internal/race", "\x9a\x01\xb9\x01"}, - {"internal/reflectlite", "\x9a\x01!;\x16"}, - {"vendor/golang.org/x/text/unicode/norm", "l\n\x83\x01F\x12\x11"}, - {"weak", "\x9a\x01\x98\x01!"}, + {"vendor/golang.org/x/crypto/internal/alias", "\xd5\x02"}, + {"vendor/golang.org/x/crypto/internal/poly1305", "X\x15\x9c\x01"}, + {"vendor/golang.org/x/net/dns/dnsmessage", "t\xc7\x01"}, + {"vendor/golang.org/x/net/http/httpguts", "\x90\x02\x14\x1a\x15\r"}, + {"vendor/golang.org/x/net/http/httpproxy", "t\x03\x99\x01\x10\x05\x01\x18\x15\r"}, + {"vendor/golang.org/x/net/http2/hpack", "\x03q\x03\x83\x01F"}, + {"vendor/golang.org/x/net/idna", "w\x8f\x018\x15\x10\x02\x01"}, + {"vendor/golang.org/x/net/nettest", "\x03j\a\x03\x83\x01\x11\x05\x16\x01\f\n\x01\x02\x02\x01\f"}, + {"vendor/golang.org/x/sys/cpu", "\xa6\x02\r\n\x01\x17"}, + {"vendor/golang.org/x/text/secure/bidirule", "t\xdf\x01\x11\x01"}, + {"vendor/golang.org/x/text/transform", "\x03q\x86\x01Y"}, + {"vendor/golang.org/x/text/unicode/bidi", "\x03\bl\x87\x01>\x17"}, + {"vendor/golang.org/x/text/unicode/norm", "m\n\x83\x01F\x13\x11"}, + {"weak", "\x9b\x01\x98\x01\""}, } // bootstrap is the list of bootstrap packages extracted from cmd/dist. 
diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go index f1e24625a..33e4f505f 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go +++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go @@ -626,7 +626,7 @@ var PackageSymbols = map[string][]Symbol{ {"PublicKey", Type, 0, ""}, {"PublicKey.Parameters", Field, 0, ""}, {"PublicKey.Y", Field, 0, ""}, - {"Sign", Func, 0, "func(rand io.Reader, priv *PrivateKey, hash []byte) (r *big.Int, s *big.Int, err error)"}, + {"Sign", Func, 0, "func(random io.Reader, priv *PrivateKey, hash []byte) (r *big.Int, s *big.Int, err error)"}, {"Verify", Func, 0, "func(pub *PublicKey, hash []byte, r *big.Int, s *big.Int) bool"}, }, "crypto/ecdh": { @@ -674,7 +674,7 @@ var PackageSymbols = map[string][]Symbol{ {"(PublicKey).Params", Method, 0, ""}, {"(PublicKey).ScalarBaseMult", Method, 0, ""}, {"(PublicKey).ScalarMult", Method, 0, ""}, - {"GenerateKey", Func, 0, "func(c elliptic.Curve, rand io.Reader) (*PrivateKey, error)"}, + {"GenerateKey", Func, 0, "func(c elliptic.Curve, r io.Reader) (*PrivateKey, error)"}, {"ParseRawPrivateKey", Func, 25, "func(curve elliptic.Curve, data []byte) (*PrivateKey, error)"}, {"ParseUncompressedPublicKey", Func, 25, "func(curve elliptic.Curve, data []byte) (*PublicKey, error)"}, {"PrivateKey", Type, 0, ""}, @@ -685,7 +685,7 @@ var PackageSymbols = map[string][]Symbol{ {"PublicKey.X", Field, 0, ""}, {"PublicKey.Y", Field, 0, ""}, {"Sign", Func, 0, "func(rand io.Reader, priv *PrivateKey, hash []byte) (r *big.Int, s *big.Int, err error)"}, - {"SignASN1", Func, 15, "func(rand io.Reader, priv *PrivateKey, hash []byte) ([]byte, error)"}, + {"SignASN1", Func, 15, "func(r io.Reader, priv *PrivateKey, hash []byte) ([]byte, error)"}, {"Verify", Func, 0, "func(pub *PublicKey, hash []byte, r *big.Int, s *big.Int) bool"}, {"VerifyASN1", Func, 15, "func(pub *PublicKey, hash []byte, sig []byte) bool"}, }, @@ -696,7 +696,7 @@ var PackageSymbols = map[string][]Symbol{ {"(PrivateKey).Seed", Method, 13, ""}, {"(PrivateKey).Sign", Method, 13, ""}, {"(PublicKey).Equal", Method, 15, ""}, - {"GenerateKey", Func, 13, "func(rand io.Reader) (PublicKey, PrivateKey, error)"}, + {"GenerateKey", Func, 13, "func(random io.Reader) (PublicKey, PrivateKey, error)"}, {"NewKeyFromSeed", Func, 13, "func(seed []byte) PrivateKey"}, {"Options", Type, 20, ""}, {"Options.Context", Field, 20, ""}, @@ -745,7 +745,9 @@ var PackageSymbols = map[string][]Symbol{ }, "crypto/fips140": { {"Enabled", Func, 24, "func() bool"}, + {"Enforced", Func, 26, "func() bool"}, {"Version", Func, 26, "func() string"}, + {"WithoutEnforcement", Func, 26, "func(f func())"}, }, "crypto/hkdf": { {"Expand", Func, 24, "func[H hash.Hash](h func() H, pseudorandomKey []byte, info string, keyLength int) ([]byte, error)"}, @@ -756,6 +758,54 @@ var PackageSymbols = map[string][]Symbol{ {"Equal", Func, 1, "func(mac1 []byte, mac2 []byte) bool"}, {"New", Func, 0, "func(h func() hash.Hash, key []byte) hash.Hash"}, }, + "crypto/hpke": { + {"(*Recipient).Export", Method, 26, ""}, + {"(*Recipient).Open", Method, 26, ""}, + {"(*Sender).Export", Method, 26, ""}, + {"(*Sender).Seal", Method, 26, ""}, + {"(AEAD).ID", Method, 26, ""}, + {"(KDF).ID", Method, 26, ""}, + {"(KEM).DeriveKeyPair", Method, 26, ""}, + {"(KEM).GenerateKey", Method, 26, ""}, + {"(KEM).ID", Method, 26, ""}, + {"(KEM).NewPrivateKey", Method, 26, ""}, + {"(KEM).NewPublicKey", Method, 26, ""}, + {"(PrivateKey).Bytes", Method, 26, ""}, + {"(PrivateKey).KEM", 
Method, 26, ""}, + {"(PrivateKey).PublicKey", Method, 26, ""}, + {"(PublicKey).Bytes", Method, 26, ""}, + {"(PublicKey).KEM", Method, 26, ""}, + {"AES128GCM", Func, 26, "func() AEAD"}, + {"AES256GCM", Func, 26, "func() AEAD"}, + {"ChaCha20Poly1305", Func, 26, "func() AEAD"}, + {"DHKEM", Func, 26, "func(curve ecdh.Curve) KEM"}, + {"ExportOnly", Func, 26, "func() AEAD"}, + {"HKDFSHA256", Func, 26, "func() KDF"}, + {"HKDFSHA384", Func, 26, "func() KDF"}, + {"HKDFSHA512", Func, 26, "func() KDF"}, + {"MLKEM1024", Func, 26, "func() KEM"}, + {"MLKEM1024P384", Func, 26, "func() KEM"}, + {"MLKEM768", Func, 26, "func() KEM"}, + {"MLKEM768P256", Func, 26, "func() KEM"}, + {"MLKEM768X25519", Func, 26, "func() KEM"}, + {"NewAEAD", Func, 26, "func(id uint16) (AEAD, error)"}, + {"NewDHKEMPrivateKey", Func, 26, "func(priv ecdh.KeyExchanger) (PrivateKey, error)"}, + {"NewDHKEMPublicKey", Func, 26, "func(pub *ecdh.PublicKey) (PublicKey, error)"}, + {"NewHybridPrivateKey", Func, 26, "func(pq crypto.Decapsulator, t ecdh.KeyExchanger) (PrivateKey, error)"}, + {"NewHybridPublicKey", Func, 26, "func(pq crypto.Encapsulator, t *ecdh.PublicKey) (PublicKey, error)"}, + {"NewKDF", Func, 26, "func(id uint16) (KDF, error)"}, + {"NewKEM", Func, 26, "func(id uint16) (KEM, error)"}, + {"NewMLKEMPrivateKey", Func, 26, "func(priv crypto.Decapsulator) (PrivateKey, error)"}, + {"NewMLKEMPublicKey", Func, 26, "func(pub crypto.Encapsulator) (PublicKey, error)"}, + {"NewRecipient", Func, 26, "func(enc []byte, k PrivateKey, kdf KDF, aead AEAD, info []byte) (*Recipient, error)"}, + {"NewSender", Func, 26, "func(pk PublicKey, kdf KDF, aead AEAD, info []byte) (enc []byte, s *Sender, err error)"}, + {"Open", Func, 26, "func(k PrivateKey, kdf KDF, aead AEAD, info []byte, ciphertext []byte) ([]byte, error)"}, + {"Recipient", Type, 26, ""}, + {"SHAKE128", Func, 26, "func() KDF"}, + {"SHAKE256", Func, 26, "func() KDF"}, + {"Seal", Func, 26, "func(pk PublicKey, kdf KDF, aead AEAD, info []byte, plaintext []byte) ([]byte, error)"}, + {"Sender", Type, 26, ""}, + }, "crypto/md5": { {"BlockSize", Const, 0, ""}, {"New", Func, 0, "func() hash.Hash"}, @@ -801,7 +851,7 @@ var PackageSymbols = map[string][]Symbol{ }, "crypto/rand": { {"Int", Func, 0, "func(rand io.Reader, max *big.Int) (n *big.Int, err error)"}, - {"Prime", Func, 0, "func(rand io.Reader, bits int) (*big.Int, error)"}, + {"Prime", Func, 0, "func(r io.Reader, bits int) (*big.Int, error)"}, {"Read", Func, 0, "func(b []byte) (n int, err error)"}, {"Reader", Var, 0, ""}, {"Text", Func, 24, "func() string"}, @@ -865,7 +915,7 @@ var PackageSymbols = map[string][]Symbol{ {"PublicKey.E", Field, 0, ""}, {"PublicKey.N", Field, 0, ""}, {"SignPKCS1v15", Func, 0, "func(random io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []byte) ([]byte, error)"}, - {"SignPSS", Func, 2, "func(rand io.Reader, priv *PrivateKey, hash crypto.Hash, digest []byte, opts *PSSOptions) ([]byte, error)"}, + {"SignPSS", Func, 2, "func(random io.Reader, priv *PrivateKey, hash crypto.Hash, digest []byte, opts *PSSOptions) ([]byte, error)"}, {"VerifyPKCS1v15", Func, 0, "func(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte) error"}, {"VerifyPSS", Func, 2, "func(pub *PublicKey, hash crypto.Hash, digest []byte, sig []byte, opts *PSSOptions) error"}, }, @@ -1158,6 +1208,8 @@ var PackageSymbols = map[string][]Symbol{ {"RequestClientCert", Const, 0, ""}, {"RequireAndVerifyClientCert", Const, 0, ""}, {"RequireAnyClientCert", Const, 0, ""}, + {"SecP256r1MLKEM768", Const, 26, ""}, + {"SecP384r1MLKEM1024", Const, 26, 
""}, {"Server", Func, 0, "func(conn net.Conn, config *Config) *Conn"}, {"SessionState", Type, 21, ""}, {"SessionState.EarlyData", Field, 21, ""}, @@ -1222,6 +1274,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*RevocationList).CheckSignatureFrom", Method, 19, ""}, {"(CertificateInvalidError).Error", Method, 0, ""}, {"(ConstraintViolationError).Error", Method, 0, ""}, + {"(ExtKeyUsage).OID", Method, 26, ""}, {"(ExtKeyUsage).String", Method, 26, ""}, {"(HostnameError).Error", Method, 0, ""}, {"(InsecureAlgorithmError).Error", Method, 6, ""}, @@ -1380,6 +1433,7 @@ var PackageSymbols = map[string][]Symbol{ {"NoValidChains", Const, 24, ""}, {"NotAuthorizedToSign", Const, 0, ""}, {"OID", Type, 22, ""}, + {"OIDFromASN1OID", Func, 26, "func(asn1OID asn1.ObjectIdentifier) (OID, error)"}, {"OIDFromInts", Func, 22, "func(oid []uint64) (OID, error)"}, {"PEMCipher", Type, 1, ""}, {"PEMCipher3DES", Const, 1, ""}, @@ -1690,10 +1744,6 @@ var PackageSymbols = map[string][]Symbol{ {"(Rows).Next", Method, 0, ""}, {"(RowsAffected).LastInsertId", Method, 0, ""}, {"(RowsAffected).RowsAffected", Method, 0, ""}, - {"(RowsColumnScanner).Close", Method, 26, ""}, - {"(RowsColumnScanner).Columns", Method, 26, ""}, - {"(RowsColumnScanner).Next", Method, 26, ""}, - {"(RowsColumnScanner).ScanColumn", Method, 26, ""}, {"(RowsColumnTypeDatabaseTypeName).Close", Method, 8, ""}, {"(RowsColumnTypeDatabaseTypeName).ColumnTypeDatabaseTypeName", Method, 8, ""}, {"(RowsColumnTypeDatabaseTypeName).Columns", Method, 8, ""}, @@ -1765,7 +1815,6 @@ var PackageSymbols = map[string][]Symbol{ {"ResultNoRows", Var, 0, ""}, {"Rows", Type, 0, ""}, {"RowsAffected", Type, 0, ""}, - {"RowsColumnScanner", Type, 26, ""}, {"RowsColumnTypeDatabaseTypeName", Type, 8, ""}, {"RowsColumnTypeLength", Type, 8, ""}, {"RowsColumnTypeNullable", Type, 8, ""}, @@ -17367,6 +17416,9 @@ var PackageSymbols = map[string][]Symbol{ {"Testing", Func, 21, "func() bool"}, {"Verbose", Func, 1, "func() bool"}, }, + "testing/cryptotest": { + {"SetGlobalRandom", Func, 26, "func(t *testing.T, seed uint64)"}, + }, "testing/fstest": { {"(MapFS).Glob", Method, 16, ""}, {"(MapFS).Lstat", Method, 25, ""}, diff --git a/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/fs.go b/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/fs.go index d29bb5f87..7cbe8af84 100644 --- a/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/fs.go +++ b/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/fs.go @@ -172,28 +172,28 @@ func copyFile(src, dst string) (err error) { in, err := os.Open(src) if err != nil { - return + return //nolint:nakedret } defer in.Close() out, err := os.Create(dst) if err != nil { - return + return //nolint:nakedret } if _, err = io.Copy(out, in); err != nil { out.Close() - return + return //nolint:nakedret } // Check for write errors on Close if err = out.Close(); err != nil { - return + return //nolint:nakedret } si, err := os.Stat(src) if err != nil { - return + return //nolint:nakedret } // Temporary fix for Go < 1.9 @@ -205,7 +205,7 @@ func copyFile(src, dst string) (err error) { } err = os.Chmod(dst, si.Mode()) - return + return //nolint:nakedret } // cloneSymlink will create a new symlink that points to the resolved path of sl. 
diff --git a/vendor/helm.sh/helm/v3/internal/third_party/k8s.io/kubernetes/deployment/util/deploymentutil.go b/vendor/helm.sh/helm/v3/internal/third_party/k8s.io/kubernetes/deployment/util/deploymentutil.go index ae62d0e6f..8f8b28fcb 100644 --- a/vendor/helm.sh/helm/v3/internal/third_party/k8s.io/kubernetes/deployment/util/deploymentutil.go +++ b/vendor/helm.sh/helm/v3/internal/third_party/k8s.io/kubernetes/deployment/util/deploymentutil.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util +package util //nolint:revive import ( "context" diff --git a/vendor/helm.sh/helm/v3/internal/version/version.go b/vendor/helm.sh/helm/v3/internal/version/version.go index 2b22ccbf7..c704a0149 100644 --- a/vendor/helm.sh/helm/v3/internal/version/version.go +++ b/vendor/helm.sh/helm/v3/internal/version/version.go @@ -29,7 +29,7 @@ var ( // // Increment major number for new feature additions and behavioral changes. // Increment minor number for bug fixes and performance enhancements. - version = "v3.19" + version = "v3.20" // metadata is extra build time data metadata = "" diff --git a/vendor/helm.sh/helm/v3/pkg/action/install.go b/vendor/helm.sh/helm/v3/pkg/action/install.go index f81f65749..f8f740005 100644 --- a/vendor/helm.sh/helm/v3/pkg/action/install.go +++ b/vendor/helm.sh/helm/v3/pkg/action/install.go @@ -619,7 +619,7 @@ func writeToFile(outputDir string, name string, data string, append bool) error defer f.Close() - _, err = f.WriteString(fmt.Sprintf("---\n# Source: %s\n%s\n", name, data)) + _, err = fmt.Fprintf(f, "---\n# Source: %s\n%s\n", name, data) if err != nil { return err diff --git a/vendor/helm.sh/helm/v3/pkg/action/rollback.go b/vendor/helm.sh/helm/v3/pkg/action/rollback.go index b0be17d13..fcaaab177 100644 --- a/vendor/helm.sh/helm/v3/pkg/action/rollback.go +++ b/vendor/helm.sh/helm/v3/pkg/action/rollback.go @@ -26,6 +26,7 @@ import ( "helm.sh/helm/v3/pkg/chartutil" "helm.sh/helm/v3/pkg/release" + "helm.sh/helm/v3/pkg/storage/driver" helmtime "helm.sh/helm/v3/pkg/time" ) @@ -249,7 +250,7 @@ func (r *Rollback) performRollback(currentRelease, targetRelease *release.Releas } deployed, err := r.cfg.Releases.DeployedAll(currentRelease.Name) - if err != nil && !strings.Contains(err.Error(), "has no deployed releases") { + if err != nil && !errors.Is(err, driver.ErrNoDeployedReleases) { return nil, err } // Supersede all previous deployments, see issue #2941. diff --git a/vendor/helm.sh/helm/v3/pkg/action/uninstall.go b/vendor/helm.sh/helm/v3/pkg/action/uninstall.go index ac0c4fee8..c30cb65e2 100644 --- a/vendor/helm.sh/helm/v3/pkg/action/uninstall.go +++ b/vendor/helm.sh/helm/v3/pkg/action/uninstall.go @@ -28,6 +28,7 @@ import ( "helm.sh/helm/v3/pkg/kube" "helm.sh/helm/v3/pkg/release" "helm.sh/helm/v3/pkg/releaseutil" + "helm.sh/helm/v3/pkg/storage/driver" helmtime "helm.sh/helm/v3/pkg/time" ) @@ -170,6 +171,19 @@ func (u *Uninstall) Run(name string) (*release.UninstallReleaseResponse, error) u.cfg.Log("uninstall: Failed to store updated release: %s", err) } + // Supersede all previous deployments, see issue #12556 (which is a + // variation on #2941). 
+ deployed, err := u.cfg.Releases.DeployedAll(name) + if err != nil && !errors.Is(err, driver.ErrNoDeployedReleases) { + return nil, err + } + for _, reli := range deployed { + reli.Info.Status = release.StatusSuperseded + if err = u.cfg.Releases.Update(reli); err != nil { + u.cfg.Log("uninstall: Failed to store updated release: %s", err) + } + } + if len(errs) > 0 { return res, errors.Errorf("uninstallation completed with %d error(s): %s", len(errs), joinErrors(errs)) } diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go b/vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go index 40bce2a68..4aa28e222 100644 --- a/vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go @@ -283,6 +283,11 @@ func coalesceTablesFullKey(printf printFn, dst, src map[string]interface{}, pref if dst == nil { return src } + for key, val := range dst { + if val == nil { + src[key] = nil + } + } // Because dest has higher precedence than src, dest values override src // values. for key, val := range src { diff --git a/vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go b/vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go index 75e85098d..3a859e8ff 100644 --- a/vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go +++ b/vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go @@ -38,7 +38,7 @@ type lookupFunc = func(apiversion string, resource string, namespace string, nam // // This function is considered deprecated, and will be renamed in Helm 4. It will no // longer be a public function. -func NewLookupFunction(config *rest.Config) lookupFunc { +func NewLookupFunction(config *rest.Config) lookupFunc { //nolint:revive return newLookupFunction(clientProviderFromConfig{config: config}) } diff --git a/vendor/helm.sh/helm/v3/pkg/registry/client.go b/vendor/helm.sh/helm/v3/pkg/registry/client.go index 347e972e1..25b2a7c4a 100644 --- a/vendor/helm.sh/helm/v3/pkg/registry/client.go +++ b/vendor/helm.sh/helm/v3/pkg/registry/client.go @@ -891,6 +891,7 @@ func (c *Client) Resolve(ref string) (desc ocispec.Descriptor, err error) { return desc, err } remoteRepository.PlainHTTP = c.plainHTTP + remoteRepository.Client = c.authorizer parsedReference, err := newReference(ref) if err != nil { diff --git a/vendor/helm.sh/helm/v3/pkg/time/time.go b/vendor/helm.sh/helm/v3/pkg/time/time.go index 1abe8ae3d..678e368dd 100644 --- a/vendor/helm.sh/helm/v3/pkg/time/time.go +++ b/vendor/helm.sh/helm/v3/pkg/time/time.go @@ -19,7 +19,7 @@ limitations under the License. // where the serializer doesn't omit an empty value for time: // https://github.com/golang/go/issues/11939. 
As such, this can be removed if a // proposal is ever accepted for Go -package time +package time //nolint:revive import ( "bytes" diff --git a/vendor/modules.txt b/vendor/modules.txt index 3e280f250..62e31409f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -473,8 +473,8 @@ github.com/google/go-containerregistry/pkg/v1/tarball github.com/google/go-containerregistry/pkg/v1/types # github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d ## explicit; go 1.24.0 -# github.com/google/renameio/v2 v2.0.1 -## explicit; go 1.13 +# github.com/google/renameio/v2 v2.0.2 +## explicit; go 1.25 github.com/google/renameio/v2 # github.com/google/uuid v1.6.0 ## explicit @@ -746,8 +746,8 @@ github.com/prometheus/procfs/internal/util # github.com/rivo/uniseg v0.4.7 ## explicit; go 1.18 github.com/rivo/uniseg -# github.com/rubenv/sql-migrate v1.8.0 -## explicit; go 1.23.0 +# github.com/rubenv/sql-migrate v1.8.1 +## explicit; go 1.24.0 github.com/rubenv/sql-migrate github.com/rubenv/sql-migrate/sqlparse # github.com/russross/blackfriday/v2 v2.1.0 @@ -769,7 +769,7 @@ github.com/sigstore/fulcio/pkg/certificate # github.com/sigstore/protobuf-specs v0.5.0 ## explicit; go 1.22.0 github.com/sigstore/protobuf-specs/gen/pb-go/common/v1 -# github.com/sigstore/sigstore v1.10.3 +# github.com/sigstore/sigstore v1.10.4 ## explicit; go 1.25.0 github.com/sigstore/sigstore/pkg/cryptoutils github.com/sigstore/sigstore/pkg/signature @@ -991,7 +991,7 @@ go.yaml.in/yaml/v2 # go.yaml.in/yaml/v3 v3.0.4 ## explicit; go 1.16 go.yaml.in/yaml/v3 -# golang.org/x/crypto v0.46.0 +# golang.org/x/crypto v0.47.0 ## explicit; go 1.24.0 golang.org/x/crypto/bcrypt golang.org/x/crypto/blowfish @@ -1015,13 +1015,13 @@ golang.org/x/crypto/scrypt ## explicit; go 1.23.0 golang.org/x/exp/maps golang.org/x/exp/slices -# golang.org/x/mod v0.31.0 +# golang.org/x/mod v0.32.0 ## explicit; go 1.24.0 golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.48.0 +# golang.org/x/net v0.49.0 ## explicit; go 1.24.0 golang.org/x/net/context golang.org/x/net/html @@ -1045,17 +1045,17 @@ golang.org/x/oauth2/internal golang.org/x/sync/errgroup golang.org/x/sync/semaphore golang.org/x/sync/singleflight -# golang.org/x/sys v0.39.0 +# golang.org/x/sys v0.40.0 ## explicit; go 1.24.0 golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.38.0 +# golang.org/x/term v0.39.0 ## explicit; go 1.24.0 golang.org/x/term -# golang.org/x/text v0.32.0 +# golang.org/x/text v0.33.0 ## explicit; go 1.24.0 golang.org/x/text/cases golang.org/x/text/encoding @@ -1083,7 +1083,7 @@ golang.org/x/text/unicode/norm # golang.org/x/time v0.14.0 ## explicit; go 1.24.0 golang.org/x/time/rate -# golang.org/x/tools v0.40.0 +# golang.org/x/tools v0.41.0 ## explicit; go 1.24.0 golang.org/x/tools/go/analysis golang.org/x/tools/go/analysis/analysistest @@ -1258,8 +1258,8 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# helm.sh/helm/v3 v3.19.4 -## explicit; go 1.24.0 +# helm.sh/helm/v3 v3.20.0 +## explicit; go 1.25.0 helm.sh/helm/v3/internal/fileutil helm.sh/helm/v3/internal/resolver helm.sh/helm/v3/internal/sympath @@ -1504,7 +1504,7 @@ k8s.io/apiserver/pkg/warning k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook/metrics -# k8s.io/cli-runtime v0.34.2 => k8s.io/cli-runtime v0.34.0 +# 
k8s.io/cli-runtime v0.35.0 => k8s.io/cli-runtime v0.34.0 ## explicit; go 1.24.0 k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/genericiooptions @@ -1922,7 +1922,7 @@ k8s.io/kube-openapi/pkg/validation/spec k8s.io/kube-openapi/pkg/validation/strfmt k8s.io/kube-openapi/pkg/validation/strfmt/bson k8s.io/kube-openapi/pkg/validation/validate -# k8s.io/kubectl v0.34.2 => k8s.io/kubectl v0.34.0 +# k8s.io/kubectl v0.35.0 => k8s.io/kubectl v0.34.0 ## explicit; go 1.24.0 k8s.io/kubectl/pkg/cmd/util k8s.io/kubectl/pkg/scheme
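
One behavioral note on the Helm changes above: rollback.go and uninstall.go now match the storage driver's sentinel with errors.Is(err, driver.ErrNoDeployedReleases) instead of searching err.Error() for a substring. A minimal sketch of why the sentinel comparison is more robust (the names below are stand-ins, not Helm's actual call chain):

package main

import (
	"errors"
	"fmt"
	"strings"
)

// errNoDeployed stands in for driver.ErrNoDeployedReleases.
var errNoDeployed = errors.New("has no deployed releases")

func deployedAll() error {
	// Storage layers typically wrap the sentinel with extra context.
	return fmt.Errorf("release %q: %w", "demo", errNoDeployed)
}

func main() {
	err := deployedAll()
	// Old check: breaks silently if the message wording ever changes upstream.
	fmt.Println(strings.Contains(err.Error(), "has no deployed releases")) // true
	// New check: matches the sentinel through any %w wrapping.
	fmt.Println(errors.Is(err, errNoDeployed)) // true
}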