diff options
| author | Taras Madan <tarasmadan@google.com> | 2025-01-22 16:07:17 +0100 |
|---|---|---|
| committer | Taras Madan <tarasmadan@google.com> | 2025-01-23 10:42:36 +0000 |
| commit | 7b4377ad9d8a7205416df8d6217ef2b010f89481 (patch) | |
| tree | e6fec4fd12ff807a16d847923f501075bf71d16c /vendor/github.com/argoproj | |
| parent | 475a4c203afb8b7d3af51c4fd32bb170ff32a45e (diff) | |
vendor: delete
Diffstat (limited to 'vendor/github.com/argoproj')
57 files changed, 0 insertions, 69987 deletions
diff --git a/vendor/github.com/argoproj/argo-workflows/v3/LICENSE b/vendor/github.com/argoproj/argo-workflows/v3/LICENSE deleted file mode 100644 index 67e99b065..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative 
Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2017-2018 The Argo Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/argoproj/argo-workflows/v3/errors/errors.go b/vendor/github.com/argoproj/argo-workflows/v3/errors/errors.go deleted file mode 100644 index 35777b57d..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/errors/errors.go +++ /dev/null @@ -1,168 +0,0 @@ -package errors - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" -) - -// Externally visible error codes -const ( - CodeUnauthorized = "ERR_UNAUTHORIZED" - CodeBadRequest = "ERR_BAD_REQUEST" - CodeForbidden = "ERR_FORBIDDEN" - CodeNotFound = "ERR_NOT_FOUND" - CodeNotImplemented = "ERR_NOT_IMPLEMENTED" - CodeTimeout = "ERR_TIMEOUT" - CodeInternal = "ERR_INTERNAL" -) - -// ArgoError is an error interface that additionally adds support for -// stack trace, error code, and a JSON representation of the error -type ArgoError interface { - Error() string - Code() string - HTTPCode() int - JSON() []byte -} - -// argoerr is the internal implementation of an Argo error which wraps the error from pkg/errors -type argoerr struct { - code string - message string - err error -} - -// New returns an error with the supplied message. -// New also records the stack trace at the point it was called. -func New(code string, message string) error { - err := errors.New(message) - return argoerr{code, message, err} -} - -// Errorf returns an error and formats according to a format specifier -func Errorf(code string, format string, args ...interface{}) error { - return New(code, fmt.Sprintf(format, args...)) -} - -// InternalError is a convenience function to create a Internal error with a message -func InternalError(message string) error { - return New(CodeInternal, message) -} - -// InternalErrorf is a convenience function to format an Internal error -func InternalErrorf(format string, args ...interface{}) error { - return Errorf(CodeInternal, format, args...) 
-} - -// InternalWrapError annotates the error with the ERR_INTERNAL code and a stack trace, optional message -func InternalWrapError(err error, message ...string) error { - if len(message) == 0 { - return Wrap(err, CodeInternal, err.Error()) - } - return Wrap(err, CodeInternal, message[0]) -} - -// InternalWrapErrorf annotates the error with the ERR_INTERNAL code and a stack trace, optional message -func InternalWrapErrorf(err error, format string, args ...interface{}) error { - return Wrap(err, CodeInternal, fmt.Sprintf(format, args...)) -} - -// Wrap returns an error annotating err with a stack trace at the point Wrap is called, -// and a new supplied message. The previous original is preserved and accessible via Cause(). -// If err is nil, Wrap returns nil. -func Wrap(err error, code string, message string) error { - if err == nil { - return nil - } - err = fmt.Errorf(message+": %w", err) - return argoerr{code, message, err} -} - -// Cause returns the underlying cause of the error, if possible. -// An error value has a cause if it implements the following -// interface: -// -// type causer interface { -// Cause() error -// } -// -// If the error does not implement Cause, the original error will -// be returned. If the error is nil, nil will be returned without further -// investigation. 
-func Cause(err error) error { - if argoErr, ok := err.(argoerr); ok { - return unwrapCauseArgoErr(argoErr.err) - } - return unwrapCause(err) -} - -func unwrapCauseArgoErr(err error) error { - innerErr := errors.Unwrap(err) - for innerErr != nil { - err = innerErr - innerErr = errors.Unwrap(err) - } - return err -} - -func unwrapCause(err error) error { - type causer interface { - Cause() error - } - - for err != nil { - cause, ok := err.(causer) - if !ok { - break - } - err = cause.Cause() - } - return err -} - -func (e argoerr) Error() string { - return e.message -} - -func (e argoerr) Code() string { - return e.code -} - -func (e argoerr) JSON() []byte { - type errBean struct { - Code string `json:"code"` - Message string `json:"message"` - } - eb := errBean{e.code, e.message} - j, _ := json.Marshal(eb) - return j -} - -func (e argoerr) HTTPCode() int { - switch e.Code() { - case CodeUnauthorized: - return http.StatusUnauthorized - case CodeForbidden: - return http.StatusForbidden - case CodeNotFound: - return http.StatusNotFound - case CodeBadRequest: - return http.StatusBadRequest - case CodeNotImplemented: - return http.StatusNotImplemented - case CodeTimeout, CodeInternal: - return http.StatusInternalServerError - default: - return http.StatusInternalServerError - } -} - -// IsCode is a helper to determine if the error is of a specific code -func IsCode(code string, err error) bool { - if argoErr, ok := err.(argoerr); ok { - return argoErr.code == code - } - return false -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/common.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/common.go deleted file mode 100644 index cc939aaa7..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/common.go +++ /dev/null @@ -1,53 +0,0 @@ -package workflow - -import ( - "time" - - "k8s.io/client-go/rest" - clientcmdapi "k8s.io/client-go/tools/clientcmd/api" -) - -type ClientConfig struct { - // Host must 
be a host string, a host:port pair, or a URL to the base of the apiserver. - // If a URL is given then the (optional) Path of that URL represents a prefix that must - // be appended to all request URIs used to access the apiserver. This allows a frontend - // proxy to easily relocate all of the apiserver endpoints. - Host string - // APIPath is a sub-path that points to an API root. - APIPath string - - // ContentConfig contains settings that affect how objects are transformed when - // sent to the server. - rest.ContentConfig - - // KubeService requires Basic authentication - Username string - Password string - - // KubeService requires Bearer authentication. This client will not attempt to use - // refresh tokens for an OAuth2 flow. - // TODO: demonstrate an OAuth2 compatible client. - BearerToken string - - // Impersonate is the configuration that RESTClient will use for impersonation. - Impersonate rest.ImpersonationConfig - - AuthProvider *clientcmdapi.AuthProviderConfig - - // TLSClientConfig contains settings to enable transport layer security - rest.TLSClientConfig - - // UserAgent is an optional field that specifies the caller of this request. - UserAgent string - - // QPS indicates the maximum QPS to the master from this client. - // If it's zero, the created RESTClient will use DefaultQPS: 5 - QPS float32 - - // Maximum burst for throttle. - // If it's zero, the created RESTClient will use DefaultBurst: 10. - Burst int - - // The maximum length of time to wait before giving up on a server request. A value of zero means no timeout. 
- Timeout time.Duration -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/register.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/register.go deleted file mode 100644 index 82f124aa0..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/register.go +++ /dev/null @@ -1,41 +0,0 @@ -package workflow - -// Workflow constants -const ( - Group string = "argoproj.io" - Version string = "v1alpha1" - APIVersion string = Group + "/" + Version - WorkflowKind string = "Workflow" - WorkflowSingular string = "workflow" - WorkflowPlural string = "workflows" - WorkflowShortName string = "wf" - WorkflowFullName string = WorkflowPlural + "." + Group - WorkflowTemplateKind string = "WorkflowTemplate" - WorkflowTemplateSingular string = "workflowtemplate" - WorkflowTemplatePlural string = "workflowtemplates" - WorkflowTemplateShortName string = "wftmpl" - WorkflowTemplateFullName string = WorkflowTemplatePlural + "." + Group - WorkflowEventBindingPlural string = "workfloweventbindings" - CronWorkflowKind string = "CronWorkflow" - CronWorkflowSingular string = "cronworkflow" - CronWorkflowPlural string = "cronworkflows" - CronWorkflowShortName string = "cronwf" - CronWorkflowFullName string = CronWorkflowPlural + "." + Group - ClusterWorkflowTemplateKind string = "ClusterWorkflowTemplate" - ClusterWorkflowTemplateSingular string = "clusterworkflowtemplate" - ClusterWorkflowTemplatePlural string = "clusterworkflowtemplates" - ClusterWorkflowTemplateShortName string = "cwftmpl" - ClusterWorkflowTemplateFullName string = ClusterWorkflowTemplatePlural + "." + Group - WorkflowEventBindingKind string = "WorkflowEventBinding" - WorkflowTaskSetKind string = "WorkflowTaskSet" - WorkflowTaskSetSingular string = "workflowtaskset" - WorkflowTaskSetPlural string = "workflowtasksets" - WorkflowTaskSetShortName string = "wfts" - WorkflowTaskSetFullName string = WorkflowTaskSetPlural + "." 
+ Group - WorkflowTaskResultKind string = "WorkflowTaskResult" - WorkflowArtifactGCTaskKind string = "WorkflowArtifactGCTask" - WorkflowArtifactGCTaskSingular string = "workflowartifactgctask" - WorkflowArtifactGCTaskPlural string = "workflowartifactgctasks" - WorkflowArtifactGCTaskShortName string = "wfat" - WorkflowArtifactGCTaskFullName string = WorkflowArtifactGCTaskPlural + "." + Group -) diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/amount.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/amount.go deleted file mode 100644 index 7fb40985d..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/amount.go +++ /dev/null @@ -1,33 +0,0 @@ -package v1alpha1 - -import ( - "encoding/json" - "strconv" -) - -// Amount represent a numeric amount. -// +kubebuilder:validation:Type=number -type Amount struct { - Value json.Number `json:"-" protobuf:"bytes,1,opt,name=value,casttype=encoding/json.Number"` -} - -func (a *Amount) UnmarshalJSON(data []byte) error { - a.Value = json.Number(data) - return nil -} - -func (a Amount) MarshalJSON() ([]byte, error) { - return []byte(a.Value), nil -} - -func (a Amount) OpenAPISchemaType() []string { - return []string{"number"} -} - -func (a Amount) OpenAPISchemaFormat() string { - return "" -} - -func (a *Amount) Float64() (float64, error) { - return strconv.ParseFloat(string(a.Value), 64) -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/anystring.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/anystring.go deleted file mode 100644 index f3459f31f..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/anystring.go +++ /dev/null @@ -1,52 +0,0 @@ -package v1alpha1 - -import ( - "encoding/json" - "fmt" - "strconv" -) - -// * It's JSON type is just string. 
-// * It will unmarshall int64, int32, float64, float32, boolean, a plain string and represents it as string. -// * It will marshall back to string - marshalling is not symmetric. -type AnyString string - -func ParseAnyString(val interface{}) AnyString { - return AnyString(fmt.Sprintf("%v", val)) -} - -func AnyStringPtr(val interface{}) *AnyString { - i := ParseAnyString(val) - return &i -} - -func (i *AnyString) UnmarshalJSON(value []byte) error { - var v interface{} - err := json.Unmarshal(value, &v) - if err != nil { - return err - } - switch v := v.(type) { - case float64: - *i = AnyString(strconv.FormatFloat(v, 'f', -1, 64)) - case float32: - *i = AnyString(strconv.FormatFloat(float64(v), 'f', -1, 32)) - case int64: - *i = AnyString(strconv.FormatInt(v, 10)) - case int32: - *i = AnyString(strconv.FormatInt(int64(v), 10)) - case bool: - *i = AnyString(strconv.FormatBool(v)) - case string: - *i = AnyString(v) - } - return nil -} - -func (i AnyString) MarshalJSON() ([]byte, error) { - return json.Marshal(string(i)) -} - -func (i AnyString) String() string { - return string(i) -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/artifact_gc_task_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/artifact_gc_task_types.go deleted file mode 100644 index dbc840d58..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/artifact_gc_task_types.go +++ /dev/null @@ -1,63 +0,0 @@ -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// WorkflowArtifactGCTask specifies the Artifacts that need to be deleted as well as the status of deletion -// +genclient -// +kubebuilder:resource:shortName=wfat -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:subresource:status -type WorkflowArtifactGCTask struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` 
- Spec ArtifactGCSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - Status ArtifactGCStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// ArtifactGCSpec specifies the Artifacts that need to be deleted -type ArtifactGCSpec struct { - // ArtifactsByNode maps Node name to information pertaining to Artifacts on that Node - ArtifactsByNode map[string]ArtifactNodeSpec `json:"artifactsByNode,omitempty" protobuf:"bytes,1,rep,name=artifactsByNode"` -} - -// ArtifactNodeSpec specifies the Artifacts that need to be deleted for a given Node -type ArtifactNodeSpec struct { - // ArchiveLocation is the template-level Artifact location specification - ArchiveLocation *ArtifactLocation `json:"archiveLocation,omitempty" protobuf:"bytes,1,opt,name=archiveLocation"` - // Artifacts maps artifact name to Artifact description - Artifacts map[string]Artifact `json:"artifacts,omitempty" protobuf:"bytes,2,rep,name=artifacts"` -} - -// ArtifactGCStatus describes the result of the deletion -type ArtifactGCStatus struct { - // ArtifactResultsByNode maps Node name to result - ArtifactResultsByNode map[string]ArtifactResultNodeStatus `json:"artifactResultsByNode,omitempty" protobuf:"bytes,1,rep,name=artifactResultsByNode"` -} - -// ArtifactResultNodeStatus describes the result of the deletion on a given node -type ArtifactResultNodeStatus struct { - // ArtifactResults maps Artifact name to result of the deletion - ArtifactResults map[string]ArtifactResult `json:"artifactResults,omitempty" protobuf:"bytes,1,rep,name=artifactResults"` -} - -// ArtifactResult describes the result of attempting to delete a given Artifact -type ArtifactResult struct { - // Name is the name of the Artifact - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - - // Success describes whether the deletion succeeded - Success bool `json:"success,omitempty" protobuf:"varint,2,opt,name=success"` - - // Error is an optional error message which should be set if Success==false - Error 
*string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` -} - -// WorkflowArtifactGCTaskList is list of WorkflowArtifactGCTask resources -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type WorkflowArtifactGCTaskList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` - Items []WorkflowArtifactGCTask `json:"items" protobuf:"bytes,2,opt,name=items"` -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/artifact_repository_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/artifact_repository_types.go deleted file mode 100644 index 8c2ae9945..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/artifact_repository_types.go +++ /dev/null @@ -1,181 +0,0 @@ -package v1alpha1 - -import ( - "fmt" - "path" - "strings" -) - -var ( - // DefaultArchivePattern is the default pattern when storing artifacts in an archive repository - DefaultArchivePattern = "{{workflow.name}}/{{pod.name}}" -) - -// ArtifactRepository represents an artifact repository in which a controller will store its artifacts -type ArtifactRepository struct { - // ArchiveLogs enables log archiving - ArchiveLogs *bool `json:"archiveLogs,omitempty" protobuf:"varint,1,opt,name=archiveLogs"` - // S3 stores artifact in a S3-compliant object store - S3 *S3ArtifactRepository `json:"s3,omitempty" protobuf:"bytes,2,opt,name=s3"` - // Artifactory stores artifacts to JFrog Artifactory - Artifactory *ArtifactoryArtifactRepository `json:"artifactory,omitempty" protobuf:"bytes,3,opt,name=artifactory"` - // HDFS stores artifacts in HDFS - HDFS *HDFSArtifactRepository `json:"hdfs,omitempty" protobuf:"bytes,4,opt,name=hdfs"` - // OSS stores artifact in a OSS-compliant object store - OSS *OSSArtifactRepository `json:"oss,omitempty" protobuf:"bytes,5,opt,name=oss"` - // GCS stores artifact in a GCS object store - GCS 
*GCSArtifactRepository `json:"gcs,omitempty" protobuf:"bytes,6,opt,name=gcs"` - // Azure stores artifact in an Azure Storage account - Azure *AzureArtifactRepository `json:"azure,omitempty" protobuf:"bytes,7,opt,name=azure"` -} - -func (a *ArtifactRepository) IsArchiveLogs() bool { - return a != nil && a.ArchiveLogs != nil && *a.ArchiveLogs -} - -type ArtifactRepositoryType interface { - IntoArtifactLocation(l *ArtifactLocation) -} - -func (a *ArtifactRepository) Get() ArtifactRepositoryType { - if a == nil { - return nil - } else if a.Artifactory != nil { - return a.Artifactory - } else if a.Azure != nil { - return a.Azure - } else if a.GCS != nil { - return a.GCS - } else if a.HDFS != nil { - return a.HDFS - } else if a.OSS != nil { - return a.OSS - } else if a.S3 != nil { - return a.S3 - } - return nil -} - -// ToArtifactLocation returns the artifact location set with default template key: -// key = `{{workflow.name}}/{{pod.name}}` -func (a *ArtifactRepository) ToArtifactLocation() *ArtifactLocation { - if a == nil { - return nil - } - l := &ArtifactLocation{ArchiveLogs: a.ArchiveLogs} - v := a.Get() - if v != nil { - v.IntoArtifactLocation(l) - } - return l -} - -// S3ArtifactRepository defines the controller configuration for an S3 artifact repository -type S3ArtifactRepository struct { - S3Bucket `json:",inline" protobuf:"bytes,1,opt,name=s3Bucket"` - - // KeyFormat defines the format of how to store keys and can reference workflow variables. - KeyFormat string `json:"keyFormat,omitempty" protobuf:"bytes,2,opt,name=keyFormat"` - - // KeyPrefix is prefix used as part of the bucket key in which the controller will store artifacts. - // DEPRECATED. 
Use KeyFormat instead - KeyPrefix string `json:"keyPrefix,omitempty" protobuf:"bytes,3,opt,name=keyPrefix"` -} - -func (r *S3ArtifactRepository) IntoArtifactLocation(l *ArtifactLocation) { - k := r.KeyFormat - if k == "" { - k = path.Join(r.KeyPrefix, DefaultArchivePattern) - } - l.S3 = &S3Artifact{S3Bucket: r.S3Bucket, Key: k} -} - -// OSSArtifactRepository defines the controller configuration for an OSS artifact repository -type OSSArtifactRepository struct { - OSSBucket `json:",inline" protobuf:"bytes,1,opt,name=oSSBucket"` - - // KeyFormat defines the format of how to store keys and can reference workflow variables. - KeyFormat string `json:"keyFormat,omitempty" protobuf:"bytes,2,opt,name=keyFormat"` -} - -func (r *OSSArtifactRepository) IntoArtifactLocation(l *ArtifactLocation) { - k := r.KeyFormat - if k == "" { - k = DefaultArchivePattern - } - l.OSS = &OSSArtifact{OSSBucket: r.OSSBucket, Key: k} -} - -// GCSArtifactRepository defines the controller configuration for a GCS artifact repository -type GCSArtifactRepository struct { - GCSBucket `json:",inline" protobuf:"bytes,1,opt,name=gCSBucket"` - - // KeyFormat defines the format of how to store keys and can reference workflow variables. - KeyFormat string `json:"keyFormat,omitempty" protobuf:"bytes,2,opt,name=keyFormat"` -} - -func (r *GCSArtifactRepository) IntoArtifactLocation(l *ArtifactLocation) { - k := r.KeyFormat - if k == "" { - k = DefaultArchivePattern - } - l.GCS = &GCSArtifact{GCSBucket: r.GCSBucket, Key: k} -} - -// ArtifactoryArtifactRepository defines the controller configuration for an artifactory artifact repository -type ArtifactoryArtifactRepository struct { - ArtifactoryAuth `json:",inline" protobuf:"bytes,1,opt,name=artifactoryAuth"` - // RepoURL is the url for artifactory repo. - RepoURL string `json:"repoURL,omitempty" protobuf:"bytes,2,opt,name=repoURL"` - // KeyFormat defines the format of how to store keys and can reference workflow variables. 
- KeyFormat string `json:"keyFormat,omitempty" protobuf:"bytes,3,opt,name=keyFormat"` -} - -func (r *ArtifactoryArtifactRepository) IntoArtifactLocation(l *ArtifactLocation) { - url := r.RepoURL - if !strings.HasSuffix(url, "/") { - url = url + "/" - } - k := r.KeyFormat - if k == "" { - k = DefaultArchivePattern - } - l.Artifactory = &ArtifactoryArtifact{ArtifactoryAuth: r.ArtifactoryAuth, URL: fmt.Sprintf("%s%s", url, k)} -} - -// AzureArtifactRepository defines the controller configuration for an Azure Blob Storage artifact repository -type AzureArtifactRepository struct { - AzureBlobContainer `json:",inline" protobuf:"bytes,1,opt,name=blobContainer"` - - // BlobNameFormat is defines the format of how to store blob names. Can reference workflow variables - BlobNameFormat string `json:"blobNameFormat,omitempty" protobuf:"bytes,2,opt,name=blobNameFormat"` -} - -func (r *AzureArtifactRepository) IntoArtifactLocation(l *ArtifactLocation) { - k := r.BlobNameFormat - if k == "" { - k = DefaultArchivePattern - } - l.Azure = &AzureArtifact{AzureBlobContainer: r.AzureBlobContainer, Blob: k} -} - -// HDFSArtifactRepository defines the controller configuration for an HDFS artifact repository -type HDFSArtifactRepository struct { - HDFSConfig `json:",inline" protobuf:"bytes,1,opt,name=hDFSConfig"` - - // PathFormat is defines the format of path to store a file. 
Can reference workflow variables - PathFormat string `json:"pathFormat,omitempty" protobuf:"bytes,2,opt,name=pathFormat"` - - // Force copies a file forcibly even if it exists - Force bool `json:"force,omitempty" protobuf:"varint,3,opt,name=force"` -} - -func (r *HDFSArtifactRepository) IntoArtifactLocation(l *ArtifactLocation) { - p := r.PathFormat - if p == "" { - p = DefaultArchivePattern - } - l.HDFS = &HDFSArtifact{HDFSConfig: r.HDFSConfig, Path: p, Force: r.Force} -} - -// MetricsConfig defines a config for a metrics server diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/cluster_workflow_template_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/cluster_workflow_template_types.go deleted file mode 100644 index a9c27f620..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/cluster_workflow_template_types.go +++ /dev/null @@ -1,63 +0,0 @@ -package v1alpha1 - -import ( - "strings" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// ClusterWorkflowTemplate is the definition of a workflow template resource in cluster scope -// +genclient -// +genclient:noStatus -// +genclient:nonNamespaced -// +kubebuilder:resource:scope=Cluster,shortName=clusterwftmpl;cwft -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type ClusterWorkflowTemplate struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` - Spec WorkflowSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` -} - -type ClusterWorkflowTemplates []ClusterWorkflowTemplate - -func (w ClusterWorkflowTemplates) Len() int { - return len(w) -} - -func (w ClusterWorkflowTemplates) Less(i, j int) bool { - return strings.Compare(w[j].ObjectMeta.Name, w[i].ObjectMeta.Name) > 0 -} - -func (w ClusterWorkflowTemplates) Swap(i, j int) { - w[i], w[j] = w[j], w[i] -} - -// ClusterWorkflowTemplateList is list of ClusterWorkflowTemplate 
resources -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type ClusterWorkflowTemplateList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` - Items ClusterWorkflowTemplates `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -var _ TemplateHolder = &ClusterWorkflowTemplate{} - -// GetTemplateByName retrieves a defined template by its name -func (cwftmpl *ClusterWorkflowTemplate) GetTemplateByName(name string) *Template { - for _, t := range cwftmpl.Spec.Templates { - if t.Name == name { - return &t - } - } - return nil -} - -// GetResourceScope returns the template scope of workflow template. -func (cwftmpl *ClusterWorkflowTemplate) GetResourceScope() ResourceScope { - return ResourceScopeCluster -} - -// GetWorkflowSpec returns the WorkflowSpec of cluster workflow template. -func (cwftmpl *ClusterWorkflowTemplate) GetWorkflowSpec() *WorkflowSpec { - return &cwftmpl.Spec -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/common.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/common.go deleted file mode 100644 index 6a7c584b4..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/common.go +++ /dev/null @@ -1,74 +0,0 @@ -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -type ResourceScope string - -const ( - ResourceScopeLocal ResourceScope = "local" - ResourceScopeNamespaced ResourceScope = "namespaced" - ResourceScopeCluster ResourceScope = "cluster" -) - -// TemplateHolder is an object that holds templates; e.g. 
Workflow, WorkflowTemplate, and ClusterWorkflowTemplate -type TemplateHolder interface { - GetNamespace() string - GetName() string - GroupVersionKind() schema.GroupVersionKind - GetTemplateByName(name string) *Template - GetResourceScope() ResourceScope -} - -// WorkflowSpecHolder is an object that holds a WorkflowSpec; e.g., WorkflowTemplate, and ClusterWorkflowTemplate -type WorkflowSpecHolder interface { - metav1.Object - GetWorkflowSpec() *WorkflowSpec -} - -// TemplateReferenceHolder is an object that holds a reference to other templates; e.g. WorkflowStep, DAGTask, and NodeStatus -type TemplateReferenceHolder interface { - // GetTemplate returns the template. This maybe nil. This is first precedence. - GetTemplate() *Template - // GetTemplateRef returns the template ref. This maybe nil. This is second precedence. - GetTemplateRef() *TemplateRef - // GetTemplateName returns the template name. This maybe empty. This is last precedence. - GetTemplateName() string - // GetName returns the name of the template reference holder. - GetName() string - // IsDAGTask returns true if the template reference is a DAGTask. - IsDAGTask() bool - // IsWorkflowStep returns true if the template reference is a WorkflowStep. - IsWorkflowStep() bool -} - -// SubmitOpts are workflow submission options -type SubmitOpts struct { - // Name overrides metadata.name - Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` - // GenerateName overrides metadata.generateName - GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"` - // Entrypoint overrides spec.entrypoint - Entrypoint string `json:"entryPoint,omitempty" protobuf:"bytes,4,opt,name=entrypoint"` - // Parameters passes input parameters to workflow - Parameters []string `json:"parameters,omitempty" protobuf:"bytes,5,rep,name=parameters"` - // ServiceAccount runs all pods in the workflow using specified ServiceAccount. 
- ServiceAccount string `json:"serviceAccount,omitempty" protobuf:"bytes,7,opt,name=serviceAccount"` - // DryRun validates the workflow on the client-side without creating it. This option is not supported in API - DryRun bool `json:"dryRun,omitempty" protobuf:"varint,8,opt,name=dryRun"` - // ServerDryRun validates the workflow on the server-side without creating it - ServerDryRun bool `json:"serverDryRun,omitempty" protobuf:"varint,9,opt,name=serverDryRun"` - // Labels adds to metadata.labels - Labels string `json:"labels,omitempty" protobuf:"bytes,10,opt,name=labels"` - // OwnerReference creates a metadata.ownerReference - OwnerReference *metav1.OwnerReference `json:"ownerReference,omitempty" protobuf:"bytes,11,opt,name=ownerReference"` - // Annotations adds to metadata.labels - Annotations string `json:"annotations,omitempty" protobuf:"bytes,12,opt,name=annotations"` - // Set the podPriorityClassName of the workflow - PodPriorityClassName string `json:"podPriorityClassName,omitempty" protobuf:"bytes,13,opt,name=podPriorityClassName"` - // Priority is used if controller is configured to process limited number of workflows in parallel, higher priority workflows - // are processed first. 
- Priority *int32 `json:"priority,omitempty" protobuf:"bytes,14,opt,name=priority"` -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/container_set_template_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/container_set_template_types.go deleted file mode 100644 index ac1a4f442..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/container_set_template_types.go +++ /dev/null @@ -1,142 +0,0 @@ -package v1alpha1 - -import ( - "fmt" - "time" - - corev1 "k8s.io/api/core/v1" - intstr "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/apimachinery/pkg/util/wait" -) - -type ContainerSetTemplate struct { - Containers []ContainerNode `json:"containers" protobuf:"bytes,4,rep,name=containers"` - VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" protobuf:"bytes,3,rep,name=volumeMounts"` - // RetryStrategy describes how to retry container nodes if the container set fails. - // Note that this works differently from the template-level `retryStrategy` as it is a process-level retry that does not create new Pods or containers. - RetryStrategy *ContainerSetRetryStrategy `json:"retryStrategy,omitempty" protobuf:"bytes,5,opt,name=retryStrategy"` -} - -// ContainerSetRetryStrategy provides controls on how to retry a container set -type ContainerSetRetryStrategy struct { - // Duration is the time between each retry, examples values are "300ms", "1s" or "5m". - // Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". - Duration string `json:"duration,omitempty" protobuf:"bytes,1,opt,name=duration"` - // Retries is the maximum number of retry attempts for each container. It does not include the - // first, original attempt; the maximum number of total attempts will be `retries + 1`. 
- Retries *intstr.IntOrString `json:"retries" protobuf:"bytes,2,rep,name=retries"` -} - -func (t *ContainerSetTemplate) GetRetryStrategy() (wait.Backoff, error) { - if t == nil || t.RetryStrategy == nil || t.RetryStrategy.Retries == nil { - return wait.Backoff{Steps: 1}, nil - } - - backoff := wait.Backoff{Steps: t.RetryStrategy.Retries.IntValue()} - - if t.RetryStrategy.Duration == "" { - return backoff, nil - } - - baseDuration, err := time.ParseDuration(t.RetryStrategy.Duration) - if err != nil { - return wait.Backoff{}, err - } - - if baseDuration < time.Duration(0) { - return wait.Backoff{}, fmt.Errorf("duration has to be positive, current duration: %v ", baseDuration) - } - - backoff.Duration = baseDuration - return backoff, nil -} - -func (in *ContainerSetTemplate) GetContainers() []corev1.Container { - var ctrs []corev1.Container - for _, t := range in.GetGraph() { - c := t.Container - c.VolumeMounts = append(c.VolumeMounts, in.VolumeMounts...) - ctrs = append(ctrs, c) - } - return ctrs -} - -func (in *ContainerSetTemplate) HasContainerNamed(n string) bool { - for _, c := range in.GetContainers() { - if n == c.Name { - return true - } - } - return false -} - -func (in *ContainerSetTemplate) GetGraph() []ContainerNode { - if in == nil { - return nil - } - return in.Containers -} - -func (in *ContainerSetTemplate) HasSequencedContainers() bool { - for _, n := range in.GetGraph() { - if len(n.Dependencies) > 0 { - return true - } - } - return false -} - -// Validate checks if the ContainerSetTemplate is valid -func (in *ContainerSetTemplate) Validate() error { - if len(in.Containers) == 0 { - return fmt.Errorf("containers must have at least one container") - } - - names := make([]string, 0) - for _, ctr := range in.Containers { - names = append(names, ctr.Name) - } - err := validateWorkflowFieldNames(names, false) - if err != nil { - return fmt.Errorf("containers%s", err.Error()) - } - - // Ensure there are no collisions with volume mountPaths and artifact 
load paths - mountPaths := make(map[string]string) - for i, volMount := range in.VolumeMounts { - if prev, ok := mountPaths[volMount.MountPath]; ok { - return fmt.Errorf("volumeMounts[%d].mountPath '%s' already mounted in %s", i, volMount.MountPath, prev) - } - mountPaths[volMount.MountPath] = fmt.Sprintf("volumeMounts.%s", volMount.Name) - } - - // Ensure the dependencies are defined - nameToContainer := make(map[string]ContainerNode) - for _, ctr := range in.Containers { - nameToContainer[ctr.Name] = ctr - } - for _, ctr := range in.Containers { - for _, depName := range ctr.Dependencies { - _, ok := nameToContainer[depName] - if !ok { - return fmt.Errorf("containers.%s dependency '%s' not defined", ctr.Name, depName) - } - } - } - - // Ensure there is no dependency cycle - depGraph := make(map[string][]string) - for _, ctr := range in.Containers { - depGraph[ctr.Name] = append(depGraph[ctr.Name], ctr.Dependencies...) - } - err = validateNoCycles(depGraph) - if err != nil { - return fmt.Errorf("containers %s", err.Error()) - } - return nil -} - -type ContainerNode struct { - corev1.Container `json:",inline" protobuf:"bytes,1,opt,name=container"` - Dependencies []string `json:"dependencies,omitempty" protobuf:"bytes,2,rep,name=dependencies"` -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/cron_workflow_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/cron_workflow_types.go deleted file mode 100644 index 8a420d978..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/cron_workflow_types.go +++ /dev/null @@ -1,112 +0,0 @@ -package v1alpha1 - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow" -) - -// CronWorkflow is the definition of a scheduled workflow resource -// +genclient -// +genclient:noStatus -// 
+kubebuilder:resource:shortName=cwf;cronwf -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type CronWorkflow struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` - Spec CronWorkflowSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - Status CronWorkflowStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// CronWorkflowList is list of CronWorkflow resources -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type CronWorkflowList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` - Items []CronWorkflow `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -type ConcurrencyPolicy string - -const ( - AllowConcurrent ConcurrencyPolicy = "Allow" - ForbidConcurrent ConcurrencyPolicy = "Forbid" - ReplaceConcurrent ConcurrencyPolicy = "Replace" -) - -const annotationKeyLatestSchedule = workflow.CronWorkflowFullName + "/last-used-schedule" - -// CronWorkflowSpec is the specification of a CronWorkflow -type CronWorkflowSpec struct { - // WorkflowSpec is the spec of the workflow to be run - WorkflowSpec WorkflowSpec `json:"workflowSpec" protobuf:"bytes,1,opt,name=workflowSpec,casttype=WorkflowSpec"` - // Schedule is a schedule to run the Workflow in Cron format - Schedule string `json:"schedule" protobuf:"bytes,2,opt,name=schedule"` - // ConcurrencyPolicy is the K8s-style concurrency policy that will be used - ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty" protobuf:"bytes,3,opt,name=concurrencyPolicy,casttype=ConcurrencyPolicy"` - // Suspend is a flag that will stop new CronWorkflows from running if set to true - Suspend bool `json:"suspend,omitempty" protobuf:"varint,4,opt,name=suspend"` - // StartingDeadlineSeconds is the K8s-style deadline that will limit the time a CronWorkflow will be run after its - // original scheduled time if it is missed. 
- StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=startingDeadlineSeconds"` - // SuccessfulJobsHistoryLimit is the number of successful jobs to be kept at a time - SuccessfulJobsHistoryLimit *int32 `json:"successfulJobsHistoryLimit,omitempty" protobuf:"varint,6,opt,name=successfulJobsHistoryLimit"` - // FailedJobsHistoryLimit is the number of failed jobs to be kept at a time - FailedJobsHistoryLimit *int32 `json:"failedJobsHistoryLimit,omitempty" protobuf:"varint,7,opt,name=failedJobsHistoryLimit"` - // Timezone is the timezone against which the cron schedule will be calculated, e.g. "Asia/Tokyo". Default is machine's local time. - Timezone string `json:"timezone,omitempty" protobuf:"bytes,8,opt,name=timezone"` - // WorkflowMetadata contains some metadata of the workflow to be run - WorkflowMetadata *metav1.ObjectMeta `json:"workflowMetadata,omitempty" protobuf:"bytes,9,opt,name=workflowMeta"` -} - -// CronWorkflowStatus is the status of a CronWorkflow -type CronWorkflowStatus struct { - // Active is a list of active workflows stemming from this CronWorkflow - Active []v1.ObjectReference `json:"active" protobuf:"bytes,1,rep,name=active"` - // LastScheduleTime is the last time the CronWorkflow was scheduled - LastScheduledTime *metav1.Time `json:"lastScheduledTime" protobuf:"bytes,2,opt,name=lastScheduledTime"` - // Conditions is a list of conditions the CronWorkflow may have - Conditions Conditions `json:"conditions" protobuf:"bytes,3,rep,name=conditions"` -} - -func (c *CronWorkflow) IsUsingNewSchedule() bool { - lastUsedSchedule, exists := c.Annotations[annotationKeyLatestSchedule] - // If last-used-schedule does not exist, or if it does not match the current schedule then the CronWorkflow schedule - // was just updated - return !exists || lastUsedSchedule != c.Spec.GetScheduleString() -} - -func (c *CronWorkflow) SetSchedule(schedule string) { - if c.Annotations == nil { - c.Annotations = map[string]string{} - 
} - c.Annotations[annotationKeyLatestSchedule] = schedule -} - -func (c *CronWorkflow) GetLatestSchedule() string { - return c.Annotations[annotationKeyLatestSchedule] -} - -func (c *CronWorkflowSpec) GetScheduleString() string { - scheduleString := c.Schedule - if c.Timezone != "" { - scheduleString = "CRON_TZ=" + c.Timezone + " " + scheduleString - } - return scheduleString -} - -func (c *CronWorkflowStatus) HasActiveUID(uid types.UID) bool { - for _, ref := range c.Active { - if uid == ref.UID { - return true - } - } - return false -} - -const ( - // ConditionTypeSubmissionError signifies that there was an error when submitting the CronWorkflow as a Workflow - ConditionTypeSubmissionError ConditionType = "SubmissionError" -) diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/data_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/data_types.go deleted file mode 100644 index 135cae559..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/data_types.go +++ /dev/null @@ -1,40 +0,0 @@ -package v1alpha1 - -// Data is a data template -type Data struct { - // Source sources external data into a data template - Source DataSource `json:"source" protobuf:"bytes,1,opt,name=source"` - - // Transformation applies a set of transformations - Transformation Transformation `json:"transformation" protobuf:"bytes,2,rep,name=transformation"` -} - -func (ds *DataSource) GetArtifactIfNeeded() (*Artifact, bool) { - if ds.ArtifactPaths != nil { - return &ds.ArtifactPaths.Artifact, true - } - return nil, false -} - -type Transformation []TransformationStep - -type TransformationStep struct { - // Expression defines an expr expression to apply - Expression string `json:"expression" protobuf:"bytes,1,opt,name=expression"` -} - -// DataSource sources external data into a data template -type DataSource struct { - // ArtifactPaths is a data transformation that collects a list of artifact 
paths - ArtifactPaths *ArtifactPaths `json:"artifactPaths,omitempty" protobuf:"bytes,1,opt,name=artifactPaths"` -} - -// ArtifactPaths expands a step from a collection of artifacts -type ArtifactPaths struct { - // Artifact is the artifact location from which to source the artifacts, it can be a directory - Artifact `json:",inline" protobuf:"bytes,1,opt,name=artifact"` -} - -type DataSourceProcessor interface { - ProcessArtifactPaths(*ArtifactPaths) (interface{}, error) -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/doc.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/doc.go deleted file mode 100644 index c418575ac..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package v1alpha1 is the v1alpha1 version of the API. -// +groupName=argoproj.io -// +k8s:deepcopy-gen=package,register -// +k8s:openapi-gen=true -package v1alpha1 diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/estimated_duration.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/estimated_duration.go deleted file mode 100644 index cebc70871..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/estimated_duration.go +++ /dev/null @@ -1,14 +0,0 @@ -package v1alpha1 - -import "time" - -// EstimatedDuration is in seconds. 
-type EstimatedDuration int - -func (d EstimatedDuration) ToDuration() time.Duration { - return time.Second * time.Duration(d) -} - -func NewEstimatedDuration(d time.Duration) EstimatedDuration { - return EstimatedDuration(d.Seconds()) -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/event_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/event_types.go deleted file mode 100644 index d6f49b0d8..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/event_types.go +++ /dev/null @@ -1,48 +0,0 @@ -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// WorkflowEventBinding is the definition of an event resource -// +genclient -// +genclient:noStatus -// +kubebuilder:resource:shortName=wfeb -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type WorkflowEventBinding struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` - Spec WorkflowEventBindingSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` -} - -// WorkflowEventBindingList is list of event resources -// +kubebuilder:resource:shortName=wfebs -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type WorkflowEventBindingList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` - Items []WorkflowEventBinding `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -type WorkflowEventBindingSpec struct { - // Event is the event to bind to - Event Event `json:"event" protobuf:"bytes,1,opt,name=event"` - // Submit is the workflow template to submit - Submit *Submit `json:"submit,omitempty" protobuf:"bytes,2,opt,name=submit"` -} - -type Event struct { - // Selector (https://github.com/expr-lang/expr) that we must must match the event. E.g. 
`payload.message == "test"` - Selector string `json:"selector" protobuf:"bytes,1,opt,name=selector"` -} - -type Submit struct { - // WorkflowTemplateRef the workflow template to submit - WorkflowTemplateRef WorkflowTemplateRef `json:"workflowTemplateRef" protobuf:"bytes,1,opt,name=workflowTemplateRef"` - - // Metadata optional means to customize select fields of the workflow metadata - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,3,opt,name=metadata"` - - // Arguments extracted from the event and then set as arguments to the workflow created. - Arguments *Arguments `json:"arguments,omitempty" protobuf:"bytes,2,opt,name=arguments"` -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.pb.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.pb.go deleted file mode 100644 index 57a5a70e9..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.pb.go +++ /dev/null @@ -1,47361 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.proto - -package v1alpha1 - -import ( - encoding_json "encoding/json" - fmt "fmt" - - io "io" - - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - k8s_io_api_core_v1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" - v12 "k8s.io/api/policy/v1" - k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - v11 "k8s.io/apimachinery/pkg/apis/meta/v1" - - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" - - intstr "k8s.io/apimachinery/pkg/util/intstr" -) - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *Amount) Reset() { *m = Amount{} } -func (*Amount) ProtoMessage() {} -func (*Amount) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{0} -} -func (m *Amount) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Amount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Amount) XXX_Merge(src proto.Message) { - xxx_messageInfo_Amount.Merge(m, src) -} -func (m *Amount) XXX_Size() int { - return m.Size() -} -func (m *Amount) XXX_DiscardUnknown() { - xxx_messageInfo_Amount.DiscardUnknown(m) -} - -var xxx_messageInfo_Amount proto.InternalMessageInfo - -func (m *ArchiveStrategy) Reset() { *m = ArchiveStrategy{} } -func (*ArchiveStrategy) ProtoMessage() {} -func (*ArchiveStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{1} -} -func (m *ArchiveStrategy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ArchiveStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ArchiveStrategy) XXX_Merge(src proto.Message) { - xxx_messageInfo_ArchiveStrategy.Merge(m, src) -} -func (m *ArchiveStrategy) XXX_Size() int { - return m.Size() -} -func (m *ArchiveStrategy) XXX_DiscardUnknown() { - xxx_messageInfo_ArchiveStrategy.DiscardUnknown(m) -} - -var xxx_messageInfo_ArchiveStrategy proto.InternalMessageInfo - -func (m 
*Arguments) Reset() { *m = Arguments{} } -func (*Arguments) ProtoMessage() {} -func (*Arguments) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{2} -} -func (m *Arguments) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Arguments) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Arguments) XXX_Merge(src proto.Message) { - xxx_messageInfo_Arguments.Merge(m, src) -} -func (m *Arguments) XXX_Size() int { - return m.Size() -} -func (m *Arguments) XXX_DiscardUnknown() { - xxx_messageInfo_Arguments.DiscardUnknown(m) -} - -var xxx_messageInfo_Arguments proto.InternalMessageInfo - -func (m *ArtGCStatus) Reset() { *m = ArtGCStatus{} } -func (*ArtGCStatus) ProtoMessage() {} -func (*ArtGCStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{3} -} -func (m *ArtGCStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ArtGCStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ArtGCStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ArtGCStatus.Merge(m, src) -} -func (m *ArtGCStatus) XXX_Size() int { - return m.Size() -} -func (m *ArtGCStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ArtGCStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_ArtGCStatus proto.InternalMessageInfo - -func (m *Artifact) Reset() { *m = Artifact{} } -func (*Artifact) ProtoMessage() {} -func (*Artifact) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{4} -} -func (m *Artifact) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Artifact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - 
return nil, err - } - return b[:n], nil -} -func (m *Artifact) XXX_Merge(src proto.Message) { - xxx_messageInfo_Artifact.Merge(m, src) -} -func (m *Artifact) XXX_Size() int { - return m.Size() -} -func (m *Artifact) XXX_DiscardUnknown() { - xxx_messageInfo_Artifact.DiscardUnknown(m) -} - -var xxx_messageInfo_Artifact proto.InternalMessageInfo - -func (m *ArtifactGC) Reset() { *m = ArtifactGC{} } -func (*ArtifactGC) ProtoMessage() {} -func (*ArtifactGC) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{5} -} -func (m *ArtifactGC) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ArtifactGC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ArtifactGC) XXX_Merge(src proto.Message) { - xxx_messageInfo_ArtifactGC.Merge(m, src) -} -func (m *ArtifactGC) XXX_Size() int { - return m.Size() -} -func (m *ArtifactGC) XXX_DiscardUnknown() { - xxx_messageInfo_ArtifactGC.DiscardUnknown(m) -} - -var xxx_messageInfo_ArtifactGC proto.InternalMessageInfo - -func (m *ArtifactGCSpec) Reset() { *m = ArtifactGCSpec{} } -func (*ArtifactGCSpec) ProtoMessage() {} -func (*ArtifactGCSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{6} -} -func (m *ArtifactGCSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ArtifactGCSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ArtifactGCSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_ArtifactGCSpec.Merge(m, src) -} -func (m *ArtifactGCSpec) XXX_Size() int { - return m.Size() -} -func (m *ArtifactGCSpec) XXX_DiscardUnknown() { - xxx_messageInfo_ArtifactGCSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_ArtifactGCSpec proto.InternalMessageInfo - -func (m 
*ArtifactGCStatus) Reset() { *m = ArtifactGCStatus{} } -func (*ArtifactGCStatus) ProtoMessage() {} -func (*ArtifactGCStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{7} -} -func (m *ArtifactGCStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ArtifactGCStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ArtifactGCStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ArtifactGCStatus.Merge(m, src) -} -func (m *ArtifactGCStatus) XXX_Size() int { - return m.Size() -} -func (m *ArtifactGCStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ArtifactGCStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_ArtifactGCStatus proto.InternalMessageInfo - -func (m *ArtifactLocation) Reset() { *m = ArtifactLocation{} } -func (*ArtifactLocation) ProtoMessage() {} -func (*ArtifactLocation) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{8} -} -func (m *ArtifactLocation) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ArtifactLocation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ArtifactLocation) XXX_Merge(src proto.Message) { - xxx_messageInfo_ArtifactLocation.Merge(m, src) -} -func (m *ArtifactLocation) XXX_Size() int { - return m.Size() -} -func (m *ArtifactLocation) XXX_DiscardUnknown() { - xxx_messageInfo_ArtifactLocation.DiscardUnknown(m) -} - -var xxx_messageInfo_ArtifactLocation proto.InternalMessageInfo - -func (m *ArtifactNodeSpec) Reset() { *m = ArtifactNodeSpec{} } -func (*ArtifactNodeSpec) ProtoMessage() {} -func (*ArtifactNodeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{9} -} -func (m *ArtifactNodeSpec) XXX_Unmarshal(b []byte) error { - 
return m.Unmarshal(b) -} -func (m *ArtifactNodeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ArtifactNodeSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_ArtifactNodeSpec.Merge(m, src) -} -func (m *ArtifactNodeSpec) XXX_Size() int { - return m.Size() -} -func (m *ArtifactNodeSpec) XXX_DiscardUnknown() { - xxx_messageInfo_ArtifactNodeSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_ArtifactNodeSpec proto.InternalMessageInfo - -func (m *ArtifactPaths) Reset() { *m = ArtifactPaths{} } -func (*ArtifactPaths) ProtoMessage() {} -func (*ArtifactPaths) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{10} -} -func (m *ArtifactPaths) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ArtifactPaths) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ArtifactPaths) XXX_Merge(src proto.Message) { - xxx_messageInfo_ArtifactPaths.Merge(m, src) -} -func (m *ArtifactPaths) XXX_Size() int { - return m.Size() -} -func (m *ArtifactPaths) XXX_DiscardUnknown() { - xxx_messageInfo_ArtifactPaths.DiscardUnknown(m) -} - -var xxx_messageInfo_ArtifactPaths proto.InternalMessageInfo - -func (m *ArtifactRepository) Reset() { *m = ArtifactRepository{} } -func (*ArtifactRepository) ProtoMessage() {} -func (*ArtifactRepository) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{11} -} -func (m *ArtifactRepository) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ArtifactRepository) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ArtifactRepository) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_ArtifactRepository.Merge(m, src) -} -func (m *ArtifactRepository) XXX_Size() int { - return m.Size() -} -func (m *ArtifactRepository) XXX_DiscardUnknown() { - xxx_messageInfo_ArtifactRepository.DiscardUnknown(m) -} - -var xxx_messageInfo_ArtifactRepository proto.InternalMessageInfo - -func (m *ArtifactRepositoryRef) Reset() { *m = ArtifactRepositoryRef{} } -func (*ArtifactRepositoryRef) ProtoMessage() {} -func (*ArtifactRepositoryRef) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{12} -} -func (m *ArtifactRepositoryRef) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ArtifactRepositoryRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ArtifactRepositoryRef) XXX_Merge(src proto.Message) { - xxx_messageInfo_ArtifactRepositoryRef.Merge(m, src) -} -func (m *ArtifactRepositoryRef) XXX_Size() int { - return m.Size() -} -func (m *ArtifactRepositoryRef) XXX_DiscardUnknown() { - xxx_messageInfo_ArtifactRepositoryRef.DiscardUnknown(m) -} - -var xxx_messageInfo_ArtifactRepositoryRef proto.InternalMessageInfo - -func (m *ArtifactRepositoryRefStatus) Reset() { *m = ArtifactRepositoryRefStatus{} } -func (*ArtifactRepositoryRefStatus) ProtoMessage() {} -func (*ArtifactRepositoryRefStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{13} -} -func (m *ArtifactRepositoryRefStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ArtifactRepositoryRefStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ArtifactRepositoryRefStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ArtifactRepositoryRefStatus.Merge(m, src) -} -func (m *ArtifactRepositoryRefStatus) XXX_Size() int { - 
return m.Size() -} -func (m *ArtifactRepositoryRefStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ArtifactRepositoryRefStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_ArtifactRepositoryRefStatus proto.InternalMessageInfo - -func (m *ArtifactResult) Reset() { *m = ArtifactResult{} } -func (*ArtifactResult) ProtoMessage() {} -func (*ArtifactResult) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{14} -} -func (m *ArtifactResult) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ArtifactResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ArtifactResult) XXX_Merge(src proto.Message) { - xxx_messageInfo_ArtifactResult.Merge(m, src) -} -func (m *ArtifactResult) XXX_Size() int { - return m.Size() -} -func (m *ArtifactResult) XXX_DiscardUnknown() { - xxx_messageInfo_ArtifactResult.DiscardUnknown(m) -} - -var xxx_messageInfo_ArtifactResult proto.InternalMessageInfo - -func (m *ArtifactResultNodeStatus) Reset() { *m = ArtifactResultNodeStatus{} } -func (*ArtifactResultNodeStatus) ProtoMessage() {} -func (*ArtifactResultNodeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{15} -} -func (m *ArtifactResultNodeStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ArtifactResultNodeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ArtifactResultNodeStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ArtifactResultNodeStatus.Merge(m, src) -} -func (m *ArtifactResultNodeStatus) XXX_Size() int { - return m.Size() -} -func (m *ArtifactResultNodeStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ArtifactResultNodeStatus.DiscardUnknown(m) -} - -var 
xxx_messageInfo_ArtifactResultNodeStatus proto.InternalMessageInfo - -func (m *ArtifactSearchQuery) Reset() { *m = ArtifactSearchQuery{} } -func (*ArtifactSearchQuery) ProtoMessage() {} -func (*ArtifactSearchQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{16} -} -func (m *ArtifactSearchQuery) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ArtifactSearchQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ArtifactSearchQuery) XXX_Merge(src proto.Message) { - xxx_messageInfo_ArtifactSearchQuery.Merge(m, src) -} -func (m *ArtifactSearchQuery) XXX_Size() int { - return m.Size() -} -func (m *ArtifactSearchQuery) XXX_DiscardUnknown() { - xxx_messageInfo_ArtifactSearchQuery.DiscardUnknown(m) -} - -var xxx_messageInfo_ArtifactSearchQuery proto.InternalMessageInfo - -func (m *ArtifactSearchResult) Reset() { *m = ArtifactSearchResult{} } -func (*ArtifactSearchResult) ProtoMessage() {} -func (*ArtifactSearchResult) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{17} -} -func (m *ArtifactSearchResult) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ArtifactSearchResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ArtifactSearchResult) XXX_Merge(src proto.Message) { - xxx_messageInfo_ArtifactSearchResult.Merge(m, src) -} -func (m *ArtifactSearchResult) XXX_Size() int { - return m.Size() -} -func (m *ArtifactSearchResult) XXX_DiscardUnknown() { - xxx_messageInfo_ArtifactSearchResult.DiscardUnknown(m) -} - -var xxx_messageInfo_ArtifactSearchResult proto.InternalMessageInfo - -func (m *ArtifactoryArtifact) Reset() { *m = ArtifactoryArtifact{} } -func (*ArtifactoryArtifact) ProtoMessage() {} 
-func (*ArtifactoryArtifact) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{18} -} -func (m *ArtifactoryArtifact) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ArtifactoryArtifact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ArtifactoryArtifact) XXX_Merge(src proto.Message) { - xxx_messageInfo_ArtifactoryArtifact.Merge(m, src) -} -func (m *ArtifactoryArtifact) XXX_Size() int { - return m.Size() -} -func (m *ArtifactoryArtifact) XXX_DiscardUnknown() { - xxx_messageInfo_ArtifactoryArtifact.DiscardUnknown(m) -} - -var xxx_messageInfo_ArtifactoryArtifact proto.InternalMessageInfo - -func (m *ArtifactoryArtifactRepository) Reset() { *m = ArtifactoryArtifactRepository{} } -func (*ArtifactoryArtifactRepository) ProtoMessage() {} -func (*ArtifactoryArtifactRepository) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{19} -} -func (m *ArtifactoryArtifactRepository) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ArtifactoryArtifactRepository) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ArtifactoryArtifactRepository) XXX_Merge(src proto.Message) { - xxx_messageInfo_ArtifactoryArtifactRepository.Merge(m, src) -} -func (m *ArtifactoryArtifactRepository) XXX_Size() int { - return m.Size() -} -func (m *ArtifactoryArtifactRepository) XXX_DiscardUnknown() { - xxx_messageInfo_ArtifactoryArtifactRepository.DiscardUnknown(m) -} - -var xxx_messageInfo_ArtifactoryArtifactRepository proto.InternalMessageInfo - -func (m *ArtifactoryAuth) Reset() { *m = ArtifactoryAuth{} } -func (*ArtifactoryAuth) ProtoMessage() {} -func (*ArtifactoryAuth) Descriptor() ([]byte, []int) { - return 
fileDescriptor_724696e352c3df5f, []int{20} -} -func (m *ArtifactoryAuth) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ArtifactoryAuth) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ArtifactoryAuth) XXX_Merge(src proto.Message) { - xxx_messageInfo_ArtifactoryAuth.Merge(m, src) -} -func (m *ArtifactoryAuth) XXX_Size() int { - return m.Size() -} -func (m *ArtifactoryAuth) XXX_DiscardUnknown() { - xxx_messageInfo_ArtifactoryAuth.DiscardUnknown(m) -} - -var xxx_messageInfo_ArtifactoryAuth proto.InternalMessageInfo - -func (m *AzureArtifact) Reset() { *m = AzureArtifact{} } -func (*AzureArtifact) ProtoMessage() {} -func (*AzureArtifact) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{21} -} -func (m *AzureArtifact) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AzureArtifact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AzureArtifact) XXX_Merge(src proto.Message) { - xxx_messageInfo_AzureArtifact.Merge(m, src) -} -func (m *AzureArtifact) XXX_Size() int { - return m.Size() -} -func (m *AzureArtifact) XXX_DiscardUnknown() { - xxx_messageInfo_AzureArtifact.DiscardUnknown(m) -} - -var xxx_messageInfo_AzureArtifact proto.InternalMessageInfo - -func (m *AzureArtifactRepository) Reset() { *m = AzureArtifactRepository{} } -func (*AzureArtifactRepository) ProtoMessage() {} -func (*AzureArtifactRepository) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{22} -} -func (m *AzureArtifactRepository) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AzureArtifactRepository) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := 
m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AzureArtifactRepository) XXX_Merge(src proto.Message) { - xxx_messageInfo_AzureArtifactRepository.Merge(m, src) -} -func (m *AzureArtifactRepository) XXX_Size() int { - return m.Size() -} -func (m *AzureArtifactRepository) XXX_DiscardUnknown() { - xxx_messageInfo_AzureArtifactRepository.DiscardUnknown(m) -} - -var xxx_messageInfo_AzureArtifactRepository proto.InternalMessageInfo - -func (m *AzureBlobContainer) Reset() { *m = AzureBlobContainer{} } -func (*AzureBlobContainer) ProtoMessage() {} -func (*AzureBlobContainer) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{23} -} -func (m *AzureBlobContainer) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AzureBlobContainer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AzureBlobContainer) XXX_Merge(src proto.Message) { - xxx_messageInfo_AzureBlobContainer.Merge(m, src) -} -func (m *AzureBlobContainer) XXX_Size() int { - return m.Size() -} -func (m *AzureBlobContainer) XXX_DiscardUnknown() { - xxx_messageInfo_AzureBlobContainer.DiscardUnknown(m) -} - -var xxx_messageInfo_AzureBlobContainer proto.InternalMessageInfo - -func (m *Backoff) Reset() { *m = Backoff{} } -func (*Backoff) ProtoMessage() {} -func (*Backoff) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{24} -} -func (m *Backoff) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Backoff) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Backoff) XXX_Merge(src proto.Message) { - xxx_messageInfo_Backoff.Merge(m, src) -} -func (m *Backoff) XXX_Size() int { - return m.Size() -} -func (m *Backoff) 
XXX_DiscardUnknown() { - xxx_messageInfo_Backoff.DiscardUnknown(m) -} - -var xxx_messageInfo_Backoff proto.InternalMessageInfo - -func (m *BasicAuth) Reset() { *m = BasicAuth{} } -func (*BasicAuth) ProtoMessage() {} -func (*BasicAuth) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{25} -} -func (m *BasicAuth) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BasicAuth) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *BasicAuth) XXX_Merge(src proto.Message) { - xxx_messageInfo_BasicAuth.Merge(m, src) -} -func (m *BasicAuth) XXX_Size() int { - return m.Size() -} -func (m *BasicAuth) XXX_DiscardUnknown() { - xxx_messageInfo_BasicAuth.DiscardUnknown(m) -} - -var xxx_messageInfo_BasicAuth proto.InternalMessageInfo - -func (m *Cache) Reset() { *m = Cache{} } -func (*Cache) ProtoMessage() {} -func (*Cache) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{26} -} -func (m *Cache) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Cache) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Cache) XXX_Merge(src proto.Message) { - xxx_messageInfo_Cache.Merge(m, src) -} -func (m *Cache) XXX_Size() int { - return m.Size() -} -func (m *Cache) XXX_DiscardUnknown() { - xxx_messageInfo_Cache.DiscardUnknown(m) -} - -var xxx_messageInfo_Cache proto.InternalMessageInfo - -func (m *ClientCertAuth) Reset() { *m = ClientCertAuth{} } -func (*ClientCertAuth) ProtoMessage() {} -func (*ClientCertAuth) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{27} -} -func (m *ClientCertAuth) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClientCertAuth) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ClientCertAuth) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClientCertAuth.Merge(m, src) -} -func (m *ClientCertAuth) XXX_Size() int { - return m.Size() -} -func (m *ClientCertAuth) XXX_DiscardUnknown() { - xxx_messageInfo_ClientCertAuth.DiscardUnknown(m) -} - -var xxx_messageInfo_ClientCertAuth proto.InternalMessageInfo - -func (m *ClusterWorkflowTemplate) Reset() { *m = ClusterWorkflowTemplate{} } -func (*ClusterWorkflowTemplate) ProtoMessage() {} -func (*ClusterWorkflowTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{28} -} -func (m *ClusterWorkflowTemplate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClusterWorkflowTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ClusterWorkflowTemplate) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterWorkflowTemplate.Merge(m, src) -} -func (m *ClusterWorkflowTemplate) XXX_Size() int { - return m.Size() -} -func (m *ClusterWorkflowTemplate) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterWorkflowTemplate.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterWorkflowTemplate proto.InternalMessageInfo - -func (m *ClusterWorkflowTemplateList) Reset() { *m = ClusterWorkflowTemplateList{} } -func (*ClusterWorkflowTemplateList) ProtoMessage() {} -func (*ClusterWorkflowTemplateList) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{29} -} -func (m *ClusterWorkflowTemplateList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClusterWorkflowTemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - 
} - return b[:n], nil -} -func (m *ClusterWorkflowTemplateList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterWorkflowTemplateList.Merge(m, src) -} -func (m *ClusterWorkflowTemplateList) XXX_Size() int { - return m.Size() -} -func (m *ClusterWorkflowTemplateList) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterWorkflowTemplateList.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterWorkflowTemplateList proto.InternalMessageInfo - -func (m *Column) Reset() { *m = Column{} } -func (*Column) ProtoMessage() {} -func (*Column) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{30} -} -func (m *Column) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Column) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Column) XXX_Merge(src proto.Message) { - xxx_messageInfo_Column.Merge(m, src) -} -func (m *Column) XXX_Size() int { - return m.Size() -} -func (m *Column) XXX_DiscardUnknown() { - xxx_messageInfo_Column.DiscardUnknown(m) -} - -var xxx_messageInfo_Column proto.InternalMessageInfo - -func (m *Condition) Reset() { *m = Condition{} } -func (*Condition) ProtoMessage() {} -func (*Condition) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{31} -} -func (m *Condition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Condition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Condition) XXX_Merge(src proto.Message) { - xxx_messageInfo_Condition.Merge(m, src) -} -func (m *Condition) XXX_Size() int { - return m.Size() -} -func (m *Condition) XXX_DiscardUnknown() { - xxx_messageInfo_Condition.DiscardUnknown(m) -} - -var xxx_messageInfo_Condition proto.InternalMessageInfo - -func (m *ContainerNode) 
Reset() { *m = ContainerNode{} } -func (*ContainerNode) ProtoMessage() {} -func (*ContainerNode) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{32} -} -func (m *ContainerNode) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ContainerNode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ContainerNode) XXX_Merge(src proto.Message) { - xxx_messageInfo_ContainerNode.Merge(m, src) -} -func (m *ContainerNode) XXX_Size() int { - return m.Size() -} -func (m *ContainerNode) XXX_DiscardUnknown() { - xxx_messageInfo_ContainerNode.DiscardUnknown(m) -} - -var xxx_messageInfo_ContainerNode proto.InternalMessageInfo - -func (m *ContainerSetRetryStrategy) Reset() { *m = ContainerSetRetryStrategy{} } -func (*ContainerSetRetryStrategy) ProtoMessage() {} -func (*ContainerSetRetryStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{33} -} -func (m *ContainerSetRetryStrategy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ContainerSetRetryStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ContainerSetRetryStrategy) XXX_Merge(src proto.Message) { - xxx_messageInfo_ContainerSetRetryStrategy.Merge(m, src) -} -func (m *ContainerSetRetryStrategy) XXX_Size() int { - return m.Size() -} -func (m *ContainerSetRetryStrategy) XXX_DiscardUnknown() { - xxx_messageInfo_ContainerSetRetryStrategy.DiscardUnknown(m) -} - -var xxx_messageInfo_ContainerSetRetryStrategy proto.InternalMessageInfo - -func (m *ContainerSetTemplate) Reset() { *m = ContainerSetTemplate{} } -func (*ContainerSetTemplate) ProtoMessage() {} -func (*ContainerSetTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, 
[]int{34} -} -func (m *ContainerSetTemplate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ContainerSetTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ContainerSetTemplate) XXX_Merge(src proto.Message) { - xxx_messageInfo_ContainerSetTemplate.Merge(m, src) -} -func (m *ContainerSetTemplate) XXX_Size() int { - return m.Size() -} -func (m *ContainerSetTemplate) XXX_DiscardUnknown() { - xxx_messageInfo_ContainerSetTemplate.DiscardUnknown(m) -} - -var xxx_messageInfo_ContainerSetTemplate proto.InternalMessageInfo - -func (m *ContinueOn) Reset() { *m = ContinueOn{} } -func (*ContinueOn) ProtoMessage() {} -func (*ContinueOn) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{35} -} -func (m *ContinueOn) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ContinueOn) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ContinueOn) XXX_Merge(src proto.Message) { - xxx_messageInfo_ContinueOn.Merge(m, src) -} -func (m *ContinueOn) XXX_Size() int { - return m.Size() -} -func (m *ContinueOn) XXX_DiscardUnknown() { - xxx_messageInfo_ContinueOn.DiscardUnknown(m) -} - -var xxx_messageInfo_ContinueOn proto.InternalMessageInfo - -func (m *Counter) Reset() { *m = Counter{} } -func (*Counter) ProtoMessage() {} -func (*Counter) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{36} -} -func (m *Counter) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Counter) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_Counter.Merge(m, src) -} -func (m *Counter) XXX_Size() int { - return m.Size() -} -func (m *Counter) XXX_DiscardUnknown() { - xxx_messageInfo_Counter.DiscardUnknown(m) -} - -var xxx_messageInfo_Counter proto.InternalMessageInfo - -func (m *CreateS3BucketOptions) Reset() { *m = CreateS3BucketOptions{} } -func (*CreateS3BucketOptions) ProtoMessage() {} -func (*CreateS3BucketOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{37} -} -func (m *CreateS3BucketOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CreateS3BucketOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CreateS3BucketOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateS3BucketOptions.Merge(m, src) -} -func (m *CreateS3BucketOptions) XXX_Size() int { - return m.Size() -} -func (m *CreateS3BucketOptions) XXX_DiscardUnknown() { - xxx_messageInfo_CreateS3BucketOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_CreateS3BucketOptions proto.InternalMessageInfo - -func (m *CronWorkflow) Reset() { *m = CronWorkflow{} } -func (*CronWorkflow) ProtoMessage() {} -func (*CronWorkflow) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{38} -} -func (m *CronWorkflow) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CronWorkflow) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CronWorkflow) XXX_Merge(src proto.Message) { - xxx_messageInfo_CronWorkflow.Merge(m, src) -} -func (m *CronWorkflow) XXX_Size() int { - return m.Size() -} -func (m *CronWorkflow) XXX_DiscardUnknown() { - xxx_messageInfo_CronWorkflow.DiscardUnknown(m) -} - -var xxx_messageInfo_CronWorkflow proto.InternalMessageInfo - -func (m 
*CronWorkflowList) Reset() { *m = CronWorkflowList{} } -func (*CronWorkflowList) ProtoMessage() {} -func (*CronWorkflowList) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{39} -} -func (m *CronWorkflowList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CronWorkflowList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CronWorkflowList) XXX_Merge(src proto.Message) { - xxx_messageInfo_CronWorkflowList.Merge(m, src) -} -func (m *CronWorkflowList) XXX_Size() int { - return m.Size() -} -func (m *CronWorkflowList) XXX_DiscardUnknown() { - xxx_messageInfo_CronWorkflowList.DiscardUnknown(m) -} - -var xxx_messageInfo_CronWorkflowList proto.InternalMessageInfo - -func (m *CronWorkflowSpec) Reset() { *m = CronWorkflowSpec{} } -func (*CronWorkflowSpec) ProtoMessage() {} -func (*CronWorkflowSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{40} -} -func (m *CronWorkflowSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CronWorkflowSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CronWorkflowSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_CronWorkflowSpec.Merge(m, src) -} -func (m *CronWorkflowSpec) XXX_Size() int { - return m.Size() -} -func (m *CronWorkflowSpec) XXX_DiscardUnknown() { - xxx_messageInfo_CronWorkflowSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_CronWorkflowSpec proto.InternalMessageInfo - -func (m *CronWorkflowStatus) Reset() { *m = CronWorkflowStatus{} } -func (*CronWorkflowStatus) ProtoMessage() {} -func (*CronWorkflowStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{41} -} -func (m *CronWorkflowStatus) XXX_Unmarshal(b 
[]byte) error { - return m.Unmarshal(b) -} -func (m *CronWorkflowStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CronWorkflowStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_CronWorkflowStatus.Merge(m, src) -} -func (m *CronWorkflowStatus) XXX_Size() int { - return m.Size() -} -func (m *CronWorkflowStatus) XXX_DiscardUnknown() { - xxx_messageInfo_CronWorkflowStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_CronWorkflowStatus proto.InternalMessageInfo - -func (m *DAGTask) Reset() { *m = DAGTask{} } -func (*DAGTask) ProtoMessage() {} -func (*DAGTask) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{42} -} -func (m *DAGTask) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DAGTask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DAGTask) XXX_Merge(src proto.Message) { - xxx_messageInfo_DAGTask.Merge(m, src) -} -func (m *DAGTask) XXX_Size() int { - return m.Size() -} -func (m *DAGTask) XXX_DiscardUnknown() { - xxx_messageInfo_DAGTask.DiscardUnknown(m) -} - -var xxx_messageInfo_DAGTask proto.InternalMessageInfo - -func (m *DAGTemplate) Reset() { *m = DAGTemplate{} } -func (*DAGTemplate) ProtoMessage() {} -func (*DAGTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{43} -} -func (m *DAGTemplate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DAGTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DAGTemplate) XXX_Merge(src proto.Message) { - xxx_messageInfo_DAGTemplate.Merge(m, src) -} -func (m *DAGTemplate) XXX_Size() int { - 
return m.Size() -} -func (m *DAGTemplate) XXX_DiscardUnknown() { - xxx_messageInfo_DAGTemplate.DiscardUnknown(m) -} - -var xxx_messageInfo_DAGTemplate proto.InternalMessageInfo - -func (m *Data) Reset() { *m = Data{} } -func (*Data) ProtoMessage() {} -func (*Data) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{44} -} -func (m *Data) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Data) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Data) XXX_Merge(src proto.Message) { - xxx_messageInfo_Data.Merge(m, src) -} -func (m *Data) XXX_Size() int { - return m.Size() -} -func (m *Data) XXX_DiscardUnknown() { - xxx_messageInfo_Data.DiscardUnknown(m) -} - -var xxx_messageInfo_Data proto.InternalMessageInfo - -func (m *DataSource) Reset() { *m = DataSource{} } -func (*DataSource) ProtoMessage() {} -func (*DataSource) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{45} -} -func (m *DataSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DataSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DataSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_DataSource.Merge(m, src) -} -func (m *DataSource) XXX_Size() int { - return m.Size() -} -func (m *DataSource) XXX_DiscardUnknown() { - xxx_messageInfo_DataSource.DiscardUnknown(m) -} - -var xxx_messageInfo_DataSource proto.InternalMessageInfo - -func (m *Event) Reset() { *m = Event{} } -func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{46} -} -func (m *Event) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Event) XXX_Marshal(b []byte, deterministic 
bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Event) XXX_Merge(src proto.Message) { - xxx_messageInfo_Event.Merge(m, src) -} -func (m *Event) XXX_Size() int { - return m.Size() -} -func (m *Event) XXX_DiscardUnknown() { - xxx_messageInfo_Event.DiscardUnknown(m) -} - -var xxx_messageInfo_Event proto.InternalMessageInfo - -func (m *ExecutorConfig) Reset() { *m = ExecutorConfig{} } -func (*ExecutorConfig) ProtoMessage() {} -func (*ExecutorConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{47} -} -func (m *ExecutorConfig) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExecutorConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ExecutorConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExecutorConfig.Merge(m, src) -} -func (m *ExecutorConfig) XXX_Size() int { - return m.Size() -} -func (m *ExecutorConfig) XXX_DiscardUnknown() { - xxx_messageInfo_ExecutorConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_ExecutorConfig proto.InternalMessageInfo - -func (m *GCSArtifact) Reset() { *m = GCSArtifact{} } -func (*GCSArtifact) ProtoMessage() {} -func (*GCSArtifact) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{48} -} -func (m *GCSArtifact) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GCSArtifact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *GCSArtifact) XXX_Merge(src proto.Message) { - xxx_messageInfo_GCSArtifact.Merge(m, src) -} -func (m *GCSArtifact) XXX_Size() int { - return m.Size() -} -func (m *GCSArtifact) XXX_DiscardUnknown() { - 
xxx_messageInfo_GCSArtifact.DiscardUnknown(m) -} - -var xxx_messageInfo_GCSArtifact proto.InternalMessageInfo - -func (m *GCSArtifactRepository) Reset() { *m = GCSArtifactRepository{} } -func (*GCSArtifactRepository) ProtoMessage() {} -func (*GCSArtifactRepository) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{49} -} -func (m *GCSArtifactRepository) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GCSArtifactRepository) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *GCSArtifactRepository) XXX_Merge(src proto.Message) { - xxx_messageInfo_GCSArtifactRepository.Merge(m, src) -} -func (m *GCSArtifactRepository) XXX_Size() int { - return m.Size() -} -func (m *GCSArtifactRepository) XXX_DiscardUnknown() { - xxx_messageInfo_GCSArtifactRepository.DiscardUnknown(m) -} - -var xxx_messageInfo_GCSArtifactRepository proto.InternalMessageInfo - -func (m *GCSBucket) Reset() { *m = GCSBucket{} } -func (*GCSBucket) ProtoMessage() {} -func (*GCSBucket) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{50} -} -func (m *GCSBucket) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GCSBucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *GCSBucket) XXX_Merge(src proto.Message) { - xxx_messageInfo_GCSBucket.Merge(m, src) -} -func (m *GCSBucket) XXX_Size() int { - return m.Size() -} -func (m *GCSBucket) XXX_DiscardUnknown() { - xxx_messageInfo_GCSBucket.DiscardUnknown(m) -} - -var xxx_messageInfo_GCSBucket proto.InternalMessageInfo - -func (m *Gauge) Reset() { *m = Gauge{} } -func (*Gauge) ProtoMessage() {} -func (*Gauge) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{51} -} -func 
(m *Gauge) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Gauge) XXX_Merge(src proto.Message) { - xxx_messageInfo_Gauge.Merge(m, src) -} -func (m *Gauge) XXX_Size() int { - return m.Size() -} -func (m *Gauge) XXX_DiscardUnknown() { - xxx_messageInfo_Gauge.DiscardUnknown(m) -} - -var xxx_messageInfo_Gauge proto.InternalMessageInfo - -func (m *GitArtifact) Reset() { *m = GitArtifact{} } -func (*GitArtifact) ProtoMessage() {} -func (*GitArtifact) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{52} -} -func (m *GitArtifact) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GitArtifact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *GitArtifact) XXX_Merge(src proto.Message) { - xxx_messageInfo_GitArtifact.Merge(m, src) -} -func (m *GitArtifact) XXX_Size() int { - return m.Size() -} -func (m *GitArtifact) XXX_DiscardUnknown() { - xxx_messageInfo_GitArtifact.DiscardUnknown(m) -} - -var xxx_messageInfo_GitArtifact proto.InternalMessageInfo - -func (m *HDFSArtifact) Reset() { *m = HDFSArtifact{} } -func (*HDFSArtifact) ProtoMessage() {} -func (*HDFSArtifact) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{53} -} -func (m *HDFSArtifact) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HDFSArtifact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HDFSArtifact) XXX_Merge(src proto.Message) { - xxx_messageInfo_HDFSArtifact.Merge(m, src) -} -func (m *HDFSArtifact) XXX_Size() int { - return 
m.Size() -} -func (m *HDFSArtifact) XXX_DiscardUnknown() { - xxx_messageInfo_HDFSArtifact.DiscardUnknown(m) -} - -var xxx_messageInfo_HDFSArtifact proto.InternalMessageInfo - -func (m *HDFSArtifactRepository) Reset() { *m = HDFSArtifactRepository{} } -func (*HDFSArtifactRepository) ProtoMessage() {} -func (*HDFSArtifactRepository) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{54} -} -func (m *HDFSArtifactRepository) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HDFSArtifactRepository) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HDFSArtifactRepository) XXX_Merge(src proto.Message) { - xxx_messageInfo_HDFSArtifactRepository.Merge(m, src) -} -func (m *HDFSArtifactRepository) XXX_Size() int { - return m.Size() -} -func (m *HDFSArtifactRepository) XXX_DiscardUnknown() { - xxx_messageInfo_HDFSArtifactRepository.DiscardUnknown(m) -} - -var xxx_messageInfo_HDFSArtifactRepository proto.InternalMessageInfo - -func (m *HDFSConfig) Reset() { *m = HDFSConfig{} } -func (*HDFSConfig) ProtoMessage() {} -func (*HDFSConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{55} -} -func (m *HDFSConfig) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HDFSConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HDFSConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_HDFSConfig.Merge(m, src) -} -func (m *HDFSConfig) XXX_Size() int { - return m.Size() -} -func (m *HDFSConfig) XXX_DiscardUnknown() { - xxx_messageInfo_HDFSConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_HDFSConfig proto.InternalMessageInfo - -func (m *HDFSKrbConfig) Reset() { *m = HDFSKrbConfig{} } -func (*HDFSKrbConfig) 
ProtoMessage() {} -func (*HDFSKrbConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{56} -} -func (m *HDFSKrbConfig) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HDFSKrbConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HDFSKrbConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_HDFSKrbConfig.Merge(m, src) -} -func (m *HDFSKrbConfig) XXX_Size() int { - return m.Size() -} -func (m *HDFSKrbConfig) XXX_DiscardUnknown() { - xxx_messageInfo_HDFSKrbConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_HDFSKrbConfig proto.InternalMessageInfo - -func (m *HTTP) Reset() { *m = HTTP{} } -func (*HTTP) ProtoMessage() {} -func (*HTTP) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{57} -} -func (m *HTTP) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HTTP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HTTP) XXX_Merge(src proto.Message) { - xxx_messageInfo_HTTP.Merge(m, src) -} -func (m *HTTP) XXX_Size() int { - return m.Size() -} -func (m *HTTP) XXX_DiscardUnknown() { - xxx_messageInfo_HTTP.DiscardUnknown(m) -} - -var xxx_messageInfo_HTTP proto.InternalMessageInfo - -func (m *HTTPArtifact) Reset() { *m = HTTPArtifact{} } -func (*HTTPArtifact) ProtoMessage() {} -func (*HTTPArtifact) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{58} -} -func (m *HTTPArtifact) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HTTPArtifact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HTTPArtifact) XXX_Merge(src 
proto.Message) { - xxx_messageInfo_HTTPArtifact.Merge(m, src) -} -func (m *HTTPArtifact) XXX_Size() int { - return m.Size() -} -func (m *HTTPArtifact) XXX_DiscardUnknown() { - xxx_messageInfo_HTTPArtifact.DiscardUnknown(m) -} - -var xxx_messageInfo_HTTPArtifact proto.InternalMessageInfo - -func (m *HTTPAuth) Reset() { *m = HTTPAuth{} } -func (*HTTPAuth) ProtoMessage() {} -func (*HTTPAuth) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{59} -} -func (m *HTTPAuth) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HTTPAuth) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HTTPAuth) XXX_Merge(src proto.Message) { - xxx_messageInfo_HTTPAuth.Merge(m, src) -} -func (m *HTTPAuth) XXX_Size() int { - return m.Size() -} -func (m *HTTPAuth) XXX_DiscardUnknown() { - xxx_messageInfo_HTTPAuth.DiscardUnknown(m) -} - -var xxx_messageInfo_HTTPAuth proto.InternalMessageInfo - -func (m *HTTPBodySource) Reset() { *m = HTTPBodySource{} } -func (*HTTPBodySource) ProtoMessage() {} -func (*HTTPBodySource) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{60} -} -func (m *HTTPBodySource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HTTPBodySource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HTTPBodySource) XXX_Merge(src proto.Message) { - xxx_messageInfo_HTTPBodySource.Merge(m, src) -} -func (m *HTTPBodySource) XXX_Size() int { - return m.Size() -} -func (m *HTTPBodySource) XXX_DiscardUnknown() { - xxx_messageInfo_HTTPBodySource.DiscardUnknown(m) -} - -var xxx_messageInfo_HTTPBodySource proto.InternalMessageInfo - -func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } -func (*HTTPHeader) ProtoMessage() {} -func 
(*HTTPHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{61} -} -func (m *HTTPHeader) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HTTPHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HTTPHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_HTTPHeader.Merge(m, src) -} -func (m *HTTPHeader) XXX_Size() int { - return m.Size() -} -func (m *HTTPHeader) XXX_DiscardUnknown() { - xxx_messageInfo_HTTPHeader.DiscardUnknown(m) -} - -var xxx_messageInfo_HTTPHeader proto.InternalMessageInfo - -func (m *HTTPHeaderSource) Reset() { *m = HTTPHeaderSource{} } -func (*HTTPHeaderSource) ProtoMessage() {} -func (*HTTPHeaderSource) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{62} -} -func (m *HTTPHeaderSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HTTPHeaderSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HTTPHeaderSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_HTTPHeaderSource.Merge(m, src) -} -func (m *HTTPHeaderSource) XXX_Size() int { - return m.Size() -} -func (m *HTTPHeaderSource) XXX_DiscardUnknown() { - xxx_messageInfo_HTTPHeaderSource.DiscardUnknown(m) -} - -var xxx_messageInfo_HTTPHeaderSource proto.InternalMessageInfo - -func (m *Header) Reset() { *m = Header{} } -func (*Header) ProtoMessage() {} -func (*Header) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{63} -} -func (m *Header) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - 
return b[:n], nil -} -func (m *Header) XXX_Merge(src proto.Message) { - xxx_messageInfo_Header.Merge(m, src) -} -func (m *Header) XXX_Size() int { - return m.Size() -} -func (m *Header) XXX_DiscardUnknown() { - xxx_messageInfo_Header.DiscardUnknown(m) -} - -var xxx_messageInfo_Header proto.InternalMessageInfo - -func (m *Histogram) Reset() { *m = Histogram{} } -func (*Histogram) ProtoMessage() {} -func (*Histogram) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{64} -} -func (m *Histogram) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Histogram) XXX_Merge(src proto.Message) { - xxx_messageInfo_Histogram.Merge(m, src) -} -func (m *Histogram) XXX_Size() int { - return m.Size() -} -func (m *Histogram) XXX_DiscardUnknown() { - xxx_messageInfo_Histogram.DiscardUnknown(m) -} - -var xxx_messageInfo_Histogram proto.InternalMessageInfo - -func (m *Inputs) Reset() { *m = Inputs{} } -func (*Inputs) ProtoMessage() {} -func (*Inputs) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{65} -} -func (m *Inputs) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Inputs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Inputs) XXX_Merge(src proto.Message) { - xxx_messageInfo_Inputs.Merge(m, src) -} -func (m *Inputs) XXX_Size() int { - return m.Size() -} -func (m *Inputs) XXX_DiscardUnknown() { - xxx_messageInfo_Inputs.DiscardUnknown(m) -} - -var xxx_messageInfo_Inputs proto.InternalMessageInfo - -func (m *Item) Reset() { *m = Item{} } -func (*Item) ProtoMessage() {} -func (*Item) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, 
[]int{66} -} -func (m *Item) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Item) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Item) XXX_Merge(src proto.Message) { - xxx_messageInfo_Item.Merge(m, src) -} -func (m *Item) XXX_Size() int { - return m.Size() -} -func (m *Item) XXX_DiscardUnknown() { - xxx_messageInfo_Item.DiscardUnknown(m) -} - -var xxx_messageInfo_Item proto.InternalMessageInfo - -func (m *LabelKeys) Reset() { *m = LabelKeys{} } -func (*LabelKeys) ProtoMessage() {} -func (*LabelKeys) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{67} -} -func (m *LabelKeys) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LabelKeys) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *LabelKeys) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelKeys.Merge(m, src) -} -func (m *LabelKeys) XXX_Size() int { - return m.Size() -} -func (m *LabelKeys) XXX_DiscardUnknown() { - xxx_messageInfo_LabelKeys.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelKeys proto.InternalMessageInfo - -func (m *LabelValueFrom) Reset() { *m = LabelValueFrom{} } -func (*LabelValueFrom) ProtoMessage() {} -func (*LabelValueFrom) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{68} -} -func (m *LabelValueFrom) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LabelValueFrom) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *LabelValueFrom) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelValueFrom.Merge(m, src) -} -func (m *LabelValueFrom) XXX_Size() int { - 
return m.Size() -} -func (m *LabelValueFrom) XXX_DiscardUnknown() { - xxx_messageInfo_LabelValueFrom.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelValueFrom proto.InternalMessageInfo - -func (m *LabelValues) Reset() { *m = LabelValues{} } -func (*LabelValues) ProtoMessage() {} -func (*LabelValues) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{69} -} -func (m *LabelValues) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LabelValues) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *LabelValues) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelValues.Merge(m, src) -} -func (m *LabelValues) XXX_Size() int { - return m.Size() -} -func (m *LabelValues) XXX_DiscardUnknown() { - xxx_messageInfo_LabelValues.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelValues proto.InternalMessageInfo - -func (m *LifecycleHook) Reset() { *m = LifecycleHook{} } -func (*LifecycleHook) ProtoMessage() {} -func (*LifecycleHook) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{70} -} -func (m *LifecycleHook) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LifecycleHook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *LifecycleHook) XXX_Merge(src proto.Message) { - xxx_messageInfo_LifecycleHook.Merge(m, src) -} -func (m *LifecycleHook) XXX_Size() int { - return m.Size() -} -func (m *LifecycleHook) XXX_DiscardUnknown() { - xxx_messageInfo_LifecycleHook.DiscardUnknown(m) -} - -var xxx_messageInfo_LifecycleHook proto.InternalMessageInfo - -func (m *Link) Reset() { *m = Link{} } -func (*Link) ProtoMessage() {} -func (*Link) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{71} -} -func 
(m *Link) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Link) XXX_Merge(src proto.Message) { - xxx_messageInfo_Link.Merge(m, src) -} -func (m *Link) XXX_Size() int { - return m.Size() -} -func (m *Link) XXX_DiscardUnknown() { - xxx_messageInfo_Link.DiscardUnknown(m) -} - -var xxx_messageInfo_Link proto.InternalMessageInfo - -func (m *ManifestFrom) Reset() { *m = ManifestFrom{} } -func (*ManifestFrom) ProtoMessage() {} -func (*ManifestFrom) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{72} -} -func (m *ManifestFrom) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ManifestFrom) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ManifestFrom) XXX_Merge(src proto.Message) { - xxx_messageInfo_ManifestFrom.Merge(m, src) -} -func (m *ManifestFrom) XXX_Size() int { - return m.Size() -} -func (m *ManifestFrom) XXX_DiscardUnknown() { - xxx_messageInfo_ManifestFrom.DiscardUnknown(m) -} - -var xxx_messageInfo_ManifestFrom proto.InternalMessageInfo - -func (m *MemoizationStatus) Reset() { *m = MemoizationStatus{} } -func (*MemoizationStatus) ProtoMessage() {} -func (*MemoizationStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{73} -} -func (m *MemoizationStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemoizationStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *MemoizationStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemoizationStatus.Merge(m, src) -} -func 
(m *MemoizationStatus) XXX_Size() int { - return m.Size() -} -func (m *MemoizationStatus) XXX_DiscardUnknown() { - xxx_messageInfo_MemoizationStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_MemoizationStatus proto.InternalMessageInfo - -func (m *Memoize) Reset() { *m = Memoize{} } -func (*Memoize) ProtoMessage() {} -func (*Memoize) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{74} -} -func (m *Memoize) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Memoize) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Memoize) XXX_Merge(src proto.Message) { - xxx_messageInfo_Memoize.Merge(m, src) -} -func (m *Memoize) XXX_Size() int { - return m.Size() -} -func (m *Memoize) XXX_DiscardUnknown() { - xxx_messageInfo_Memoize.DiscardUnknown(m) -} - -var xxx_messageInfo_Memoize proto.InternalMessageInfo - -func (m *Metadata) Reset() { *m = Metadata{} } -func (*Metadata) ProtoMessage() {} -func (*Metadata) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{75} -} -func (m *Metadata) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Metadata) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metadata.Merge(m, src) -} -func (m *Metadata) XXX_Size() int { - return m.Size() -} -func (m *Metadata) XXX_DiscardUnknown() { - xxx_messageInfo_Metadata.DiscardUnknown(m) -} - -var xxx_messageInfo_Metadata proto.InternalMessageInfo - -func (m *MetricLabel) Reset() { *m = MetricLabel{} } -func (*MetricLabel) ProtoMessage() {} -func (*MetricLabel) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{76} -} -func (m *MetricLabel) 
XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MetricLabel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *MetricLabel) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricLabel.Merge(m, src) -} -func (m *MetricLabel) XXX_Size() int { - return m.Size() -} -func (m *MetricLabel) XXX_DiscardUnknown() { - xxx_messageInfo_MetricLabel.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricLabel proto.InternalMessageInfo - -func (m *Metrics) Reset() { *m = Metrics{} } -func (*Metrics) ProtoMessage() {} -func (*Metrics) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{77} -} -func (m *Metrics) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Metrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Metrics) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metrics.Merge(m, src) -} -func (m *Metrics) XXX_Size() int { - return m.Size() -} -func (m *Metrics) XXX_DiscardUnknown() { - xxx_messageInfo_Metrics.DiscardUnknown(m) -} - -var xxx_messageInfo_Metrics proto.InternalMessageInfo - -func (m *Mutex) Reset() { *m = Mutex{} } -func (*Mutex) ProtoMessage() {} -func (*Mutex) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{78} -} -func (m *Mutex) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Mutex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Mutex) XXX_Merge(src proto.Message) { - xxx_messageInfo_Mutex.Merge(m, src) -} -func (m *Mutex) XXX_Size() int { - return m.Size() -} -func (m *Mutex) XXX_DiscardUnknown() { - 
xxx_messageInfo_Mutex.DiscardUnknown(m) -} - -var xxx_messageInfo_Mutex proto.InternalMessageInfo - -func (m *MutexHolding) Reset() { *m = MutexHolding{} } -func (*MutexHolding) ProtoMessage() {} -func (*MutexHolding) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{79} -} -func (m *MutexHolding) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MutexHolding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *MutexHolding) XXX_Merge(src proto.Message) { - xxx_messageInfo_MutexHolding.Merge(m, src) -} -func (m *MutexHolding) XXX_Size() int { - return m.Size() -} -func (m *MutexHolding) XXX_DiscardUnknown() { - xxx_messageInfo_MutexHolding.DiscardUnknown(m) -} - -var xxx_messageInfo_MutexHolding proto.InternalMessageInfo - -func (m *MutexStatus) Reset() { *m = MutexStatus{} } -func (*MutexStatus) ProtoMessage() {} -func (*MutexStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{80} -} -func (m *MutexStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MutexStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *MutexStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_MutexStatus.Merge(m, src) -} -func (m *MutexStatus) XXX_Size() int { - return m.Size() -} -func (m *MutexStatus) XXX_DiscardUnknown() { - xxx_messageInfo_MutexStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_MutexStatus proto.InternalMessageInfo - -func (m *NodeFlag) Reset() { *m = NodeFlag{} } -func (*NodeFlag) ProtoMessage() {} -func (*NodeFlag) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{81} -} -func (m *NodeFlag) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m 
*NodeFlag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NodeFlag) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeFlag.Merge(m, src) -} -func (m *NodeFlag) XXX_Size() int { - return m.Size() -} -func (m *NodeFlag) XXX_DiscardUnknown() { - xxx_messageInfo_NodeFlag.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeFlag proto.InternalMessageInfo - -func (m *NodeResult) Reset() { *m = NodeResult{} } -func (*NodeResult) ProtoMessage() {} -func (*NodeResult) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{82} -} -func (m *NodeResult) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NodeResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NodeResult) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeResult.Merge(m, src) -} -func (m *NodeResult) XXX_Size() int { - return m.Size() -} -func (m *NodeResult) XXX_DiscardUnknown() { - xxx_messageInfo_NodeResult.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeResult proto.InternalMessageInfo - -func (m *NodeStatus) Reset() { *m = NodeStatus{} } -func (*NodeStatus) ProtoMessage() {} -func (*NodeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{83} -} -func (m *NodeStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NodeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NodeStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeStatus.Merge(m, src) -} -func (m *NodeStatus) XXX_Size() int { - return m.Size() -} -func (m *NodeStatus) XXX_DiscardUnknown() { - 
xxx_messageInfo_NodeStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeStatus proto.InternalMessageInfo - -func (m *NodeSynchronizationStatus) Reset() { *m = NodeSynchronizationStatus{} } -func (*NodeSynchronizationStatus) ProtoMessage() {} -func (*NodeSynchronizationStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{84} -} -func (m *NodeSynchronizationStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NodeSynchronizationStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NodeSynchronizationStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeSynchronizationStatus.Merge(m, src) -} -func (m *NodeSynchronizationStatus) XXX_Size() int { - return m.Size() -} -func (m *NodeSynchronizationStatus) XXX_DiscardUnknown() { - xxx_messageInfo_NodeSynchronizationStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeSynchronizationStatus proto.InternalMessageInfo - -func (m *NoneStrategy) Reset() { *m = NoneStrategy{} } -func (*NoneStrategy) ProtoMessage() {} -func (*NoneStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{85} -} -func (m *NoneStrategy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NoneStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NoneStrategy) XXX_Merge(src proto.Message) { - xxx_messageInfo_NoneStrategy.Merge(m, src) -} -func (m *NoneStrategy) XXX_Size() int { - return m.Size() -} -func (m *NoneStrategy) XXX_DiscardUnknown() { - xxx_messageInfo_NoneStrategy.DiscardUnknown(m) -} - -var xxx_messageInfo_NoneStrategy proto.InternalMessageInfo - -func (m *OAuth2Auth) Reset() { *m = OAuth2Auth{} } -func (*OAuth2Auth) ProtoMessage() {} -func 
(*OAuth2Auth) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{86} -} -func (m *OAuth2Auth) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *OAuth2Auth) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *OAuth2Auth) XXX_Merge(src proto.Message) { - xxx_messageInfo_OAuth2Auth.Merge(m, src) -} -func (m *OAuth2Auth) XXX_Size() int { - return m.Size() -} -func (m *OAuth2Auth) XXX_DiscardUnknown() { - xxx_messageInfo_OAuth2Auth.DiscardUnknown(m) -} - -var xxx_messageInfo_OAuth2Auth proto.InternalMessageInfo - -func (m *OAuth2EndpointParam) Reset() { *m = OAuth2EndpointParam{} } -func (*OAuth2EndpointParam) ProtoMessage() {} -func (*OAuth2EndpointParam) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{87} -} -func (m *OAuth2EndpointParam) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *OAuth2EndpointParam) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *OAuth2EndpointParam) XXX_Merge(src proto.Message) { - xxx_messageInfo_OAuth2EndpointParam.Merge(m, src) -} -func (m *OAuth2EndpointParam) XXX_Size() int { - return m.Size() -} -func (m *OAuth2EndpointParam) XXX_DiscardUnknown() { - xxx_messageInfo_OAuth2EndpointParam.DiscardUnknown(m) -} - -var xxx_messageInfo_OAuth2EndpointParam proto.InternalMessageInfo - -func (m *OSSArtifact) Reset() { *m = OSSArtifact{} } -func (*OSSArtifact) ProtoMessage() {} -func (*OSSArtifact) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{88} -} -func (m *OSSArtifact) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *OSSArtifact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := 
m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *OSSArtifact) XXX_Merge(src proto.Message) { - xxx_messageInfo_OSSArtifact.Merge(m, src) -} -func (m *OSSArtifact) XXX_Size() int { - return m.Size() -} -func (m *OSSArtifact) XXX_DiscardUnknown() { - xxx_messageInfo_OSSArtifact.DiscardUnknown(m) -} - -var xxx_messageInfo_OSSArtifact proto.InternalMessageInfo - -func (m *OSSArtifactRepository) Reset() { *m = OSSArtifactRepository{} } -func (*OSSArtifactRepository) ProtoMessage() {} -func (*OSSArtifactRepository) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{89} -} -func (m *OSSArtifactRepository) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *OSSArtifactRepository) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *OSSArtifactRepository) XXX_Merge(src proto.Message) { - xxx_messageInfo_OSSArtifactRepository.Merge(m, src) -} -func (m *OSSArtifactRepository) XXX_Size() int { - return m.Size() -} -func (m *OSSArtifactRepository) XXX_DiscardUnknown() { - xxx_messageInfo_OSSArtifactRepository.DiscardUnknown(m) -} - -var xxx_messageInfo_OSSArtifactRepository proto.InternalMessageInfo - -func (m *OSSBucket) Reset() { *m = OSSBucket{} } -func (*OSSBucket) ProtoMessage() {} -func (*OSSBucket) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{90} -} -func (m *OSSBucket) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *OSSBucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *OSSBucket) XXX_Merge(src proto.Message) { - xxx_messageInfo_OSSBucket.Merge(m, src) -} -func (m *OSSBucket) XXX_Size() int { - return m.Size() -} -func (m *OSSBucket) 
XXX_DiscardUnknown() { - xxx_messageInfo_OSSBucket.DiscardUnknown(m) -} - -var xxx_messageInfo_OSSBucket proto.InternalMessageInfo - -func (m *OSSLifecycleRule) Reset() { *m = OSSLifecycleRule{} } -func (*OSSLifecycleRule) ProtoMessage() {} -func (*OSSLifecycleRule) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{91} -} -func (m *OSSLifecycleRule) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *OSSLifecycleRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *OSSLifecycleRule) XXX_Merge(src proto.Message) { - xxx_messageInfo_OSSLifecycleRule.Merge(m, src) -} -func (m *OSSLifecycleRule) XXX_Size() int { - return m.Size() -} -func (m *OSSLifecycleRule) XXX_DiscardUnknown() { - xxx_messageInfo_OSSLifecycleRule.DiscardUnknown(m) -} - -var xxx_messageInfo_OSSLifecycleRule proto.InternalMessageInfo - -func (m *Object) Reset() { *m = Object{} } -func (*Object) ProtoMessage() {} -func (*Object) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{92} -} -func (m *Object) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Object) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Object) XXX_Merge(src proto.Message) { - xxx_messageInfo_Object.Merge(m, src) -} -func (m *Object) XXX_Size() int { - return m.Size() -} -func (m *Object) XXX_DiscardUnknown() { - xxx_messageInfo_Object.DiscardUnknown(m) -} - -var xxx_messageInfo_Object proto.InternalMessageInfo - -func (m *Outputs) Reset() { *m = Outputs{} } -func (*Outputs) ProtoMessage() {} -func (*Outputs) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{93} -} -func (m *Outputs) XXX_Unmarshal(b []byte) error { - return 
m.Unmarshal(b) -} -func (m *Outputs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Outputs) XXX_Merge(src proto.Message) { - xxx_messageInfo_Outputs.Merge(m, src) -} -func (m *Outputs) XXX_Size() int { - return m.Size() -} -func (m *Outputs) XXX_DiscardUnknown() { - xxx_messageInfo_Outputs.DiscardUnknown(m) -} - -var xxx_messageInfo_Outputs proto.InternalMessageInfo - -func (m *ParallelSteps) Reset() { *m = ParallelSteps{} } -func (*ParallelSteps) ProtoMessage() {} -func (*ParallelSteps) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{94} -} -func (m *ParallelSteps) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ParallelSteps) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ParallelSteps) XXX_Merge(src proto.Message) { - xxx_messageInfo_ParallelSteps.Merge(m, src) -} -func (m *ParallelSteps) XXX_Size() int { - return m.Size() -} -func (m *ParallelSteps) XXX_DiscardUnknown() { - xxx_messageInfo_ParallelSteps.DiscardUnknown(m) -} - -var xxx_messageInfo_ParallelSteps proto.InternalMessageInfo - -func (m *Parameter) Reset() { *m = Parameter{} } -func (*Parameter) ProtoMessage() {} -func (*Parameter) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{95} -} -func (m *Parameter) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Parameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Parameter) XXX_Merge(src proto.Message) { - xxx_messageInfo_Parameter.Merge(m, src) -} -func (m *Parameter) XXX_Size() int { - return m.Size() -} -func (m *Parameter) 
XXX_DiscardUnknown() { - xxx_messageInfo_Parameter.DiscardUnknown(m) -} - -var xxx_messageInfo_Parameter proto.InternalMessageInfo - -func (m *Plugin) Reset() { *m = Plugin{} } -func (*Plugin) ProtoMessage() {} -func (*Plugin) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{96} -} -func (m *Plugin) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Plugin) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Plugin) XXX_Merge(src proto.Message) { - xxx_messageInfo_Plugin.Merge(m, src) -} -func (m *Plugin) XXX_Size() int { - return m.Size() -} -func (m *Plugin) XXX_DiscardUnknown() { - xxx_messageInfo_Plugin.DiscardUnknown(m) -} - -var xxx_messageInfo_Plugin proto.InternalMessageInfo - -func (m *PodGC) Reset() { *m = PodGC{} } -func (*PodGC) ProtoMessage() {} -func (*PodGC) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{97} -} -func (m *PodGC) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodGC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodGC) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodGC.Merge(m, src) -} -func (m *PodGC) XXX_Size() int { - return m.Size() -} -func (m *PodGC) XXX_DiscardUnknown() { - xxx_messageInfo_PodGC.DiscardUnknown(m) -} - -var xxx_messageInfo_PodGC proto.InternalMessageInfo - -func (m *Prometheus) Reset() { *m = Prometheus{} } -func (*Prometheus) ProtoMessage() {} -func (*Prometheus) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{98} -} -func (m *Prometheus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Prometheus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := 
m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Prometheus) XXX_Merge(src proto.Message) { - xxx_messageInfo_Prometheus.Merge(m, src) -} -func (m *Prometheus) XXX_Size() int { - return m.Size() -} -func (m *Prometheus) XXX_DiscardUnknown() { - xxx_messageInfo_Prometheus.DiscardUnknown(m) -} - -var xxx_messageInfo_Prometheus proto.InternalMessageInfo - -func (m *RawArtifact) Reset() { *m = RawArtifact{} } -func (*RawArtifact) ProtoMessage() {} -func (*RawArtifact) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{99} -} -func (m *RawArtifact) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RawArtifact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RawArtifact) XXX_Merge(src proto.Message) { - xxx_messageInfo_RawArtifact.Merge(m, src) -} -func (m *RawArtifact) XXX_Size() int { - return m.Size() -} -func (m *RawArtifact) XXX_DiscardUnknown() { - xxx_messageInfo_RawArtifact.DiscardUnknown(m) -} - -var xxx_messageInfo_RawArtifact proto.InternalMessageInfo - -func (m *ResourceTemplate) Reset() { *m = ResourceTemplate{} } -func (*ResourceTemplate) ProtoMessage() {} -func (*ResourceTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{100} -} -func (m *ResourceTemplate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceTemplate) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceTemplate.Merge(m, src) -} -func (m *ResourceTemplate) XXX_Size() int { - return m.Size() -} -func (m *ResourceTemplate) XXX_DiscardUnknown() { - 
xxx_messageInfo_ResourceTemplate.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceTemplate proto.InternalMessageInfo - -func (m *RetryAffinity) Reset() { *m = RetryAffinity{} } -func (*RetryAffinity) ProtoMessage() {} -func (*RetryAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{101} -} -func (m *RetryAffinity) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RetryAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RetryAffinity) XXX_Merge(src proto.Message) { - xxx_messageInfo_RetryAffinity.Merge(m, src) -} -func (m *RetryAffinity) XXX_Size() int { - return m.Size() -} -func (m *RetryAffinity) XXX_DiscardUnknown() { - xxx_messageInfo_RetryAffinity.DiscardUnknown(m) -} - -var xxx_messageInfo_RetryAffinity proto.InternalMessageInfo - -func (m *RetryNodeAntiAffinity) Reset() { *m = RetryNodeAntiAffinity{} } -func (*RetryNodeAntiAffinity) ProtoMessage() {} -func (*RetryNodeAntiAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{102} -} -func (m *RetryNodeAntiAffinity) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RetryNodeAntiAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RetryNodeAntiAffinity) XXX_Merge(src proto.Message) { - xxx_messageInfo_RetryNodeAntiAffinity.Merge(m, src) -} -func (m *RetryNodeAntiAffinity) XXX_Size() int { - return m.Size() -} -func (m *RetryNodeAntiAffinity) XXX_DiscardUnknown() { - xxx_messageInfo_RetryNodeAntiAffinity.DiscardUnknown(m) -} - -var xxx_messageInfo_RetryNodeAntiAffinity proto.InternalMessageInfo - -func (m *RetryStrategy) Reset() { *m = RetryStrategy{} } -func (*RetryStrategy) ProtoMessage() {} -func (*RetryStrategy) 
Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{103} -} -func (m *RetryStrategy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RetryStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RetryStrategy) XXX_Merge(src proto.Message) { - xxx_messageInfo_RetryStrategy.Merge(m, src) -} -func (m *RetryStrategy) XXX_Size() int { - return m.Size() -} -func (m *RetryStrategy) XXX_DiscardUnknown() { - xxx_messageInfo_RetryStrategy.DiscardUnknown(m) -} - -var xxx_messageInfo_RetryStrategy proto.InternalMessageInfo - -func (m *S3Artifact) Reset() { *m = S3Artifact{} } -func (*S3Artifact) ProtoMessage() {} -func (*S3Artifact) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{104} -} -func (m *S3Artifact) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *S3Artifact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *S3Artifact) XXX_Merge(src proto.Message) { - xxx_messageInfo_S3Artifact.Merge(m, src) -} -func (m *S3Artifact) XXX_Size() int { - return m.Size() -} -func (m *S3Artifact) XXX_DiscardUnknown() { - xxx_messageInfo_S3Artifact.DiscardUnknown(m) -} - -var xxx_messageInfo_S3Artifact proto.InternalMessageInfo - -func (m *S3ArtifactRepository) Reset() { *m = S3ArtifactRepository{} } -func (*S3ArtifactRepository) ProtoMessage() {} -func (*S3ArtifactRepository) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{105} -} -func (m *S3ArtifactRepository) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *S3ArtifactRepository) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - 
return nil, err - } - return b[:n], nil -} -func (m *S3ArtifactRepository) XXX_Merge(src proto.Message) { - xxx_messageInfo_S3ArtifactRepository.Merge(m, src) -} -func (m *S3ArtifactRepository) XXX_Size() int { - return m.Size() -} -func (m *S3ArtifactRepository) XXX_DiscardUnknown() { - xxx_messageInfo_S3ArtifactRepository.DiscardUnknown(m) -} - -var xxx_messageInfo_S3ArtifactRepository proto.InternalMessageInfo - -func (m *S3Bucket) Reset() { *m = S3Bucket{} } -func (*S3Bucket) ProtoMessage() {} -func (*S3Bucket) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{106} -} -func (m *S3Bucket) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *S3Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *S3Bucket) XXX_Merge(src proto.Message) { - xxx_messageInfo_S3Bucket.Merge(m, src) -} -func (m *S3Bucket) XXX_Size() int { - return m.Size() -} -func (m *S3Bucket) XXX_DiscardUnknown() { - xxx_messageInfo_S3Bucket.DiscardUnknown(m) -} - -var xxx_messageInfo_S3Bucket proto.InternalMessageInfo - -func (m *S3EncryptionOptions) Reset() { *m = S3EncryptionOptions{} } -func (*S3EncryptionOptions) ProtoMessage() {} -func (*S3EncryptionOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{107} -} -func (m *S3EncryptionOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *S3EncryptionOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *S3EncryptionOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_S3EncryptionOptions.Merge(m, src) -} -func (m *S3EncryptionOptions) XXX_Size() int { - return m.Size() -} -func (m *S3EncryptionOptions) XXX_DiscardUnknown() { - 
xxx_messageInfo_S3EncryptionOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_S3EncryptionOptions proto.InternalMessageInfo - -func (m *ScriptTemplate) Reset() { *m = ScriptTemplate{} } -func (*ScriptTemplate) ProtoMessage() {} -func (*ScriptTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{108} -} -func (m *ScriptTemplate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ScriptTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ScriptTemplate) XXX_Merge(src proto.Message) { - xxx_messageInfo_ScriptTemplate.Merge(m, src) -} -func (m *ScriptTemplate) XXX_Size() int { - return m.Size() -} -func (m *ScriptTemplate) XXX_DiscardUnknown() { - xxx_messageInfo_ScriptTemplate.DiscardUnknown(m) -} - -var xxx_messageInfo_ScriptTemplate proto.InternalMessageInfo - -func (m *SemaphoreHolding) Reset() { *m = SemaphoreHolding{} } -func (*SemaphoreHolding) ProtoMessage() {} -func (*SemaphoreHolding) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{109} -} -func (m *SemaphoreHolding) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SemaphoreHolding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SemaphoreHolding) XXX_Merge(src proto.Message) { - xxx_messageInfo_SemaphoreHolding.Merge(m, src) -} -func (m *SemaphoreHolding) XXX_Size() int { - return m.Size() -} -func (m *SemaphoreHolding) XXX_DiscardUnknown() { - xxx_messageInfo_SemaphoreHolding.DiscardUnknown(m) -} - -var xxx_messageInfo_SemaphoreHolding proto.InternalMessageInfo - -func (m *SemaphoreRef) Reset() { *m = SemaphoreRef{} } -func (*SemaphoreRef) ProtoMessage() {} -func (*SemaphoreRef) Descriptor() ([]byte, []int) { - return 
fileDescriptor_724696e352c3df5f, []int{110} -} -func (m *SemaphoreRef) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SemaphoreRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SemaphoreRef) XXX_Merge(src proto.Message) { - xxx_messageInfo_SemaphoreRef.Merge(m, src) -} -func (m *SemaphoreRef) XXX_Size() int { - return m.Size() -} -func (m *SemaphoreRef) XXX_DiscardUnknown() { - xxx_messageInfo_SemaphoreRef.DiscardUnknown(m) -} - -var xxx_messageInfo_SemaphoreRef proto.InternalMessageInfo - -func (m *SemaphoreStatus) Reset() { *m = SemaphoreStatus{} } -func (*SemaphoreStatus) ProtoMessage() {} -func (*SemaphoreStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{111} -} -func (m *SemaphoreStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SemaphoreStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SemaphoreStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_SemaphoreStatus.Merge(m, src) -} -func (m *SemaphoreStatus) XXX_Size() int { - return m.Size() -} -func (m *SemaphoreStatus) XXX_DiscardUnknown() { - xxx_messageInfo_SemaphoreStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_SemaphoreStatus proto.InternalMessageInfo - -func (m *Sequence) Reset() { *m = Sequence{} } -func (*Sequence) ProtoMessage() {} -func (*Sequence) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{112} -} -func (m *Sequence) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Sequence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Sequence) 
XXX_Merge(src proto.Message) { - xxx_messageInfo_Sequence.Merge(m, src) -} -func (m *Sequence) XXX_Size() int { - return m.Size() -} -func (m *Sequence) XXX_DiscardUnknown() { - xxx_messageInfo_Sequence.DiscardUnknown(m) -} - -var xxx_messageInfo_Sequence proto.InternalMessageInfo - -func (m *Submit) Reset() { *m = Submit{} } -func (*Submit) ProtoMessage() {} -func (*Submit) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{113} -} -func (m *Submit) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Submit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Submit) XXX_Merge(src proto.Message) { - xxx_messageInfo_Submit.Merge(m, src) -} -func (m *Submit) XXX_Size() int { - return m.Size() -} -func (m *Submit) XXX_DiscardUnknown() { - xxx_messageInfo_Submit.DiscardUnknown(m) -} - -var xxx_messageInfo_Submit proto.InternalMessageInfo - -func (m *SubmitOpts) Reset() { *m = SubmitOpts{} } -func (*SubmitOpts) ProtoMessage() {} -func (*SubmitOpts) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{114} -} -func (m *SubmitOpts) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SubmitOpts) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SubmitOpts) XXX_Merge(src proto.Message) { - xxx_messageInfo_SubmitOpts.Merge(m, src) -} -func (m *SubmitOpts) XXX_Size() int { - return m.Size() -} -func (m *SubmitOpts) XXX_DiscardUnknown() { - xxx_messageInfo_SubmitOpts.DiscardUnknown(m) -} - -var xxx_messageInfo_SubmitOpts proto.InternalMessageInfo - -func (m *SuppliedValueFrom) Reset() { *m = SuppliedValueFrom{} } -func (*SuppliedValueFrom) ProtoMessage() {} -func (*SuppliedValueFrom) Descriptor() ([]byte, []int) { - 
return fileDescriptor_724696e352c3df5f, []int{115} -} -func (m *SuppliedValueFrom) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SuppliedValueFrom) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SuppliedValueFrom) XXX_Merge(src proto.Message) { - xxx_messageInfo_SuppliedValueFrom.Merge(m, src) -} -func (m *SuppliedValueFrom) XXX_Size() int { - return m.Size() -} -func (m *SuppliedValueFrom) XXX_DiscardUnknown() { - xxx_messageInfo_SuppliedValueFrom.DiscardUnknown(m) -} - -var xxx_messageInfo_SuppliedValueFrom proto.InternalMessageInfo - -func (m *SuspendTemplate) Reset() { *m = SuspendTemplate{} } -func (*SuspendTemplate) ProtoMessage() {} -func (*SuspendTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{116} -} -func (m *SuspendTemplate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SuspendTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SuspendTemplate) XXX_Merge(src proto.Message) { - xxx_messageInfo_SuspendTemplate.Merge(m, src) -} -func (m *SuspendTemplate) XXX_Size() int { - return m.Size() -} -func (m *SuspendTemplate) XXX_DiscardUnknown() { - xxx_messageInfo_SuspendTemplate.DiscardUnknown(m) -} - -var xxx_messageInfo_SuspendTemplate proto.InternalMessageInfo - -func (m *Synchronization) Reset() { *m = Synchronization{} } -func (*Synchronization) ProtoMessage() {} -func (*Synchronization) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{117} -} -func (m *Synchronization) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Synchronization) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := 
m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Synchronization) XXX_Merge(src proto.Message) { - xxx_messageInfo_Synchronization.Merge(m, src) -} -func (m *Synchronization) XXX_Size() int { - return m.Size() -} -func (m *Synchronization) XXX_DiscardUnknown() { - xxx_messageInfo_Synchronization.DiscardUnknown(m) -} - -var xxx_messageInfo_Synchronization proto.InternalMessageInfo - -func (m *SynchronizationStatus) Reset() { *m = SynchronizationStatus{} } -func (*SynchronizationStatus) ProtoMessage() {} -func (*SynchronizationStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{118} -} -func (m *SynchronizationStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SynchronizationStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SynchronizationStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_SynchronizationStatus.Merge(m, src) -} -func (m *SynchronizationStatus) XXX_Size() int { - return m.Size() -} -func (m *SynchronizationStatus) XXX_DiscardUnknown() { - xxx_messageInfo_SynchronizationStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_SynchronizationStatus proto.InternalMessageInfo - -func (m *TTLStrategy) Reset() { *m = TTLStrategy{} } -func (*TTLStrategy) ProtoMessage() {} -func (*TTLStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{119} -} -func (m *TTLStrategy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TTLStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *TTLStrategy) XXX_Merge(src proto.Message) { - xxx_messageInfo_TTLStrategy.Merge(m, src) -} -func (m *TTLStrategy) XXX_Size() int { - return 
m.Size() -} -func (m *TTLStrategy) XXX_DiscardUnknown() { - xxx_messageInfo_TTLStrategy.DiscardUnknown(m) -} - -var xxx_messageInfo_TTLStrategy proto.InternalMessageInfo - -func (m *TarStrategy) Reset() { *m = TarStrategy{} } -func (*TarStrategy) ProtoMessage() {} -func (*TarStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{120} -} -func (m *TarStrategy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TarStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *TarStrategy) XXX_Merge(src proto.Message) { - xxx_messageInfo_TarStrategy.Merge(m, src) -} -func (m *TarStrategy) XXX_Size() int { - return m.Size() -} -func (m *TarStrategy) XXX_DiscardUnknown() { - xxx_messageInfo_TarStrategy.DiscardUnknown(m) -} - -var xxx_messageInfo_TarStrategy proto.InternalMessageInfo - -func (m *Template) Reset() { *m = Template{} } -func (*Template) ProtoMessage() {} -func (*Template) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{121} -} -func (m *Template) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Template) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Template) XXX_Merge(src proto.Message) { - xxx_messageInfo_Template.Merge(m, src) -} -func (m *Template) XXX_Size() int { - return m.Size() -} -func (m *Template) XXX_DiscardUnknown() { - xxx_messageInfo_Template.DiscardUnknown(m) -} - -var xxx_messageInfo_Template proto.InternalMessageInfo - -func (m *TemplateRef) Reset() { *m = TemplateRef{} } -func (*TemplateRef) ProtoMessage() {} -func (*TemplateRef) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{122} -} -func (m *TemplateRef) XXX_Unmarshal(b []byte) 
error { - return m.Unmarshal(b) -} -func (m *TemplateRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *TemplateRef) XXX_Merge(src proto.Message) { - xxx_messageInfo_TemplateRef.Merge(m, src) -} -func (m *TemplateRef) XXX_Size() int { - return m.Size() -} -func (m *TemplateRef) XXX_DiscardUnknown() { - xxx_messageInfo_TemplateRef.DiscardUnknown(m) -} - -var xxx_messageInfo_TemplateRef proto.InternalMessageInfo - -func (m *TransformationStep) Reset() { *m = TransformationStep{} } -func (*TransformationStep) ProtoMessage() {} -func (*TransformationStep) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{123} -} -func (m *TransformationStep) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TransformationStep) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *TransformationStep) XXX_Merge(src proto.Message) { - xxx_messageInfo_TransformationStep.Merge(m, src) -} -func (m *TransformationStep) XXX_Size() int { - return m.Size() -} -func (m *TransformationStep) XXX_DiscardUnknown() { - xxx_messageInfo_TransformationStep.DiscardUnknown(m) -} - -var xxx_messageInfo_TransformationStep proto.InternalMessageInfo - -func (m *UserContainer) Reset() { *m = UserContainer{} } -func (*UserContainer) ProtoMessage() {} -func (*UserContainer) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{124} -} -func (m *UserContainer) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UserContainer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *UserContainer) XXX_Merge(src proto.Message) { 
- xxx_messageInfo_UserContainer.Merge(m, src) -} -func (m *UserContainer) XXX_Size() int { - return m.Size() -} -func (m *UserContainer) XXX_DiscardUnknown() { - xxx_messageInfo_UserContainer.DiscardUnknown(m) -} - -var xxx_messageInfo_UserContainer proto.InternalMessageInfo - -func (m *ValueFrom) Reset() { *m = ValueFrom{} } -func (*ValueFrom) ProtoMessage() {} -func (*ValueFrom) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{125} -} -func (m *ValueFrom) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ValueFrom) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ValueFrom) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValueFrom.Merge(m, src) -} -func (m *ValueFrom) XXX_Size() int { - return m.Size() -} -func (m *ValueFrom) XXX_DiscardUnknown() { - xxx_messageInfo_ValueFrom.DiscardUnknown(m) -} - -var xxx_messageInfo_ValueFrom proto.InternalMessageInfo - -func (m *Version) Reset() { *m = Version{} } -func (*Version) ProtoMessage() {} -func (*Version) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{126} -} -func (m *Version) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Version) XXX_Merge(src proto.Message) { - xxx_messageInfo_Version.Merge(m, src) -} -func (m *Version) XXX_Size() int { - return m.Size() -} -func (m *Version) XXX_DiscardUnknown() { - xxx_messageInfo_Version.DiscardUnknown(m) -} - -var xxx_messageInfo_Version proto.InternalMessageInfo - -func (m *VolumeClaimGC) Reset() { *m = VolumeClaimGC{} } -func (*VolumeClaimGC) ProtoMessage() {} -func (*VolumeClaimGC) Descriptor() ([]byte, []int) { - return 
fileDescriptor_724696e352c3df5f, []int{127} -} -func (m *VolumeClaimGC) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VolumeClaimGC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *VolumeClaimGC) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeClaimGC.Merge(m, src) -} -func (m *VolumeClaimGC) XXX_Size() int { - return m.Size() -} -func (m *VolumeClaimGC) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeClaimGC.DiscardUnknown(m) -} - -var xxx_messageInfo_VolumeClaimGC proto.InternalMessageInfo - -func (m *Workflow) Reset() { *m = Workflow{} } -func (*Workflow) ProtoMessage() {} -func (*Workflow) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{128} -} -func (m *Workflow) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Workflow) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Workflow) XXX_Merge(src proto.Message) { - xxx_messageInfo_Workflow.Merge(m, src) -} -func (m *Workflow) XXX_Size() int { - return m.Size() -} -func (m *Workflow) XXX_DiscardUnknown() { - xxx_messageInfo_Workflow.DiscardUnknown(m) -} - -var xxx_messageInfo_Workflow proto.InternalMessageInfo - -func (m *WorkflowArtifactGCTask) Reset() { *m = WorkflowArtifactGCTask{} } -func (*WorkflowArtifactGCTask) ProtoMessage() {} -func (*WorkflowArtifactGCTask) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{129} -} -func (m *WorkflowArtifactGCTask) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WorkflowArtifactGCTask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m 
*WorkflowArtifactGCTask) XXX_Merge(src proto.Message) { - xxx_messageInfo_WorkflowArtifactGCTask.Merge(m, src) -} -func (m *WorkflowArtifactGCTask) XXX_Size() int { - return m.Size() -} -func (m *WorkflowArtifactGCTask) XXX_DiscardUnknown() { - xxx_messageInfo_WorkflowArtifactGCTask.DiscardUnknown(m) -} - -var xxx_messageInfo_WorkflowArtifactGCTask proto.InternalMessageInfo - -func (m *WorkflowArtifactGCTaskList) Reset() { *m = WorkflowArtifactGCTaskList{} } -func (*WorkflowArtifactGCTaskList) ProtoMessage() {} -func (*WorkflowArtifactGCTaskList) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{130} -} -func (m *WorkflowArtifactGCTaskList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WorkflowArtifactGCTaskList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *WorkflowArtifactGCTaskList) XXX_Merge(src proto.Message) { - xxx_messageInfo_WorkflowArtifactGCTaskList.Merge(m, src) -} -func (m *WorkflowArtifactGCTaskList) XXX_Size() int { - return m.Size() -} -func (m *WorkflowArtifactGCTaskList) XXX_DiscardUnknown() { - xxx_messageInfo_WorkflowArtifactGCTaskList.DiscardUnknown(m) -} - -var xxx_messageInfo_WorkflowArtifactGCTaskList proto.InternalMessageInfo - -func (m *WorkflowEventBinding) Reset() { *m = WorkflowEventBinding{} } -func (*WorkflowEventBinding) ProtoMessage() {} -func (*WorkflowEventBinding) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{131} -} -func (m *WorkflowEventBinding) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WorkflowEventBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *WorkflowEventBinding) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_WorkflowEventBinding.Merge(m, src) -} -func (m *WorkflowEventBinding) XXX_Size() int { - return m.Size() -} -func (m *WorkflowEventBinding) XXX_DiscardUnknown() { - xxx_messageInfo_WorkflowEventBinding.DiscardUnknown(m) -} - -var xxx_messageInfo_WorkflowEventBinding proto.InternalMessageInfo - -func (m *WorkflowEventBindingList) Reset() { *m = WorkflowEventBindingList{} } -func (*WorkflowEventBindingList) ProtoMessage() {} -func (*WorkflowEventBindingList) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{132} -} -func (m *WorkflowEventBindingList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WorkflowEventBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *WorkflowEventBindingList) XXX_Merge(src proto.Message) { - xxx_messageInfo_WorkflowEventBindingList.Merge(m, src) -} -func (m *WorkflowEventBindingList) XXX_Size() int { - return m.Size() -} -func (m *WorkflowEventBindingList) XXX_DiscardUnknown() { - xxx_messageInfo_WorkflowEventBindingList.DiscardUnknown(m) -} - -var xxx_messageInfo_WorkflowEventBindingList proto.InternalMessageInfo - -func (m *WorkflowEventBindingSpec) Reset() { *m = WorkflowEventBindingSpec{} } -func (*WorkflowEventBindingSpec) ProtoMessage() {} -func (*WorkflowEventBindingSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{133} -} -func (m *WorkflowEventBindingSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WorkflowEventBindingSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *WorkflowEventBindingSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_WorkflowEventBindingSpec.Merge(m, src) -} -func (m *WorkflowEventBindingSpec) 
XXX_Size() int { - return m.Size() -} -func (m *WorkflowEventBindingSpec) XXX_DiscardUnknown() { - xxx_messageInfo_WorkflowEventBindingSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_WorkflowEventBindingSpec proto.InternalMessageInfo - -func (m *WorkflowLevelArtifactGC) Reset() { *m = WorkflowLevelArtifactGC{} } -func (*WorkflowLevelArtifactGC) ProtoMessage() {} -func (*WorkflowLevelArtifactGC) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{134} -} -func (m *WorkflowLevelArtifactGC) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WorkflowLevelArtifactGC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *WorkflowLevelArtifactGC) XXX_Merge(src proto.Message) { - xxx_messageInfo_WorkflowLevelArtifactGC.Merge(m, src) -} -func (m *WorkflowLevelArtifactGC) XXX_Size() int { - return m.Size() -} -func (m *WorkflowLevelArtifactGC) XXX_DiscardUnknown() { - xxx_messageInfo_WorkflowLevelArtifactGC.DiscardUnknown(m) -} - -var xxx_messageInfo_WorkflowLevelArtifactGC proto.InternalMessageInfo - -func (m *WorkflowList) Reset() { *m = WorkflowList{} } -func (*WorkflowList) ProtoMessage() {} -func (*WorkflowList) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{135} -} -func (m *WorkflowList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WorkflowList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *WorkflowList) XXX_Merge(src proto.Message) { - xxx_messageInfo_WorkflowList.Merge(m, src) -} -func (m *WorkflowList) XXX_Size() int { - return m.Size() -} -func (m *WorkflowList) XXX_DiscardUnknown() { - xxx_messageInfo_WorkflowList.DiscardUnknown(m) -} - -var xxx_messageInfo_WorkflowList 
proto.InternalMessageInfo - -func (m *WorkflowMetadata) Reset() { *m = WorkflowMetadata{} } -func (*WorkflowMetadata) ProtoMessage() {} -func (*WorkflowMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{136} -} -func (m *WorkflowMetadata) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WorkflowMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *WorkflowMetadata) XXX_Merge(src proto.Message) { - xxx_messageInfo_WorkflowMetadata.Merge(m, src) -} -func (m *WorkflowMetadata) XXX_Size() int { - return m.Size() -} -func (m *WorkflowMetadata) XXX_DiscardUnknown() { - xxx_messageInfo_WorkflowMetadata.DiscardUnknown(m) -} - -var xxx_messageInfo_WorkflowMetadata proto.InternalMessageInfo - -func (m *WorkflowSpec) Reset() { *m = WorkflowSpec{} } -func (*WorkflowSpec) ProtoMessage() {} -func (*WorkflowSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{137} -} -func (m *WorkflowSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WorkflowSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *WorkflowSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_WorkflowSpec.Merge(m, src) -} -func (m *WorkflowSpec) XXX_Size() int { - return m.Size() -} -func (m *WorkflowSpec) XXX_DiscardUnknown() { - xxx_messageInfo_WorkflowSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_WorkflowSpec proto.InternalMessageInfo - -func (m *WorkflowStatus) Reset() { *m = WorkflowStatus{} } -func (*WorkflowStatus) ProtoMessage() {} -func (*WorkflowStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{138} -} -func (m *WorkflowStatus) XXX_Unmarshal(b []byte) error { - return 
m.Unmarshal(b) -} -func (m *WorkflowStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *WorkflowStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_WorkflowStatus.Merge(m, src) -} -func (m *WorkflowStatus) XXX_Size() int { - return m.Size() -} -func (m *WorkflowStatus) XXX_DiscardUnknown() { - xxx_messageInfo_WorkflowStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_WorkflowStatus proto.InternalMessageInfo - -func (m *WorkflowStep) Reset() { *m = WorkflowStep{} } -func (*WorkflowStep) ProtoMessage() {} -func (*WorkflowStep) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{139} -} -func (m *WorkflowStep) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WorkflowStep) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *WorkflowStep) XXX_Merge(src proto.Message) { - xxx_messageInfo_WorkflowStep.Merge(m, src) -} -func (m *WorkflowStep) XXX_Size() int { - return m.Size() -} -func (m *WorkflowStep) XXX_DiscardUnknown() { - xxx_messageInfo_WorkflowStep.DiscardUnknown(m) -} - -var xxx_messageInfo_WorkflowStep proto.InternalMessageInfo - -func (m *WorkflowTaskResult) Reset() { *m = WorkflowTaskResult{} } -func (*WorkflowTaskResult) ProtoMessage() {} -func (*WorkflowTaskResult) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{140} -} -func (m *WorkflowTaskResult) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WorkflowTaskResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *WorkflowTaskResult) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_WorkflowTaskResult.Merge(m, src) -} -func (m *WorkflowTaskResult) XXX_Size() int { - return m.Size() -} -func (m *WorkflowTaskResult) XXX_DiscardUnknown() { - xxx_messageInfo_WorkflowTaskResult.DiscardUnknown(m) -} - -var xxx_messageInfo_WorkflowTaskResult proto.InternalMessageInfo - -func (m *WorkflowTaskResultList) Reset() { *m = WorkflowTaskResultList{} } -func (*WorkflowTaskResultList) ProtoMessage() {} -func (*WorkflowTaskResultList) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{141} -} -func (m *WorkflowTaskResultList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WorkflowTaskResultList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *WorkflowTaskResultList) XXX_Merge(src proto.Message) { - xxx_messageInfo_WorkflowTaskResultList.Merge(m, src) -} -func (m *WorkflowTaskResultList) XXX_Size() int { - return m.Size() -} -func (m *WorkflowTaskResultList) XXX_DiscardUnknown() { - xxx_messageInfo_WorkflowTaskResultList.DiscardUnknown(m) -} - -var xxx_messageInfo_WorkflowTaskResultList proto.InternalMessageInfo - -func (m *WorkflowTaskSet) Reset() { *m = WorkflowTaskSet{} } -func (*WorkflowTaskSet) ProtoMessage() {} -func (*WorkflowTaskSet) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{142} -} -func (m *WorkflowTaskSet) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WorkflowTaskSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *WorkflowTaskSet) XXX_Merge(src proto.Message) { - xxx_messageInfo_WorkflowTaskSet.Merge(m, src) -} -func (m *WorkflowTaskSet) XXX_Size() int { - return m.Size() -} -func (m *WorkflowTaskSet) XXX_DiscardUnknown() { - 
xxx_messageInfo_WorkflowTaskSet.DiscardUnknown(m) -} - -var xxx_messageInfo_WorkflowTaskSet proto.InternalMessageInfo - -func (m *WorkflowTaskSetList) Reset() { *m = WorkflowTaskSetList{} } -func (*WorkflowTaskSetList) ProtoMessage() {} -func (*WorkflowTaskSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{143} -} -func (m *WorkflowTaskSetList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WorkflowTaskSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *WorkflowTaskSetList) XXX_Merge(src proto.Message) { - xxx_messageInfo_WorkflowTaskSetList.Merge(m, src) -} -func (m *WorkflowTaskSetList) XXX_Size() int { - return m.Size() -} -func (m *WorkflowTaskSetList) XXX_DiscardUnknown() { - xxx_messageInfo_WorkflowTaskSetList.DiscardUnknown(m) -} - -var xxx_messageInfo_WorkflowTaskSetList proto.InternalMessageInfo - -func (m *WorkflowTaskSetSpec) Reset() { *m = WorkflowTaskSetSpec{} } -func (*WorkflowTaskSetSpec) ProtoMessage() {} -func (*WorkflowTaskSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{144} -} -func (m *WorkflowTaskSetSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WorkflowTaskSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *WorkflowTaskSetSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_WorkflowTaskSetSpec.Merge(m, src) -} -func (m *WorkflowTaskSetSpec) XXX_Size() int { - return m.Size() -} -func (m *WorkflowTaskSetSpec) XXX_DiscardUnknown() { - xxx_messageInfo_WorkflowTaskSetSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_WorkflowTaskSetSpec proto.InternalMessageInfo - -func (m *WorkflowTaskSetStatus) Reset() { *m = WorkflowTaskSetStatus{} } 
-func (*WorkflowTaskSetStatus) ProtoMessage() {} -func (*WorkflowTaskSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{145} -} -func (m *WorkflowTaskSetStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WorkflowTaskSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *WorkflowTaskSetStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_WorkflowTaskSetStatus.Merge(m, src) -} -func (m *WorkflowTaskSetStatus) XXX_Size() int { - return m.Size() -} -func (m *WorkflowTaskSetStatus) XXX_DiscardUnknown() { - xxx_messageInfo_WorkflowTaskSetStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_WorkflowTaskSetStatus proto.InternalMessageInfo - -func (m *WorkflowTemplate) Reset() { *m = WorkflowTemplate{} } -func (*WorkflowTemplate) ProtoMessage() {} -func (*WorkflowTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{146} -} -func (m *WorkflowTemplate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WorkflowTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *WorkflowTemplate) XXX_Merge(src proto.Message) { - xxx_messageInfo_WorkflowTemplate.Merge(m, src) -} -func (m *WorkflowTemplate) XXX_Size() int { - return m.Size() -} -func (m *WorkflowTemplate) XXX_DiscardUnknown() { - xxx_messageInfo_WorkflowTemplate.DiscardUnknown(m) -} - -var xxx_messageInfo_WorkflowTemplate proto.InternalMessageInfo - -func (m *WorkflowTemplateList) Reset() { *m = WorkflowTemplateList{} } -func (*WorkflowTemplateList) ProtoMessage() {} -func (*WorkflowTemplateList) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{147} -} -func (m *WorkflowTemplateList) 
XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WorkflowTemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *WorkflowTemplateList) XXX_Merge(src proto.Message) { - xxx_messageInfo_WorkflowTemplateList.Merge(m, src) -} -func (m *WorkflowTemplateList) XXX_Size() int { - return m.Size() -} -func (m *WorkflowTemplateList) XXX_DiscardUnknown() { - xxx_messageInfo_WorkflowTemplateList.DiscardUnknown(m) -} - -var xxx_messageInfo_WorkflowTemplateList proto.InternalMessageInfo - -func (m *WorkflowTemplateRef) Reset() { *m = WorkflowTemplateRef{} } -func (*WorkflowTemplateRef) ProtoMessage() {} -func (*WorkflowTemplateRef) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{148} -} -func (m *WorkflowTemplateRef) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WorkflowTemplateRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *WorkflowTemplateRef) XXX_Merge(src proto.Message) { - xxx_messageInfo_WorkflowTemplateRef.Merge(m, src) -} -func (m *WorkflowTemplateRef) XXX_Size() int { - return m.Size() -} -func (m *WorkflowTemplateRef) XXX_DiscardUnknown() { - xxx_messageInfo_WorkflowTemplateRef.DiscardUnknown(m) -} - -var xxx_messageInfo_WorkflowTemplateRef proto.InternalMessageInfo - -func (m *ZipStrategy) Reset() { *m = ZipStrategy{} } -func (*ZipStrategy) ProtoMessage() {} -func (*ZipStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{149} -} -func (m *ZipStrategy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ZipStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, 
err - } - return b[:n], nil -} -func (m *ZipStrategy) XXX_Merge(src proto.Message) { - xxx_messageInfo_ZipStrategy.Merge(m, src) -} -func (m *ZipStrategy) XXX_Size() int { - return m.Size() -} -func (m *ZipStrategy) XXX_DiscardUnknown() { - xxx_messageInfo_ZipStrategy.DiscardUnknown(m) -} - -var xxx_messageInfo_ZipStrategy proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Amount)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Amount") - proto.RegisterType((*ArchiveStrategy)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArchiveStrategy") - proto.RegisterType((*Arguments)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Arguments") - proto.RegisterType((*ArtGCStatus)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtGCStatus") - proto.RegisterMapType((map[string]bool)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtGCStatus.PodsRecoupedEntry") - proto.RegisterMapType((map[ArtifactGCStrategy]bool)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtGCStatus.StrategiesProcessedEntry") - proto.RegisterType((*Artifact)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Artifact") - proto.RegisterType((*ArtifactGC)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactGC") - proto.RegisterType((*ArtifactGCSpec)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactGCSpec") - proto.RegisterMapType((map[string]ArtifactNodeSpec)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactGCSpec.ArtifactsByNodeEntry") - proto.RegisterType((*ArtifactGCStatus)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactGCStatus") - proto.RegisterMapType((map[string]ArtifactResultNodeStatus)(nil), 
"github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactGCStatus.ArtifactResultsByNodeEntry") - proto.RegisterType((*ArtifactLocation)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactLocation") - proto.RegisterType((*ArtifactNodeSpec)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactNodeSpec") - proto.RegisterMapType((map[string]Artifact)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactNodeSpec.ArtifactsEntry") - proto.RegisterType((*ArtifactPaths)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactPaths") - proto.RegisterType((*ArtifactRepository)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactRepository") - proto.RegisterType((*ArtifactRepositoryRef)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactRepositoryRef") - proto.RegisterType((*ArtifactRepositoryRefStatus)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactRepositoryRefStatus") - proto.RegisterType((*ArtifactResult)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactResult") - proto.RegisterType((*ArtifactResultNodeStatus)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactResultNodeStatus") - proto.RegisterMapType((map[string]ArtifactResult)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactResultNodeStatus.ArtifactResultsEntry") - proto.RegisterType((*ArtifactSearchQuery)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactSearchQuery") - proto.RegisterMapType((map[ArtifactGCStrategy]bool)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactSearchQuery.ArtifactGCStrategiesEntry") - proto.RegisterMapType((map[NodeType]bool)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactSearchQuery.NodeTypesEntry") 
- proto.RegisterType((*ArtifactSearchResult)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactSearchResult") - proto.RegisterType((*ArtifactoryArtifact)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactoryArtifact") - proto.RegisterType((*ArtifactoryArtifactRepository)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactoryArtifactRepository") - proto.RegisterType((*ArtifactoryAuth)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ArtifactoryAuth") - proto.RegisterType((*AzureArtifact)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.AzureArtifact") - proto.RegisterType((*AzureArtifactRepository)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.AzureArtifactRepository") - proto.RegisterType((*AzureBlobContainer)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.AzureBlobContainer") - proto.RegisterType((*Backoff)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Backoff") - proto.RegisterType((*BasicAuth)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.BasicAuth") - proto.RegisterType((*Cache)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Cache") - proto.RegisterType((*ClientCertAuth)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ClientCertAuth") - proto.RegisterType((*ClusterWorkflowTemplate)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ClusterWorkflowTemplate") - proto.RegisterType((*ClusterWorkflowTemplateList)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ClusterWorkflowTemplateList") - proto.RegisterType((*Column)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Column") - proto.RegisterType((*Condition)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Condition") - 
proto.RegisterType((*ContainerNode)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ContainerNode") - proto.RegisterType((*ContainerSetRetryStrategy)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ContainerSetRetryStrategy") - proto.RegisterType((*ContainerSetTemplate)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ContainerSetTemplate") - proto.RegisterType((*ContinueOn)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ContinueOn") - proto.RegisterType((*Counter)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Counter") - proto.RegisterType((*CreateS3BucketOptions)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.CreateS3BucketOptions") - proto.RegisterType((*CronWorkflow)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.CronWorkflow") - proto.RegisterType((*CronWorkflowList)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.CronWorkflowList") - proto.RegisterType((*CronWorkflowSpec)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.CronWorkflowSpec") - proto.RegisterType((*CronWorkflowStatus)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.CronWorkflowStatus") - proto.RegisterType((*DAGTask)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.DAGTask") - proto.RegisterMapType((LifecycleHooks)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.DAGTask.HooksEntry") - proto.RegisterType((*DAGTemplate)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.DAGTemplate") - proto.RegisterType((*Data)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Data") - proto.RegisterType((*DataSource)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.DataSource") - proto.RegisterType((*Event)(nil), 
"github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Event") - proto.RegisterType((*ExecutorConfig)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ExecutorConfig") - proto.RegisterType((*GCSArtifact)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.GCSArtifact") - proto.RegisterType((*GCSArtifactRepository)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.GCSArtifactRepository") - proto.RegisterType((*GCSBucket)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.GCSBucket") - proto.RegisterType((*Gauge)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Gauge") - proto.RegisterType((*GitArtifact)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.GitArtifact") - proto.RegisterType((*HDFSArtifact)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.HDFSArtifact") - proto.RegisterType((*HDFSArtifactRepository)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.HDFSArtifactRepository") - proto.RegisterType((*HDFSConfig)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.HDFSConfig") - proto.RegisterType((*HDFSKrbConfig)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.HDFSKrbConfig") - proto.RegisterType((*HTTP)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.HTTP") - proto.RegisterType((*HTTPArtifact)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.HTTPArtifact") - proto.RegisterType((*HTTPAuth)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.HTTPAuth") - proto.RegisterType((*HTTPBodySource)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.HTTPBodySource") - proto.RegisterType((*HTTPHeader)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.HTTPHeader") - proto.RegisterType((*HTTPHeaderSource)(nil), 
"github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.HTTPHeaderSource") - proto.RegisterType((*Header)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Header") - proto.RegisterType((*Histogram)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Histogram") - proto.RegisterType((*Inputs)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Inputs") - proto.RegisterType((*Item)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Item") - proto.RegisterType((*LabelKeys)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.LabelKeys") - proto.RegisterType((*LabelValueFrom)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.LabelValueFrom") - proto.RegisterType((*LabelValues)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.LabelValues") - proto.RegisterType((*LifecycleHook)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.LifecycleHook") - proto.RegisterType((*Link)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Link") - proto.RegisterType((*ManifestFrom)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ManifestFrom") - proto.RegisterType((*MemoizationStatus)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.MemoizationStatus") - proto.RegisterType((*Memoize)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Memoize") - proto.RegisterType((*Metadata)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Metadata") - proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Metadata.AnnotationsEntry") - proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Metadata.LabelsEntry") - proto.RegisterType((*MetricLabel)(nil), 
"github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.MetricLabel") - proto.RegisterType((*Metrics)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Metrics") - proto.RegisterType((*Mutex)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Mutex") - proto.RegisterType((*MutexHolding)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.MutexHolding") - proto.RegisterType((*MutexStatus)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.MutexStatus") - proto.RegisterType((*NodeFlag)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.NodeFlag") - proto.RegisterType((*NodeResult)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.NodeResult") - proto.RegisterType((*NodeStatus)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.NodeStatus") - proto.RegisterMapType((ResourcesDuration)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.NodeStatus.ResourcesDurationEntry") - proto.RegisterType((*NodeSynchronizationStatus)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.NodeSynchronizationStatus") - proto.RegisterType((*NoneStrategy)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.NoneStrategy") - proto.RegisterType((*OAuth2Auth)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.OAuth2Auth") - proto.RegisterType((*OAuth2EndpointParam)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.OAuth2EndpointParam") - proto.RegisterType((*OSSArtifact)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.OSSArtifact") - proto.RegisterType((*OSSArtifactRepository)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.OSSArtifactRepository") - proto.RegisterType((*OSSBucket)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.OSSBucket") - 
proto.RegisterType((*OSSLifecycleRule)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.OSSLifecycleRule") - proto.RegisterType((*Object)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Object") - proto.RegisterType((*Outputs)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Outputs") - proto.RegisterType((*ParallelSteps)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ParallelSteps") - proto.RegisterType((*Parameter)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Parameter") - proto.RegisterType((*Plugin)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Plugin") - proto.RegisterType((*PodGC)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.PodGC") - proto.RegisterType((*Prometheus)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Prometheus") - proto.RegisterType((*RawArtifact)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.RawArtifact") - proto.RegisterType((*ResourceTemplate)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ResourceTemplate") - proto.RegisterType((*RetryAffinity)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.RetryAffinity") - proto.RegisterType((*RetryNodeAntiAffinity)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.RetryNodeAntiAffinity") - proto.RegisterType((*RetryStrategy)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.RetryStrategy") - proto.RegisterType((*S3Artifact)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.S3Artifact") - proto.RegisterType((*S3ArtifactRepository)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.S3ArtifactRepository") - proto.RegisterType((*S3Bucket)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.S3Bucket") - 
proto.RegisterType((*S3EncryptionOptions)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.S3EncryptionOptions") - proto.RegisterType((*ScriptTemplate)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ScriptTemplate") - proto.RegisterType((*SemaphoreHolding)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.SemaphoreHolding") - proto.RegisterType((*SemaphoreRef)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.SemaphoreRef") - proto.RegisterType((*SemaphoreStatus)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.SemaphoreStatus") - proto.RegisterType((*Sequence)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Sequence") - proto.RegisterType((*Submit)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Submit") - proto.RegisterType((*SubmitOpts)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.SubmitOpts") - proto.RegisterType((*SuppliedValueFrom)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.SuppliedValueFrom") - proto.RegisterType((*SuspendTemplate)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.SuspendTemplate") - proto.RegisterType((*Synchronization)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Synchronization") - proto.RegisterType((*SynchronizationStatus)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.SynchronizationStatus") - proto.RegisterType((*TTLStrategy)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.TTLStrategy") - proto.RegisterType((*TarStrategy)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.TarStrategy") - proto.RegisterType((*Template)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Template") - proto.RegisterMapType((map[string]string)(nil), 
"github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Template.NodeSelectorEntry") - proto.RegisterType((*TemplateRef)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.TemplateRef") - proto.RegisterType((*TransformationStep)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.TransformationStep") - proto.RegisterType((*UserContainer)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.UserContainer") - proto.RegisterType((*ValueFrom)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ValueFrom") - proto.RegisterType((*Version)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Version") - proto.RegisterType((*VolumeClaimGC)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.VolumeClaimGC") - proto.RegisterType((*Workflow)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Workflow") - proto.RegisterType((*WorkflowArtifactGCTask)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowArtifactGCTask") - proto.RegisterType((*WorkflowArtifactGCTaskList)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowArtifactGCTaskList") - proto.RegisterType((*WorkflowEventBinding)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowEventBinding") - proto.RegisterType((*WorkflowEventBindingList)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowEventBindingList") - proto.RegisterType((*WorkflowEventBindingSpec)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowEventBindingSpec") - proto.RegisterType((*WorkflowLevelArtifactGC)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowLevelArtifactGC") - proto.RegisterType((*WorkflowList)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowList") - 
proto.RegisterType((*WorkflowMetadata)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowMetadata") - proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowMetadata.AnnotationsEntry") - proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowMetadata.LabelsEntry") - proto.RegisterMapType((map[string]LabelValueFrom)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowMetadata.LabelsFromEntry") - proto.RegisterType((*WorkflowSpec)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowSpec") - proto.RegisterMapType((LifecycleHooks)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowSpec.HooksEntry") - proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowSpec.NodeSelectorEntry") - proto.RegisterType((*WorkflowStatus)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowStatus") - proto.RegisterMapType((Nodes)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowStatus.NodesEntry") - proto.RegisterMapType((ResourcesDuration)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowStatus.ResourcesDurationEntry") - proto.RegisterMapType((map[string]Template)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowStatus.StoredTemplatesEntry") - proto.RegisterMapType((map[string]bool)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowStatus.TaskResultsCompletionStatusEntry") - proto.RegisterType((*WorkflowStep)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowStep") - proto.RegisterMapType((LifecycleHooks)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowStep.HooksEntry") 
- proto.RegisterType((*WorkflowTaskResult)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowTaskResult") - proto.RegisterType((*WorkflowTaskResultList)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowTaskResultList") - proto.RegisterType((*WorkflowTaskSet)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowTaskSet") - proto.RegisterType((*WorkflowTaskSetList)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowTaskSetList") - proto.RegisterType((*WorkflowTaskSetSpec)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowTaskSetSpec") - proto.RegisterMapType((map[string]Template)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowTaskSetSpec.TasksEntry") - proto.RegisterType((*WorkflowTaskSetStatus)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowTaskSetStatus") - proto.RegisterMapType((map[string]NodeResult)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowTaskSetStatus.NodesEntry") - proto.RegisterType((*WorkflowTemplate)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowTemplate") - proto.RegisterType((*WorkflowTemplateList)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowTemplateList") - proto.RegisterType((*WorkflowTemplateRef)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowTemplateRef") - proto.RegisterType((*ZipStrategy)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ZipStrategy") -} - -func init() { - proto.RegisterFile("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.proto", fileDescriptor_724696e352c3df5f) -} - -var fileDescriptor_724696e352c3df5f = []byte{ - // 10907 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x7d, 
0x6d, 0x70, 0x24, 0xc7, - 0x75, 0x18, 0x67, 0x81, 0xc5, 0xc7, 0xc3, 0xc7, 0xe1, 0xfa, 0xbe, 0x96, 0x20, 0x79, 0xa0, 0x87, - 0x22, 0x43, 0xda, 0x14, 0xce, 0x3c, 0x4a, 0x09, 0x23, 0x25, 0x92, 0xf0, 0x71, 0xc0, 0x1d, 0x01, - 0x1c, 0xc0, 0x5e, 0x1c, 0xcf, 0xa4, 0x68, 0x49, 0x83, 0xdd, 0xc6, 0xee, 0x10, 0xbb, 0x33, 0xcb, - 0x99, 0x59, 0xe0, 0xc0, 0x0f, 0x49, 0xa1, 0xbe, 0x63, 0xd9, 0x4a, 0x64, 0x49, 0x96, 0x94, 0xa4, - 0x4a, 0x51, 0xa4, 0x84, 0x25, 0xbb, 0x92, 0xb2, 0x7f, 0xa5, 0xec, 0x7f, 0xa9, 0x94, 0x4b, 0x29, - 0xa7, 0x2a, 0x72, 0x59, 0x29, 0xe9, 0x87, 0x0d, 0x46, 0x48, 0xa2, 0x1f, 0x49, 0x54, 0x95, 0xa8, - 0x62, 0xc7, 0xbe, 0x7c, 0x54, 0xaa, 0x3f, 0xa7, 0x7b, 0x76, 0x16, 0xb7, 0xc0, 0x35, 0x70, 0x2a, - 0xfb, 0x17, 0xb0, 0xaf, 0x5f, 0xbf, 0xd7, 0xdd, 0xd3, 0xfd, 0xfa, 0xf5, 0x7b, 0xaf, 0x5f, 0xc3, - 0x5a, 0xcd, 0x4f, 0xea, 0xed, 0x8d, 0xe9, 0x4a, 0xd8, 0xbc, 0xe4, 0x45, 0xb5, 0xb0, 0x15, 0x85, - 0x2f, 0xb3, 0x7f, 0xde, 0xb9, 0x13, 0x46, 0x5b, 0x9b, 0x8d, 0x70, 0x27, 0xbe, 0xb4, 0xfd, 0xf4, - 0xa5, 0xd6, 0x56, 0xed, 0x92, 0xd7, 0xf2, 0xe3, 0x4b, 0x12, 0x7a, 0x69, 0xfb, 0x29, 0xaf, 0xd1, - 0xaa, 0x7b, 0x4f, 0x5d, 0xaa, 0x91, 0x80, 0x44, 0x5e, 0x42, 0xaa, 0xd3, 0xad, 0x28, 0x4c, 0x42, - 0xf4, 0x81, 0x94, 0xe2, 0xb4, 0xa4, 0xc8, 0xfe, 0xf9, 0xb0, 0xa2, 0x38, 0xbd, 0xfd, 0xf4, 0x74, - 0x6b, 0xab, 0x36, 0x4d, 0x29, 0x4e, 0x4b, 0xe8, 0xb4, 0xa4, 0x38, 0xf9, 0x4e, 0xad, 0x4d, 0xb5, - 0xb0, 0x16, 0x5e, 0x62, 0x84, 0x37, 0xda, 0x9b, 0xec, 0x17, 0xfb, 0xc1, 0xfe, 0xe3, 0x0c, 0x27, - 0xdd, 0xad, 0x67, 0xe2, 0x69, 0x3f, 0xa4, 0xed, 0xbb, 0x54, 0x09, 0x23, 0x72, 0x69, 0xbb, 0xa3, - 0x51, 0x93, 0xef, 0xd0, 0x70, 0x5a, 0x61, 0xc3, 0xaf, 0xec, 0xe6, 0x61, 0xbd, 0x2b, 0xc5, 0x6a, - 0x7a, 0x95, 0xba, 0x1f, 0x90, 0x68, 0x37, 0xed, 0x7a, 0x93, 0x24, 0x5e, 0x5e, 0xad, 0x4b, 0xdd, - 0x6a, 0x45, 0xed, 0x20, 0xf1, 0x9b, 0xa4, 0xa3, 0xc2, 0x5f, 0xbf, 0x53, 0x85, 0xb8, 0x52, 0x27, - 0x4d, 0xaf, 0xa3, 0xde, 0xd3, 0xdd, 0xea, 0xb5, 0x13, 0xbf, 0x71, 0xc9, 0x0f, 0x92, 0x38, 0x89, - 0xb2, 0x95, 
0xdc, 0x2b, 0x30, 0x30, 0xd3, 0x0c, 0xdb, 0x41, 0x82, 0xde, 0x0b, 0xc5, 0x6d, 0xaf, - 0xd1, 0x26, 0x25, 0xe7, 0x61, 0xe7, 0xf1, 0xe1, 0xd9, 0x47, 0xbf, 0xbb, 0x37, 0x75, 0xdf, 0xfe, - 0xde, 0x54, 0xf1, 0x79, 0x0a, 0xbc, 0xbd, 0x37, 0x75, 0x96, 0x04, 0x95, 0xb0, 0xea, 0x07, 0xb5, - 0x4b, 0x2f, 0xc7, 0x61, 0x30, 0x7d, 0xbd, 0xdd, 0xdc, 0x20, 0x11, 0xe6, 0x75, 0xdc, 0x3f, 0x2a, - 0xc0, 0xa9, 0x99, 0xa8, 0x52, 0xf7, 0xb7, 0x49, 0x39, 0xa1, 0xf4, 0x6b, 0xbb, 0xa8, 0x0e, 0x7d, - 0x89, 0x17, 0x31, 0x72, 0x23, 0x97, 0x57, 0xa6, 0xef, 0xf6, 0xbb, 0x4f, 0xaf, 0x7b, 0x91, 0xa4, - 0x3d, 0x3b, 0xb8, 0xbf, 0x37, 0xd5, 0xb7, 0xee, 0x45, 0x98, 0xb2, 0x40, 0x0d, 0xe8, 0x0f, 0xc2, - 0x80, 0x94, 0x0a, 0x8c, 0xd5, 0xf5, 0xbb, 0x67, 0x75, 0x3d, 0x0c, 0x54, 0x3f, 0x66, 0x87, 0xf6, - 0xf7, 0xa6, 0xfa, 0x29, 0x04, 0x33, 0x2e, 0xb4, 0x5f, 0xaf, 0xfa, 0xad, 0x52, 0x9f, 0xad, 0x7e, - 0xbd, 0xe8, 0xb7, 0xcc, 0x7e, 0xbd, 0xe8, 0xb7, 0x30, 0x65, 0xe1, 0x7e, 0xae, 0x00, 0xc3, 0x33, - 0x51, 0xad, 0xdd, 0x24, 0x41, 0x12, 0xa3, 0x8f, 0x01, 0xb4, 0xbc, 0xc8, 0x6b, 0x92, 0x84, 0x44, - 0x71, 0xc9, 0x79, 0xb8, 0xef, 0xf1, 0x91, 0xcb, 0x4b, 0x77, 0xcf, 0x7e, 0x4d, 0xd2, 0x9c, 0x45, - 0xe2, 0x93, 0x83, 0x02, 0xc5, 0x58, 0x63, 0x89, 0x5e, 0x83, 0x61, 0x2f, 0x4a, 0xfc, 0x4d, 0xaf, - 0x92, 0xc4, 0xa5, 0x02, 0xe3, 0xff, 0xec, 0xdd, 0xf3, 0x9f, 0x11, 0x24, 0x67, 0x4f, 0x0b, 0xf6, - 0xc3, 0x12, 0x12, 0xe3, 0x94, 0x9f, 0xfb, 0xbb, 0xfd, 0x30, 0x32, 0x13, 0x25, 0x8b, 0x73, 0xe5, - 0xc4, 0x4b, 0xda, 0x31, 0xfa, 0x03, 0x07, 0xce, 0xc4, 0x7c, 0xd8, 0x7c, 0x12, 0xaf, 0x45, 0x61, - 0x85, 0xc4, 0x31, 0xa9, 0x8a, 0x71, 0xd9, 0xb4, 0xd2, 0x2e, 0xc9, 0x6c, 0xba, 0xdc, 0xc9, 0xe8, - 0x4a, 0x90, 0x44, 0xbb, 0xb3, 0x4f, 0x89, 0x36, 0x9f, 0xc9, 0xc1, 0x78, 0xf3, 0xed, 0x29, 0x24, - 0xbb, 0x42, 0x29, 0xf1, 0x4f, 0x8c, 0xf3, 0x5a, 0x8d, 0xbe, 0xe6, 0xc0, 0x68, 0x2b, 0xac, 0xc6, - 0x98, 0x54, 0xc2, 0x76, 0x8b, 0x54, 0xc5, 0xf0, 0x7e, 0xd8, 0x6e, 0x37, 0xd6, 0x34, 0x0e, 0xbc, - 0xfd, 0x67, 0x45, 0xfb, 0x47, 0xf5, 0x22, 0x6c, 
0x34, 0x05, 0x3d, 0x03, 0xa3, 0x41, 0x98, 0x94, - 0x5b, 0xa4, 0xe2, 0x6f, 0xfa, 0xa4, 0xca, 0x26, 0xfe, 0x50, 0x5a, 0xf3, 0xba, 0x56, 0x86, 0x0d, - 0xcc, 0xc9, 0x05, 0x28, 0x75, 0x1b, 0x39, 0x34, 0x01, 0x7d, 0x5b, 0x64, 0x97, 0x0b, 0x1b, 0x4c, - 0xff, 0x45, 0x67, 0xa5, 0x00, 0xa2, 0xcb, 0x78, 0x48, 0x48, 0x96, 0xf7, 0x14, 0x9e, 0x71, 0x26, - 0xdf, 0x0f, 0xa7, 0x3b, 0x9a, 0x7e, 0x18, 0x02, 0xee, 0xf7, 0x06, 0x60, 0x48, 0x7e, 0x0a, 0xf4, - 0x30, 0xf4, 0x07, 0x5e, 0x53, 0xca, 0xb9, 0x51, 0xd1, 0x8f, 0xfe, 0xeb, 0x5e, 0x93, 0xae, 0x70, - 0xaf, 0x49, 0x28, 0x46, 0xcb, 0x4b, 0xea, 0x8c, 0x8e, 0x86, 0xb1, 0xe6, 0x25, 0x75, 0xcc, 0x4a, - 0xd0, 0x83, 0xd0, 0xdf, 0x0c, 0xab, 0x84, 0x8d, 0x45, 0x91, 0x4b, 0x88, 0x95, 0xb0, 0x4a, 0x30, - 0x83, 0xd2, 0xfa, 0x9b, 0x51, 0xd8, 0x2c, 0xf5, 0x9b, 0xf5, 0x17, 0xa2, 0xb0, 0x89, 0x59, 0x09, - 0xfa, 0xaa, 0x03, 0x13, 0x72, 0x6e, 0x2f, 0x87, 0x15, 0x2f, 0xf1, 0xc3, 0xa0, 0x54, 0x64, 0x12, - 0x05, 0xdb, 0x5b, 0x52, 0x92, 0xf2, 0x6c, 0x49, 0x34, 0x61, 0x22, 0x5b, 0x82, 0x3b, 0x5a, 0x81, - 0x2e, 0x03, 0xd4, 0x1a, 0xe1, 0x86, 0xd7, 0xa0, 0x03, 0x52, 0x1a, 0x60, 0x5d, 0x50, 0x92, 0x61, - 0x51, 0x95, 0x60, 0x0d, 0x0b, 0xdd, 0x82, 0x41, 0x8f, 0x4b, 0xff, 0xd2, 0x20, 0xeb, 0xc4, 0x73, - 0x36, 0x3a, 0x61, 0x6c, 0x27, 0xb3, 0x23, 0xfb, 0x7b, 0x53, 0x83, 0x02, 0x88, 0x25, 0x3b, 0xf4, - 0x24, 0x0c, 0x85, 0x2d, 0xda, 0x6e, 0xaf, 0x51, 0x1a, 0x62, 0x13, 0x73, 0x42, 0xb4, 0x75, 0x68, - 0x55, 0xc0, 0xb1, 0xc2, 0x40, 0x4f, 0xc0, 0x60, 0xdc, 0xde, 0xa0, 0xdf, 0xb1, 0x34, 0xcc, 0x3a, - 0x76, 0x4a, 0x20, 0x0f, 0x96, 0x39, 0x18, 0xcb, 0x72, 0xf4, 0x6e, 0x18, 0x89, 0x48, 0xa5, 0x1d, - 0xc5, 0x84, 0x7e, 0xd8, 0x12, 0x30, 0xda, 0x67, 0x04, 0xfa, 0x08, 0x4e, 0x8b, 0xb0, 0x8e, 0x87, - 0xde, 0x07, 0xe3, 0xf4, 0x03, 0x5f, 0xb9, 0xd5, 0x8a, 0x48, 0x1c, 0xd3, 0xaf, 0x3a, 0xc2, 0x18, - 0x9d, 0x17, 0x35, 0xc7, 0x17, 0x8c, 0x52, 0x9c, 0xc1, 0x46, 0xaf, 0x03, 0x78, 0x4a, 0x66, 0x94, - 0x46, 0xd9, 0x60, 0x2e, 0xdb, 0x9b, 0x11, 0x8b, 0x73, 0xb3, 0xe3, 0xf4, 0x3b, 0xa6, 
0xbf, 0xb1, - 0xc6, 0x8f, 0x8e, 0x4f, 0x95, 0x34, 0x48, 0x42, 0xaa, 0xa5, 0x31, 0xd6, 0x61, 0x35, 0x3e, 0xf3, - 0x1c, 0x8c, 0x65, 0xb9, 0xfb, 0x0f, 0x0a, 0xa0, 0x51, 0x41, 0xb3, 0x30, 0x24, 0xe4, 0x9a, 0x58, - 0x92, 0xb3, 0x8f, 0xc9, 0xef, 0x20, 0xbf, 0xe0, 0xed, 0xbd, 0x5c, 0x79, 0xa8, 0xea, 0xa1, 0x37, - 0x60, 0xa4, 0x15, 0x56, 0x57, 0x48, 0xe2, 0x55, 0xbd, 0xc4, 0x13, 0xbb, 0xb9, 0x85, 0x1d, 0x46, - 0x52, 0x9c, 0x3d, 0x45, 0x3f, 0xdd, 0x5a, 0xca, 0x02, 0xeb, 0xfc, 0xd0, 0xb3, 0x80, 0x62, 0x12, - 0x6d, 0xfb, 0x15, 0x32, 0x53, 0xa9, 0x50, 0x95, 0x88, 0x2d, 0x80, 0x3e, 0xd6, 0x99, 0x49, 0xd1, - 0x19, 0x54, 0xee, 0xc0, 0xc0, 0x39, 0xb5, 0xdc, 0xef, 0x17, 0x60, 0x5c, 0xeb, 0x6b, 0x8b, 0x54, - 0xd0, 0x5b, 0x0e, 0x9c, 0x52, 0xdb, 0xd9, 0xec, 0xee, 0x75, 0x3a, 0xab, 0xf8, 0x66, 0x45, 0x6c, - 0x7e, 0x5f, 0xca, 0x4b, 0xfd, 0x14, 0x7c, 0xb8, 0xac, 0xbf, 0x20, 0xfa, 0x70, 0x2a, 0x53, 0x8a, - 0xb3, 0xcd, 0x9a, 0xfc, 0x8a, 0x03, 0x67, 0xf3, 0x48, 0xe4, 0xc8, 0xdc, 0xba, 0x2e, 0x73, 0xad, - 0x0a, 0x2f, 0xca, 0x95, 0x76, 0x46, 0x97, 0xe3, 0xff, 0xaf, 0x00, 0x13, 0xfa, 0x14, 0x62, 0x9a, - 0xc0, 0xbf, 0x72, 0xe0, 0x9c, 0xec, 0x01, 0x26, 0x71, 0xbb, 0x91, 0x19, 0xde, 0xa6, 0xd5, 0xe1, - 0xe5, 0x3b, 0xe9, 0x4c, 0x1e, 0x3f, 0x3e, 0xcc, 0x0f, 0x89, 0x61, 0x3e, 0x97, 0x8b, 0x83, 0xf3, - 0x9b, 0x3a, 0xf9, 0x2d, 0x07, 0x26, 0xbb, 0x13, 0xcd, 0x19, 0xf8, 0x96, 0x39, 0xf0, 0x2f, 0xda, - 0xeb, 0x24, 0x67, 0xcf, 0x86, 0x9f, 0x75, 0x56, 0xff, 0x00, 0xbf, 0x35, 0x04, 0x1d, 0x7b, 0x08, - 0x7a, 0x0a, 0x46, 0x84, 0x38, 0x5e, 0x0e, 0x6b, 0x31, 0x6b, 0xe4, 0x10, 0x5f, 0x6b, 0x33, 0x29, - 0x18, 0xeb, 0x38, 0xa8, 0x0a, 0x85, 0xf8, 0x69, 0xd1, 0x74, 0x0b, 0xe2, 0xad, 0xfc, 0xb4, 0xd2, - 0x22, 0x07, 0xf6, 0xf7, 0xa6, 0x0a, 0xe5, 0xa7, 0x71, 0x21, 0x7e, 0x9a, 0x6a, 0xea, 0x35, 0x3f, - 0xb1, 0xa7, 0xa9, 0x2f, 0xfa, 0x89, 0xe2, 0xc3, 0x34, 0xf5, 0x45, 0x3f, 0xc1, 0x94, 0x05, 0x3d, - 0x81, 0xd4, 0x93, 0xa4, 0xc5, 0x76, 0x7c, 0x2b, 0x27, 0x90, 0xab, 0xeb, 0xeb, 0x6b, 0x8a, 0x17, - 0xd3, 0x2f, 0x28, 0x04, 
0x33, 0x2e, 0xe8, 0xb3, 0x0e, 0x1d, 0x71, 0x5e, 0x18, 0x46, 0xbb, 0x42, - 0x71, 0xb8, 0x61, 0x6f, 0x0a, 0x84, 0xd1, 0xae, 0x62, 0x2e, 0x3e, 0xa4, 0x2a, 0xc0, 0x3a, 0x6b, - 0xd6, 0xf1, 0xea, 0x66, 0xcc, 0xf4, 0x04, 0x3b, 0x1d, 0x9f, 0x5f, 0x28, 0x67, 0x3a, 0x3e, 0xbf, - 0x50, 0xc6, 0x8c, 0x0b, 0xfd, 0xa0, 0x91, 0xb7, 0x23, 0x74, 0x0c, 0x0b, 0x1f, 0x14, 0x7b, 0x3b, - 0xe6, 0x07, 0xc5, 0xde, 0x0e, 0xa6, 0x2c, 0x28, 0xa7, 0x30, 0x8e, 0x99, 0x4a, 0x61, 0x85, 0xd3, - 0x6a, 0xb9, 0x6c, 0x72, 0x5a, 0x2d, 0x97, 0x31, 0x65, 0xc1, 0x26, 0x69, 0x25, 0x66, 0xfa, 0x88, - 0x9d, 0x49, 0x3a, 0x97, 0xe1, 0xb4, 0x38, 0x57, 0xc6, 0x94, 0x05, 0x15, 0x19, 0xde, 0xab, 0xed, - 0x88, 0x2b, 0x33, 0x23, 0x97, 0x57, 0x2d, 0xcc, 0x17, 0x4a, 0x4e, 0x71, 0x1b, 0xde, 0xdf, 0x9b, - 0x2a, 0x32, 0x10, 0xe6, 0x8c, 0xdc, 0xdf, 0xef, 0x4b, 0xc5, 0x85, 0x94, 0xe7, 0xe8, 0xef, 0xb3, - 0x8d, 0x50, 0xc8, 0x02, 0xa1, 0xfa, 0x3a, 0xc7, 0xa6, 0xfa, 0x9e, 0xe1, 0x3b, 0x9e, 0xc1, 0x0e, - 0x67, 0xf9, 0xa3, 0x2f, 0x3a, 0x9d, 0x67, 0x5b, 0xcf, 0xfe, 0x5e, 0x96, 0x6e, 0xcc, 0x7c, 0xaf, - 0x38, 0xf0, 0xc8, 0x3b, 0xf9, 0x59, 0x27, 0x55, 0x22, 0xe2, 0x6e, 0xfb, 0xc0, 0x47, 0xcc, 0x7d, - 0xc0, 0xe2, 0x81, 0x5c, 0x97, 0xfb, 0x9f, 0x73, 0x60, 0x4c, 0xc2, 0xa9, 0x7a, 0x1c, 0xa3, 0x5b, - 0x30, 0x24, 0x5b, 0x2a, 0xbe, 0x9e, 0x4d, 0x5b, 0x80, 0x52, 0xe2, 0x55, 0x63, 0x14, 0x37, 0xf7, - 0xad, 0x01, 0x40, 0xe9, 0x5e, 0xd5, 0x0a, 0x63, 0x9f, 0x49, 0xa2, 0x23, 0xec, 0x42, 0x81, 0xb6, - 0x0b, 0x3d, 0x6f, 0x73, 0x17, 0x4a, 0x9b, 0x65, 0xec, 0x47, 0x5f, 0xcc, 0xc8, 0x6d, 0xbe, 0x31, - 0x7d, 0xf8, 0x58, 0xe4, 0xb6, 0xd6, 0x84, 0x83, 0x25, 0xf8, 0xb6, 0x90, 0xe0, 0x7c, 0xeb, 0xfa, - 0x25, 0xbb, 0x12, 0x5c, 0x6b, 0x45, 0x56, 0x96, 0x47, 0x5c, 0xc2, 0xf2, 0xbd, 0xeb, 0xa6, 0x55, - 0x09, 0xab, 0x71, 0x35, 0x65, 0x6d, 0xc4, 0x65, 0xed, 0x80, 0x2d, 0x9e, 0x9a, 0xac, 0xcd, 0xf2, - 0x54, 0x52, 0xf7, 0x55, 0x29, 0x75, 0xf9, 0xae, 0xf5, 0x82, 0x65, 0xa9, 0xab, 0xf1, 0xed, 0x94, - 0xbf, 0xaf, 0xc0, 0xb9, 0x4e, 0x3c, 0x4c, 0x36, 0xd1, 0x25, 
0x18, 0xae, 0x84, 0xc1, 0xa6, 0x5f, - 0x5b, 0xf1, 0x5a, 0xe2, 0xbc, 0xa6, 0x64, 0xd1, 0x9c, 0x2c, 0xc0, 0x29, 0x0e, 0x7a, 0x88, 0x0b, - 0x1e, 0x6e, 0x11, 0x19, 0x11, 0xa8, 0x7d, 0x4b, 0x64, 0x97, 0x49, 0xa1, 0xf7, 0x0c, 0x7d, 0xf5, - 0x1b, 0x53, 0xf7, 0x7d, 0xfc, 0x8f, 0x1f, 0xbe, 0xcf, 0xfd, 0xc3, 0x3e, 0x78, 0x20, 0x97, 0xa7, - 0xd0, 0xd6, 0x7f, 0xcb, 0xd0, 0xd6, 0xb5, 0x72, 0x21, 0x45, 0x6e, 0xda, 0x54, 0x64, 0x35, 0xf2, - 0x79, 0x7a, 0xb9, 0x56, 0x8c, 0xf3, 0x1b, 0x45, 0x07, 0x2a, 0xf0, 0x9a, 0x24, 0x6e, 0x79, 0x15, - 0x22, 0x7a, 0xaf, 0x06, 0xea, 0xba, 0x2c, 0xc0, 0x29, 0x0e, 0x3f, 0x42, 0x6f, 0x7a, 0xed, 0x46, - 0x22, 0x0c, 0x65, 0xda, 0x11, 0x9a, 0x81, 0xb1, 0x2c, 0x47, 0xff, 0xd0, 0x01, 0xd4, 0xc9, 0x55, - 0x2c, 0xc4, 0xf5, 0xe3, 0x18, 0x87, 0xd9, 0xf3, 0xfb, 0xda, 0x21, 0x5c, 0xeb, 0x69, 0x4e, 0x3b, - 0xb4, 0x6f, 0xfa, 0xd1, 0x74, 0x1f, 0xe2, 0x87, 0x83, 0x1e, 0x6c, 0x68, 0xcc, 0xd4, 0x52, 0xa9, - 0x90, 0x38, 0xe6, 0xe6, 0x38, 0xdd, 0xd4, 0xc2, 0xc0, 0x58, 0x96, 0xa3, 0x29, 0x28, 0x92, 0x28, - 0x0a, 0x23, 0x71, 0xd6, 0x66, 0xd3, 0xf8, 0x0a, 0x05, 0x60, 0x0e, 0x77, 0x7f, 0x5c, 0x80, 0x52, - 0xb7, 0xd3, 0x09, 0xfa, 0x1d, 0xed, 0x5c, 0x2d, 0x4e, 0x4e, 0xe2, 0xe0, 0x17, 0x1e, 0xdf, 0x99, - 0x28, 0x7b, 0x00, 0xec, 0x72, 0xc2, 0x16, 0xa5, 0x38, 0xdb, 0xc0, 0xc9, 0x2f, 0x69, 0x27, 0x6c, - 0x9d, 0x44, 0xce, 0x06, 0xbf, 0x69, 0x6e, 0xf0, 0x6b, 0xb6, 0x3b, 0xa5, 0x6f, 0xf3, 0x7f, 0x52, - 0x84, 0x33, 0xb2, 0xb4, 0x4c, 0xe8, 0x56, 0xf9, 0x5c, 0x9b, 0x44, 0xbb, 0xe8, 0x07, 0x0e, 0x9c, - 0xf5, 0xb2, 0xa6, 0x1b, 0x9f, 0x1c, 0xc3, 0x40, 0x6b, 0x5c, 0xa7, 0x67, 0x72, 0x38, 0xf2, 0x81, - 0xbe, 0x2c, 0x06, 0xfa, 0x6c, 0x1e, 0x4a, 0x17, 0xbb, 0x7b, 0x6e, 0x07, 0xd0, 0x33, 0x30, 0x2a, - 0xe1, 0xcc, 0xdc, 0xc3, 0x97, 0xb8, 0x32, 0x6e, 0xcf, 0x68, 0x65, 0xd8, 0xc0, 0xa4, 0x35, 0x13, - 0xd2, 0x6c, 0x35, 0xbc, 0x84, 0x68, 0x86, 0x22, 0x55, 0x73, 0x5d, 0x2b, 0xc3, 0x06, 0x26, 0x7a, - 0x0c, 0x06, 0x82, 0xb0, 0x4a, 0xae, 0x55, 0x85, 0x81, 0x78, 0x5c, 0xd4, 0x19, 0xb8, 0xce, 0xa0, - 
0x58, 0x94, 0xa2, 0x47, 0x53, 0x6b, 0x5c, 0x91, 0x2d, 0xa1, 0x91, 0x3c, 0x4b, 0x1c, 0xfa, 0xc7, - 0x0e, 0x0c, 0xd3, 0x1a, 0xeb, 0xbb, 0x2d, 0x42, 0xf7, 0x36, 0xfa, 0x45, 0xaa, 0xc7, 0xf3, 0x45, - 0xae, 0x4b, 0x36, 0xa6, 0xa9, 0x63, 0x58, 0xc1, 0xdf, 0x7c, 0x7b, 0x6a, 0x48, 0xfe, 0xc0, 0x69, - 0xab, 0x26, 0x17, 0xe1, 0xfe, 0xae, 0x5f, 0xf3, 0x50, 0xae, 0x80, 0xbf, 0x05, 0xe3, 0x66, 0x23, - 0x0e, 0xe5, 0x07, 0xf8, 0x97, 0xda, 0xb2, 0xe3, 0xfd, 0x12, 0xf2, 0xec, 0x9e, 0x69, 0xb3, 0x6a, - 0x32, 0xcc, 0x8b, 0xa9, 0x67, 0x4e, 0x86, 0x79, 0x31, 0x19, 0xe6, 0xdd, 0x3f, 0x70, 0xd2, 0xa5, - 0xa9, 0xa9, 0x79, 0x74, 0x63, 0x6e, 0x47, 0x0d, 0x21, 0x88, 0xd5, 0xc6, 0x7c, 0x03, 0x2f, 0x63, - 0x0a, 0x47, 0x5f, 0xd2, 0xa4, 0x23, 0xad, 0xd6, 0x16, 0x6e, 0x0d, 0x4b, 0x26, 0x7a, 0x83, 0x70, - 0xa7, 0xfc, 0x13, 0x05, 0x38, 0xdb, 0x04, 0xf7, 0x8b, 0x05, 0x78, 0xe8, 0x40, 0xa5, 0x35, 0xb7, - 0xe1, 0xce, 0x3d, 0x6f, 0x38, 0xdd, 0xd6, 0x22, 0xd2, 0x0a, 0x6f, 0xe0, 0x65, 0xf1, 0xbd, 0xd4, - 0xb6, 0x86, 0x39, 0x18, 0xcb, 0x72, 0xaa, 0x3a, 0x6c, 0x91, 0xdd, 0x85, 0x30, 0x6a, 0x7a, 0x89, - 0x90, 0x0e, 0x4a, 0x75, 0x58, 0x92, 0x05, 0x38, 0xc5, 0x71, 0x7f, 0xe0, 0x40, 0xb6, 0x01, 0xc8, - 0x83, 0xf1, 0x76, 0x4c, 0x22, 0xba, 0xa5, 0x96, 0x49, 0x25, 0x22, 0x72, 0x7a, 0x3e, 0x3a, 0xcd, - 0xbd, 0xfd, 0xb4, 0x87, 0xd3, 0x95, 0x30, 0x22, 0xd3, 0xdb, 0x4f, 0x4d, 0x73, 0x8c, 0x25, 0xb2, - 0x5b, 0x26, 0x0d, 0x42, 0x69, 0xcc, 0xa2, 0xfd, 0xbd, 0xa9, 0xf1, 0x1b, 0x06, 0x01, 0x9c, 0x21, - 0x48, 0x59, 0xb4, 0xbc, 0x38, 0xde, 0x09, 0xa3, 0xaa, 0x60, 0x51, 0x38, 0x34, 0x8b, 0x35, 0x83, - 0x00, 0xce, 0x10, 0x74, 0xbf, 0x4f, 0x8f, 0x8f, 0xba, 0xd6, 0x8a, 0xbe, 0x41, 0x75, 0x1f, 0x0a, - 0x99, 0x6d, 0x84, 0x1b, 0x73, 0x61, 0x90, 0x78, 0x7e, 0x40, 0x64, 0xb0, 0xc0, 0xba, 0x25, 0x1d, - 0xd9, 0xa0, 0x9d, 0xda, 0xf0, 0x3b, 0xcb, 0x70, 0x4e, 0x5b, 0xa8, 0x8e, 0xb3, 0xd1, 0x08, 0x37, - 0xb2, 0x5e, 0x40, 0x8a, 0x84, 0x59, 0x89, 0xfb, 0x53, 0x07, 0x2e, 0x74, 0x51, 0xc6, 0xd1, 0x57, - 0x1c, 0x18, 0xdb, 0xf8, 0x99, 0xe8, 
0x9b, 0xd9, 0x0c, 0xf4, 0x3e, 0x18, 0xa7, 0x00, 0xba, 0x13, - 0x89, 0xb9, 0x59, 0x30, 0x3d, 0x54, 0xb3, 0x46, 0x29, 0xce, 0x60, 0xbb, 0xbf, 0x5e, 0x80, 0x1c, - 0x2e, 0xe8, 0x49, 0x18, 0x22, 0x41, 0xb5, 0x15, 0xfa, 0x41, 0x22, 0x84, 0x91, 0x92, 0x7a, 0x57, - 0x04, 0x1c, 0x2b, 0x0c, 0x71, 0xfe, 0x10, 0x03, 0x53, 0xe8, 0x38, 0x7f, 0x88, 0x96, 0xa7, 0x38, - 0xa8, 0x06, 0x13, 0x1e, 0xf7, 0xaf, 0xb0, 0xb9, 0xc7, 0xa6, 0x69, 0xdf, 0x61, 0xa6, 0xe9, 0x59, - 0xe6, 0xfe, 0xcc, 0x90, 0xc0, 0x1d, 0x44, 0xd1, 0xbb, 0x61, 0xa4, 0x1d, 0x93, 0xf2, 0xfc, 0xd2, - 0x5c, 0x44, 0xaa, 0xfc, 0x54, 0xac, 0xf9, 0xfd, 0x6e, 0xa4, 0x45, 0x58, 0xc7, 0x73, 0xff, 0xb5, - 0x03, 0x83, 0xb3, 0x5e, 0x65, 0x2b, 0xdc, 0xdc, 0xa4, 0x43, 0x51, 0x6d, 0x47, 0xa9, 0x61, 0x4b, - 0x1b, 0x8a, 0x79, 0x01, 0xc7, 0x0a, 0x03, 0xad, 0xc3, 0x00, 0x5f, 0xf0, 0x62, 0xd9, 0xfd, 0xa2, - 0xd6, 0x1f, 0x15, 0xc7, 0xc3, 0xa6, 0x43, 0x3b, 0xf1, 0x1b, 0xd3, 0x3c, 0x8e, 0x67, 0xfa, 0x5a, - 0x90, 0xac, 0x46, 0xe5, 0x24, 0xf2, 0x83, 0xda, 0x2c, 0xd0, 0xed, 0x62, 0x81, 0xd1, 0xc0, 0x82, - 0x16, 0xed, 0x46, 0xd3, 0xbb, 0x25, 0xd9, 0x09, 0xf1, 0xa3, 0xba, 0xb1, 0x92, 0x16, 0x61, 0x1d, - 0xcf, 0xfd, 0x43, 0x07, 0x86, 0x67, 0xbd, 0xd8, 0xaf, 0xfc, 0x25, 0x12, 0x3e, 0x1f, 0x82, 0xe2, - 0x9c, 0x57, 0xa9, 0x13, 0x74, 0x23, 0x7b, 0xe8, 0x1d, 0xb9, 0xfc, 0x78, 0x1e, 0x1b, 0x75, 0x00, - 0xd6, 0x39, 0x8d, 0x75, 0x3b, 0x1a, 0xbb, 0x6f, 0x3b, 0x30, 0x3e, 0xd7, 0xf0, 0x49, 0x90, 0xcc, - 0x91, 0x28, 0x61, 0x03, 0x57, 0x83, 0x89, 0x8a, 0x82, 0x1c, 0x65, 0xe8, 0xd8, 0x6c, 0x9d, 0xcb, - 0x90, 0xc0, 0x1d, 0x44, 0x51, 0x15, 0x4e, 0x71, 0x58, 0xba, 0x2a, 0x0e, 0x35, 0x7e, 0xcc, 0x3a, - 0x3a, 0x67, 0x52, 0xc0, 0x59, 0x92, 0xee, 0x4f, 0x1c, 0xb8, 0x30, 0xd7, 0x68, 0xc7, 0x09, 0x89, - 0x6e, 0x0a, 0x69, 0x24, 0xd5, 0x5b, 0xf4, 0x11, 0x18, 0x6a, 0x4a, 0x8f, 0xad, 0x73, 0x87, 0x09, - 0xcc, 0xe4, 0x19, 0xc5, 0xa6, 0x8d, 0x59, 0xdd, 0x78, 0x99, 0x54, 0x92, 0x15, 0x92, 0x78, 0x69, - 0x78, 0x41, 0x0a, 0xc3, 0x8a, 0x2a, 0x6a, 0x41, 0x7f, 0xdc, 0x22, 0x15, 
0x7b, 0xd1, 0x5d, 0xb2, - 0x0f, 0xe5, 0x16, 0xa9, 0xa4, 0x72, 0x9d, 0xf9, 0x1a, 0x19, 0x27, 0xf7, 0x7f, 0x3b, 0xf0, 0x40, - 0x97, 0xfe, 0x2e, 0xfb, 0x71, 0x82, 0x5e, 0xea, 0xe8, 0xf3, 0x74, 0x6f, 0x7d, 0xa6, 0xb5, 0x59, - 0x8f, 0x95, 0x40, 0x90, 0x10, 0xad, 0xbf, 0x1f, 0x85, 0xa2, 0x9f, 0x90, 0xa6, 0x34, 0x43, 0x5b, - 0x30, 0x18, 0x75, 0xe9, 0xcb, 0xec, 0x98, 0x8c, 0xf1, 0xbb, 0x46, 0xf9, 0x61, 0xce, 0xd6, 0xdd, - 0x82, 0x81, 0xb9, 0xb0, 0xd1, 0x6e, 0x06, 0xbd, 0x45, 0xca, 0x24, 0xbb, 0x2d, 0x92, 0xdd, 0x23, - 0x99, 0xfa, 0xcf, 0x4a, 0xa4, 0xe1, 0xa8, 0x2f, 0xdf, 0x70, 0xe4, 0xfe, 0x1b, 0x07, 0xe8, 0xaa, - 0xaa, 0xfa, 0xc2, 0x93, 0xc8, 0xc9, 0x71, 0x86, 0x0f, 0xe9, 0xe4, 0x6e, 0xef, 0x4d, 0x8d, 0x29, - 0x44, 0x8d, 0xfe, 0x87, 0x60, 0x20, 0x66, 0x47, 0x72, 0xd1, 0x86, 0x05, 0xa9, 0x3f, 0xf3, 0x83, - 0xfa, 0xed, 0xbd, 0xa9, 0x9e, 0xc2, 0x36, 0xa7, 0x15, 0x6d, 0xe1, 0xf4, 0x14, 0x54, 0xa9, 0xc2, - 0xd7, 0x24, 0x71, 0xec, 0xd5, 0xe4, 0x09, 0x4f, 0x29, 0x7c, 0x2b, 0x1c, 0x8c, 0x65, 0xb9, 0xfb, - 0x65, 0x07, 0xc6, 0xd4, 0xe6, 0x45, 0xd5, 0x77, 0x74, 0x5d, 0xdf, 0xe6, 0xf8, 0x4c, 0x79, 0xa8, - 0x8b, 0xc4, 0x11, 0x1b, 0xf9, 0xc1, 0xbb, 0xe0, 0xbb, 0x60, 0xb4, 0x4a, 0x5a, 0x24, 0xa8, 0x92, - 0xa0, 0x42, 0x8f, 0xdf, 0x74, 0x86, 0x0c, 0xcf, 0x4e, 0xd0, 0xf3, 0xe6, 0xbc, 0x06, 0xc7, 0x06, - 0x96, 0xfb, 0x4d, 0x07, 0xee, 0x57, 0xe4, 0xca, 0x24, 0xc1, 0x24, 0x89, 0x76, 0x55, 0x98, 0xe6, - 0xe1, 0x76, 0xab, 0x9b, 0x54, 0xff, 0x4d, 0x22, 0xce, 0xfc, 0x68, 0xdb, 0xd5, 0x08, 0xd7, 0x96, - 0x19, 0x11, 0x2c, 0xa9, 0xb9, 0xbf, 0xd6, 0x07, 0x67, 0xf5, 0x46, 0x2a, 0x01, 0xf3, 0x09, 0x07, - 0x40, 0x8d, 0x00, 0xdd, 0x90, 0xfb, 0xec, 0xf8, 0xae, 0x8c, 0x2f, 0x95, 0x8a, 0x20, 0x05, 0x8e, - 0xb1, 0xc6, 0x16, 0xbd, 0x00, 0xa3, 0xdb, 0x74, 0x51, 0x90, 0x15, 0xaa, 0x2e, 0xc4, 0xa5, 0x3e, - 0xd6, 0x8c, 0xa9, 0xbc, 0x8f, 0xf9, 0x7c, 0x8a, 0x97, 0x9a, 0x03, 0x34, 0x60, 0x8c, 0x0d, 0x52, - 0xf4, 0xa4, 0x33, 0x16, 0xe9, 0x9f, 0x44, 0xd8, 0xc4, 0x3f, 0x68, 0xb1, 0x8f, 0xd9, 0xaf, 0x3e, - 0x7b, 0x7a, 
0x7f, 0x6f, 0x6a, 0xcc, 0x00, 0x61, 0xb3, 0x11, 0xee, 0x0b, 0xc0, 0xc6, 0xc2, 0x0f, - 0xda, 0x64, 0x35, 0x40, 0x8f, 0x48, 0x1b, 0x1d, 0xf7, 0xab, 0x28, 0xc9, 0xa1, 0xdb, 0xe9, 0xe8, - 0x59, 0x76, 0xd3, 0xf3, 0x1b, 0x2c, 0x7c, 0x91, 0x62, 0xa9, 0xb3, 0xec, 0x02, 0x83, 0x62, 0x51, - 0xea, 0x4e, 0xc3, 0xe0, 0x1c, 0xed, 0x3b, 0x89, 0x28, 0x5d, 0x3d, 0xea, 0x78, 0xcc, 0x88, 0x3a, - 0x96, 0xd1, 0xc5, 0xeb, 0x70, 0x6e, 0x2e, 0x22, 0x5e, 0x42, 0xca, 0x4f, 0xcf, 0xb6, 0x2b, 0x5b, - 0x24, 0xe1, 0xa1, 0x5d, 0x31, 0x7a, 0x2f, 0x8c, 0x85, 0x6c, 0xcb, 0x58, 0x0e, 0x2b, 0x5b, 0x7e, - 0x50, 0x13, 0x26, 0xd7, 0x73, 0x82, 0xca, 0xd8, 0xaa, 0x5e, 0x88, 0x4d, 0x5c, 0xf7, 0x3f, 0x15, - 0x60, 0x74, 0x2e, 0x0a, 0x03, 0x29, 0x16, 0x4f, 0x60, 0x2b, 0x4b, 0x8c, 0xad, 0xcc, 0x82, 0xbb, - 0x53, 0x6f, 0x7f, 0xb7, 0xed, 0x0c, 0xbd, 0xae, 0x44, 0x64, 0x9f, 0xad, 0x23, 0x88, 0xc1, 0x97, - 0xd1, 0x4e, 0x3f, 0xb6, 0x29, 0x40, 0xdd, 0xff, 0xec, 0xc0, 0x84, 0x8e, 0x7e, 0x02, 0x3b, 0x68, - 0x6c, 0xee, 0xa0, 0xd7, 0xed, 0xf6, 0xb7, 0xcb, 0xb6, 0xf9, 0xb9, 0x01, 0xb3, 0x9f, 0xcc, 0xd7, - 0xfd, 0x55, 0x07, 0x46, 0x77, 0x34, 0x80, 0xe8, 0xac, 0x6d, 0x25, 0xe6, 0x1d, 0x52, 0xcc, 0xe8, - 0xd0, 0xdb, 0x99, 0xdf, 0xd8, 0x68, 0x09, 0x95, 0xfb, 0x71, 0xa5, 0x4e, 0xaa, 0xed, 0x86, 0xdc, - 0xbe, 0xd5, 0x90, 0x96, 0x05, 0x1c, 0x2b, 0x0c, 0xf4, 0x12, 0x9c, 0xae, 0x84, 0x41, 0xa5, 0x1d, - 0x45, 0x24, 0xa8, 0xec, 0xae, 0xb1, 0x3b, 0x12, 0x62, 0x43, 0x9c, 0x16, 0xd5, 0x4e, 0xcf, 0x65, - 0x11, 0x6e, 0xe7, 0x01, 0x71, 0x27, 0x21, 0xee, 0x2c, 0x88, 0xe9, 0x96, 0x25, 0x0e, 0x5c, 0x9a, - 0xb3, 0x80, 0x81, 0xb1, 0x2c, 0x47, 0x37, 0xe0, 0x42, 0x9c, 0x78, 0x51, 0xe2, 0x07, 0xb5, 0x79, - 0xe2, 0x55, 0x1b, 0x7e, 0x40, 0x8f, 0x12, 0x61, 0x50, 0xe5, 0xae, 0xc4, 0xbe, 0xd9, 0x07, 0xf6, - 0xf7, 0xa6, 0x2e, 0x94, 0xf3, 0x51, 0x70, 0xb7, 0xba, 0xe8, 0x43, 0x30, 0x29, 0xdc, 0x11, 0x9b, - 0xed, 0xc6, 0xb3, 0xe1, 0x46, 0x7c, 0xd5, 0x8f, 0xe9, 0x39, 0x7e, 0xd9, 0x6f, 0xfa, 0x09, 0x73, - 0x18, 0x16, 0x67, 0x2f, 0xee, 0xef, 0x4d, 0x4d, 
0x96, 0xbb, 0x62, 0xe1, 0x03, 0x28, 0x20, 0x0c, - 0xe7, 0xb9, 0xf0, 0xeb, 0xa0, 0x3d, 0xc8, 0x68, 0x4f, 0xee, 0xef, 0x4d, 0x9d, 0x5f, 0xc8, 0xc5, - 0xc0, 0x5d, 0x6a, 0xd2, 0x2f, 0x98, 0xf8, 0x4d, 0xf2, 0x6a, 0x18, 0x10, 0x16, 0xa8, 0xa2, 0x7d, - 0xc1, 0x75, 0x01, 0xc7, 0x0a, 0x03, 0xbd, 0x9c, 0xce, 0x44, 0xba, 0x5c, 0x44, 0xc0, 0xc9, 0xe1, - 0x25, 0x1c, 0x3b, 0x9a, 0xdc, 0xd4, 0x28, 0xb1, 0x48, 0x4a, 0x83, 0xb6, 0xfb, 0x47, 0x05, 0x40, - 0x9d, 0x22, 0x02, 0x2d, 0xc1, 0x80, 0x57, 0x49, 0xfc, 0x6d, 0x19, 0x99, 0xf7, 0x48, 0xde, 0xf6, - 0xc9, 0x59, 0x61, 0xb2, 0x49, 0xe8, 0x0c, 0x21, 0xa9, 0x5c, 0x99, 0x61, 0x55, 0xb1, 0x20, 0x81, - 0x42, 0x38, 0xdd, 0xf0, 0xe2, 0x44, 0xce, 0xd5, 0x2a, 0xed, 0xb2, 0x10, 0xac, 0x3f, 0xdf, 0x5b, - 0xa7, 0x68, 0x8d, 0xd9, 0x73, 0x74, 0xe6, 0x2e, 0x67, 0x09, 0xe1, 0x4e, 0xda, 0xe8, 0x63, 0x4c, - 0x0f, 0xe1, 0x4a, 0xa2, 0x54, 0x00, 0x96, 0xac, 0xec, 0xd1, 0x9c, 0xa6, 0xa1, 0x83, 0x08, 0x36, - 0x58, 0x63, 0xe9, 0xfe, 0x5b, 0x80, 0xc1, 0xf9, 0x99, 0xc5, 0x75, 0x2f, 0xde, 0xea, 0x41, 0x35, - 0xa7, 0xb3, 0x43, 0xe8, 0x50, 0xd9, 0xf5, 0x2d, 0x75, 0x2b, 0xac, 0x30, 0x50, 0x00, 0x03, 0x7e, - 0x40, 0x17, 0x44, 0x69, 0xdc, 0x96, 0xf9, 0x5b, 0x1d, 0x33, 0x98, 0x7d, 0xe2, 0x1a, 0xa3, 0x8e, - 0x05, 0x17, 0xf4, 0x3a, 0x0c, 0x7b, 0xf2, 0x66, 0x8b, 0xd8, 0x96, 0x96, 0x6c, 0xd8, 0x75, 0x05, - 0x49, 0x3d, 0xb2, 0x46, 0x80, 0x70, 0xca, 0x10, 0x7d, 0xdc, 0x81, 0x11, 0xd9, 0x75, 0x4c, 0x36, - 0x85, 0xcb, 0x75, 0xc5, 0x5e, 0x9f, 0x31, 0xd9, 0xe4, 0x61, 0x17, 0x1a, 0x00, 0xeb, 0x2c, 0x3b, - 0x54, 0xf9, 0x62, 0x2f, 0xaa, 0x3c, 0xda, 0x81, 0xe1, 0x1d, 0x3f, 0xa9, 0xb3, 0x8d, 0x47, 0xb8, - 0x7a, 0x16, 0xee, 0xbe, 0xd5, 0x94, 0x5c, 0x3a, 0x62, 0x37, 0x25, 0x03, 0x9c, 0xf2, 0x42, 0x97, - 0x38, 0x63, 0x76, 0x33, 0x88, 0x89, 0xac, 0x61, 0xb3, 0x02, 0x2b, 0xc0, 0x29, 0x0e, 0x1d, 0xe2, - 0x51, 0xfa, 0xab, 0x4c, 0x5e, 0x69, 0xd3, 0x75, 0x2c, 0x42, 0xe9, 0x2c, 0xcc, 0x2b, 0x49, 0x91, - 0x0f, 0xd6, 0x4d, 0x8d, 0x07, 0x36, 0x38, 0xd2, 0x35, 0xb2, 0x53, 0x27, 0x81, 0x08, 
0xf5, 0x57, - 0x6b, 0xe4, 0x66, 0x9d, 0x04, 0x98, 0x95, 0xa0, 0xd7, 0xf9, 0xd1, 0x82, 0xeb, 0xb8, 0x22, 0x2c, - 0x6e, 0xd9, 0x8e, 0xda, 0xcd, 0x69, 0xf2, 0x68, 0xfb, 0xf4, 0x37, 0xd6, 0xf8, 0x51, 0x75, 0x39, - 0x0c, 0xae, 0xdc, 0xf2, 0x13, 0x71, 0x47, 0x40, 0x49, 0xba, 0x55, 0x06, 0xc5, 0xa2, 0x94, 0x87, - 0x14, 0xd0, 0x49, 0x10, 0xb3, 0x0b, 0x01, 0xc3, 0x7a, 0x48, 0x01, 0x03, 0x63, 0x59, 0x8e, 0xfe, - 0x91, 0x03, 0xc5, 0x7a, 0x18, 0x6e, 0xc5, 0xa5, 0x31, 0x36, 0x39, 0x2c, 0xa8, 0x7a, 0x42, 0xe2, - 0x4c, 0x5f, 0xa5, 0x64, 0xcd, 0x5b, 0x4f, 0x45, 0x06, 0xbb, 0xbd, 0x37, 0x35, 0xbe, 0xec, 0x6f, - 0x92, 0xca, 0x6e, 0xa5, 0x41, 0x18, 0xe4, 0xcd, 0xb7, 0x35, 0xc8, 0x95, 0x6d, 0x12, 0x24, 0x98, - 0xb7, 0x6a, 0xf2, 0x73, 0x0e, 0x40, 0x4a, 0x28, 0xc7, 0x77, 0x47, 0x4c, 0x6f, 0xb7, 0x85, 0x73, - 0x9e, 0xd1, 0x34, 0xdd, 0x19, 0xf8, 0xef, 0x1c, 0x18, 0xa1, 0x9d, 0x93, 0x22, 0xf0, 0x31, 0x18, - 0x48, 0xbc, 0xa8, 0x46, 0xa4, 0xfd, 0x5a, 0x7d, 0x8e, 0x75, 0x06, 0xc5, 0xa2, 0x14, 0x05, 0x50, - 0x4c, 0xbc, 0x78, 0x4b, 0x6a, 0x97, 0xd7, 0xac, 0x0d, 0x71, 0xaa, 0x58, 0xd2, 0x5f, 0x31, 0xe6, - 0x6c, 0xd0, 0xe3, 0x30, 0x44, 0x15, 0x80, 0x05, 0x2f, 0x96, 0x21, 0x25, 0xa3, 0x54, 0x88, 0x2f, - 0x08, 0x18, 0x56, 0xa5, 0xee, 0xaf, 0x17, 0xa0, 0x7f, 0x9e, 0x9f, 0x33, 0x06, 0xe2, 0xb0, 0x1d, - 0x55, 0x88, 0xd0, 0x37, 0x2d, 0xcc, 0x69, 0x4a, 0xb7, 0xcc, 0x68, 0x6a, 0x9a, 0x3e, 0xfb, 0x8d, - 0x05, 0x2f, 0x7a, 0x90, 0x1d, 0x4f, 0x22, 0x2f, 0x88, 0x37, 0x99, 0xa7, 0xc0, 0x0f, 0x03, 0x31, - 0x44, 0x16, 0x66, 0xe1, 0xba, 0x41, 0xb7, 0x9c, 0x90, 0x56, 0xea, 0xb0, 0x30, 0xcb, 0x70, 0xa6, - 0x0d, 0xee, 0x6f, 0x38, 0x00, 0x69, 0xeb, 0xd1, 0x67, 0x1d, 0x18, 0xf3, 0xf4, 0x50, 0x46, 0x31, - 0x46, 0xab, 0xf6, 0xdc, 0x8a, 0x8c, 0x2c, 0x3f, 0x62, 0x1b, 0x20, 0x6c, 0x32, 0x76, 0xdf, 0x0d, - 0x45, 0xb6, 0x3a, 0x98, 0x2e, 0x2e, 0x4c, 0xb2, 0x59, 0x1b, 0x8c, 0x34, 0xd5, 0x62, 0x85, 0xe1, - 0xbe, 0x04, 0xe3, 0x57, 0x6e, 0x91, 0x4a, 0x3b, 0x09, 0x23, 0x6e, 0x90, 0xee, 0x72, 0x75, 0xc5, - 0x39, 0xd2, 0xd5, 0x95, 
0xef, 0x38, 0x30, 0xa2, 0xc5, 0xb5, 0xd1, 0x9d, 0xba, 0x36, 0x57, 0xe6, - 0xe7, 0x6e, 0x31, 0x54, 0x4b, 0x56, 0x22, 0xe7, 0x38, 0xc9, 0x74, 0x1b, 0x51, 0x20, 0x9c, 0x32, - 0xbc, 0x43, 0xdc, 0x99, 0xfb, 0xfb, 0x0e, 0x9c, 0xcb, 0x0d, 0xc2, 0xbb, 0xc7, 0xcd, 0x36, 0x7c, - 0xbf, 0x85, 0x1e, 0x7c, 0xbf, 0xbf, 0xed, 0x40, 0x4a, 0x89, 0x8a, 0xa2, 0x8d, 0xb4, 0xe5, 0x9a, - 0x28, 0x12, 0x9c, 0x44, 0x29, 0x7a, 0x1d, 0x2e, 0x98, 0x5f, 0xf0, 0x88, 0x6e, 0x00, 0x7e, 0x66, - 0xca, 0xa7, 0x84, 0xbb, 0xb1, 0x70, 0xbf, 0xe6, 0x40, 0x71, 0xd1, 0x6b, 0xd7, 0x48, 0x4f, 0x56, - 0x1c, 0x2a, 0xc7, 0x22, 0xe2, 0x35, 0x12, 0xa9, 0xa7, 0x0b, 0x39, 0x86, 0x05, 0x0c, 0xab, 0x52, - 0x34, 0x03, 0xc3, 0x61, 0x8b, 0x18, 0xae, 0xab, 0x47, 0xe4, 0xe8, 0xad, 0xca, 0x02, 0xba, 0xed, - 0x30, 0xee, 0x0a, 0x82, 0xd3, 0x5a, 0xee, 0x0f, 0x8a, 0x30, 0xa2, 0x5d, 0xd7, 0xa0, 0xba, 0x40, - 0x44, 0x5a, 0x61, 0x56, 0x5f, 0xa6, 0x13, 0x06, 0xb3, 0x12, 0xba, 0x06, 0x23, 0xb2, 0xed, 0xc7, - 0x5c, 0x6c, 0x19, 0x6b, 0x10, 0x0b, 0x38, 0x56, 0x18, 0x68, 0x0a, 0x8a, 0x55, 0xd2, 0x4a, 0xea, - 0xac, 0x79, 0xfd, 0x3c, 0x66, 0x6d, 0x9e, 0x02, 0x30, 0x87, 0x53, 0x84, 0x4d, 0x92, 0x54, 0xea, - 0xcc, 0x60, 0x29, 0x82, 0xda, 0x16, 0x28, 0x00, 0x73, 0x78, 0x8e, 0x73, 0xad, 0x78, 0xfc, 0xce, - 0xb5, 0x01, 0xcb, 0xce, 0x35, 0xd4, 0x82, 0x33, 0x71, 0x5c, 0x5f, 0x8b, 0xfc, 0x6d, 0x2f, 0x21, - 0xe9, 0xec, 0x1b, 0x3c, 0x0c, 0x9f, 0x0b, 0xec, 0x02, 0x75, 0xf9, 0x6a, 0x96, 0x0a, 0xce, 0x23, - 0x8d, 0xca, 0x70, 0xce, 0x0f, 0x62, 0x52, 0x69, 0x47, 0xe4, 0x5a, 0x2d, 0x08, 0x23, 0x72, 0x35, - 0x8c, 0x29, 0x39, 0x71, 0xfd, 0x53, 0x85, 0x79, 0x5e, 0xcb, 0x43, 0xc2, 0xf9, 0x75, 0xd1, 0x22, - 0x9c, 0xae, 0xfa, 0xb1, 0xb7, 0xd1, 0x20, 0xe5, 0xf6, 0x46, 0x33, 0xa4, 0x87, 0x3e, 0x7e, 0x25, - 0x63, 0x68, 0xf6, 0x7e, 0x69, 0xde, 0x98, 0xcf, 0x22, 0xe0, 0xce, 0x3a, 0xe8, 0x19, 0x18, 0x8d, - 0xfd, 0xa0, 0xd6, 0x20, 0xb3, 0x91, 0x17, 0x54, 0xea, 0xe2, 0xde, 0xa8, 0x32, 0x03, 0x97, 0xb5, - 0x32, 0x6c, 0x60, 0xb2, 0x35, 0xcf, 0xeb, 0x64, 0xb4, 0x41, 
0x81, 0x2d, 0x4a, 0xdd, 0x1f, 0x3a, - 0x30, 0xaa, 0x87, 0x58, 0x53, 0x4d, 0x1b, 0xea, 0xf3, 0x0b, 0x65, 0xbe, 0x17, 0xd8, 0xdb, 0xf1, - 0xaf, 0x2a, 0x9a, 0xe9, 0xc9, 0x34, 0x85, 0x61, 0x8d, 0x67, 0x0f, 0x17, 0xa6, 0x1f, 0x81, 0xe2, - 0x66, 0x48, 0x15, 0x92, 0x3e, 0xd3, 0x7e, 0xbc, 0x40, 0x81, 0x98, 0x97, 0xb9, 0xff, 0xd3, 0x81, - 0xf3, 0xf9, 0xd1, 0xe3, 0x3f, 0x0b, 0x9d, 0xbc, 0x0c, 0x40, 0xbb, 0x62, 0x08, 0x75, 0x2d, 0x65, - 0x82, 0x2c, 0xc1, 0x1a, 0x56, 0x6f, 0xdd, 0xfe, 0x33, 0xaa, 0x14, 0xa7, 0x7c, 0x3e, 0xef, 0xc0, - 0x18, 0x65, 0xbb, 0x14, 0x6d, 0x18, 0xbd, 0x5d, 0xb5, 0xd3, 0x5b, 0x45, 0x36, 0x35, 0x93, 0x1b, - 0x60, 0x6c, 0x32, 0x47, 0xbf, 0x00, 0xc3, 0x5e, 0xb5, 0x1a, 0x91, 0x38, 0x56, 0x0e, 0x27, 0xe6, - 0x0b, 0x9f, 0x91, 0x40, 0x9c, 0x96, 0x53, 0x21, 0x5a, 0xaf, 0x6e, 0xc6, 0x54, 0x2e, 0x09, 0xc1, - 0xad, 0x84, 0x28, 0x65, 0x42, 0xe1, 0x58, 0x61, 0xb8, 0xbf, 0xda, 0x0f, 0x26, 0x6f, 0x54, 0x85, - 0x53, 0x5b, 0xd1, 0xc6, 0x1c, 0xf3, 0xd7, 0x1f, 0xc5, 0x6f, 0xce, 0xfc, 0xd9, 0x4b, 0x26, 0x05, - 0x9c, 0x25, 0x29, 0xb8, 0x2c, 0x91, 0xdd, 0xc4, 0xdb, 0x38, 0xb2, 0xd7, 0x7c, 0xc9, 0xa4, 0x80, - 0xb3, 0x24, 0xd1, 0xbb, 0x61, 0x64, 0x2b, 0xda, 0x90, 0x22, 0x3a, 0x1b, 0x82, 0xb1, 0x94, 0x16, - 0x61, 0x1d, 0x8f, 0x0e, 0xe1, 0x56, 0xb4, 0x41, 0x77, 0x45, 0x99, 0x40, 0x40, 0x0d, 0xe1, 0x92, - 0x80, 0x63, 0x85, 0x81, 0x5a, 0x80, 0xb6, 0xe4, 0xe8, 0xa9, 0xe8, 0x04, 0xb1, 0x93, 0xf4, 0x1e, - 0xdc, 0xc0, 0xc2, 0xc2, 0x97, 0x3a, 0xe8, 0xe0, 0x1c, 0xda, 0xe8, 0x05, 0xb8, 0xb0, 0x15, 0x6d, - 0x08, 0x65, 0x61, 0x2d, 0xf2, 0x83, 0x8a, 0xdf, 0x32, 0x92, 0x05, 0x4c, 0x89, 0xe6, 0x5e, 0x58, - 0xca, 0x47, 0xc3, 0xdd, 0xea, 0xbb, 0xbf, 0xd3, 0x0f, 0xec, 0x9a, 0x23, 0x95, 0x85, 0x4d, 0x92, - 0xd4, 0xc3, 0x6a, 0x56, 0xff, 0x59, 0x61, 0x50, 0x2c, 0x4a, 0x65, 0xf0, 0x63, 0xa1, 0x4b, 0xf0, - 0xe3, 0x0e, 0x0c, 0xd6, 0x89, 0x57, 0x25, 0x91, 0x34, 0xd7, 0x2d, 0xdb, 0xb9, 0x98, 0x79, 0x95, - 0x11, 0x4d, 0x8f, 0xe1, 0xfc, 0x77, 0x8c, 0x25, 0x37, 0xf4, 0x1e, 0x18, 0xa7, 0x8a, 0x4c, 0xd8, - 
0x4e, 0xa4, 0x6d, 0xba, 0x9f, 0xd9, 0xa6, 0xd9, 0x8e, 0xba, 0x6e, 0x94, 0xe0, 0x0c, 0x26, 0x9a, - 0x87, 0x09, 0x61, 0x47, 0x56, 0x66, 0x40, 0x31, 0xb0, 0x2a, 0x8b, 0x43, 0x39, 0x53, 0x8e, 0x3b, - 0x6a, 0xb0, 0xe0, 0xb5, 0xb0, 0xca, 0x5d, 0x89, 0x7a, 0xf0, 0x5a, 0x58, 0xdd, 0xc5, 0xac, 0x04, - 0xbd, 0x0a, 0x43, 0xf4, 0xef, 0x42, 0x14, 0x36, 0x85, 0x6d, 0x66, 0xcd, 0xce, 0xe8, 0x50, 0x1e, - 0xe2, 0xa4, 0xc8, 0x14, 0xbc, 0x59, 0xc1, 0x05, 0x2b, 0x7e, 0xf4, 0xbc, 0x22, 0xf7, 0xe1, 0xf2, - 0x96, 0xdf, 0x7a, 0x9e, 0x44, 0xfe, 0xe6, 0x2e, 0x53, 0x1a, 0x86, 0xd2, 0xf3, 0xca, 0xb5, 0x0e, - 0x0c, 0x9c, 0x53, 0xcb, 0xfd, 0x7c, 0x01, 0x46, 0xf5, 0xdb, 0xb2, 0x77, 0x8a, 0x88, 0x8d, 0xd3, - 0x49, 0xc1, 0x4f, 0xa7, 0x57, 0x2d, 0x74, 0xfb, 0x4e, 0x13, 0xa2, 0x0e, 0xfd, 0x5e, 0x5b, 0x68, - 0x8b, 0x56, 0x8c, 0x60, 0xac, 0xc7, 0xed, 0xa4, 0xce, 0xaf, 0x55, 0xb1, 0x58, 0x55, 0xc6, 0xc1, - 0xfd, 0x54, 0x1f, 0x0c, 0xc9, 0x42, 0xf4, 0x49, 0x07, 0x20, 0x8d, 0x19, 0x12, 0xa2, 0x74, 0xcd, - 0x46, 0x40, 0x89, 0x1e, 0xee, 0xa4, 0x19, 0xae, 0x15, 0x1c, 0x6b, 0x7c, 0x51, 0x02, 0x03, 0x21, - 0x6d, 0xdc, 0x65, 0x7b, 0x37, 0xbe, 0x57, 0x29, 0xe3, 0xcb, 0x8c, 0x7b, 0x6a, 0x36, 0x63, 0x30, - 0x2c, 0x78, 0xd1, 0x13, 0xe0, 0x86, 0x0c, 0x65, 0xb3, 0x67, 0x62, 0x56, 0xd1, 0x71, 0xe9, 0x81, - 0x4e, 0x81, 0x70, 0xca, 0xd0, 0x7d, 0x0a, 0xc6, 0xcd, 0xc5, 0x40, 0x4f, 0x04, 0x1b, 0xbb, 0x09, - 0xe1, 0xf6, 0x86, 0x51, 0x7e, 0x22, 0x98, 0xa5, 0x00, 0xcc, 0xe1, 0xee, 0xf7, 0xa9, 0x1e, 0xa0, - 0xc4, 0x4b, 0x0f, 0x26, 0xfe, 0x47, 0x74, 0x63, 0x59, 0xb7, 0x63, 0xd7, 0xc7, 0x60, 0x98, 0xfd, - 0xc3, 0x16, 0x7a, 0x9f, 0x2d, 0xc7, 0x73, 0xda, 0x4e, 0xb1, 0xd4, 0x99, 0x4e, 0xf0, 0xbc, 0x64, - 0x84, 0x53, 0x9e, 0x6e, 0x08, 0x13, 0x59, 0x6c, 0xf4, 0x41, 0x18, 0x8d, 0xe5, 0xb6, 0x9a, 0xde, - 0xfd, 0xea, 0x71, 0xfb, 0x65, 0x76, 0xdf, 0xb2, 0x56, 0x1d, 0x1b, 0xc4, 0xdc, 0x55, 0x18, 0xb0, - 0x3a, 0x84, 0xee, 0xb7, 0x1d, 0x18, 0x66, 0x9e, 0xb7, 0x5a, 0xe4, 0x35, 0xd3, 0x2a, 0x7d, 0x07, - 0x8c, 0x7a, 0x0c, 0x83, 0xfc, 0x8c, 
0x2e, 0x23, 0x56, 0x2c, 0x48, 0x19, 0x9e, 0xa8, 0x2d, 0x95, - 0x32, 0xdc, 0x18, 0x10, 0x63, 0xc9, 0xc9, 0xfd, 0x74, 0x01, 0x06, 0xae, 0x05, 0xad, 0xf6, 0x5f, - 0xf9, 0x64, 0x61, 0x2b, 0xd0, 0x7f, 0x2d, 0x21, 0x4d, 0x33, 0xa7, 0xdd, 0xe8, 0xec, 0xa3, 0x7a, - 0x3e, 0xbb, 0x92, 0x99, 0xcf, 0x0e, 0x7b, 0x3b, 0x32, 0xa0, 0x4b, 0xd8, 0x88, 0xd3, 0xfb, 0x6f, - 0x4f, 0xc2, 0xf0, 0xb2, 0xb7, 0x41, 0x1a, 0x4b, 0x64, 0x97, 0xdd, 0x56, 0xe3, 0xc1, 0x05, 0x4e, - 0x7a, 0xb0, 0x37, 0x02, 0x01, 0xe6, 0x61, 0x9c, 0x61, 0xab, 0xc5, 0x40, 0x4f, 0x0e, 0x24, 0x4d, - 0x08, 0xe4, 0x98, 0x27, 0x07, 0x2d, 0x19, 0x90, 0x86, 0xe5, 0x4e, 0xc3, 0x48, 0x4a, 0xa5, 0x07, - 0xae, 0x3f, 0x2d, 0xc0, 0x98, 0x61, 0xea, 0x36, 0x1c, 0x80, 0xce, 0x1d, 0x1d, 0x80, 0x86, 0x43, - 0xae, 0x70, 0xaf, 0x1d, 0x72, 0x7d, 0x27, 0xef, 0x90, 0x33, 0x3f, 0x52, 0x7f, 0x4f, 0x1f, 0xa9, - 0x01, 0xfd, 0xcb, 0x7e, 0xb0, 0xd5, 0x9b, 0x9c, 0x89, 0x2b, 0x61, 0xab, 0x43, 0xce, 0x94, 0x29, - 0x10, 0xf3, 0x32, 0xa9, 0xb9, 0xf4, 0xe5, 0x6b, 0x2e, 0xee, 0x27, 0x1d, 0x18, 0x5d, 0xf1, 0x02, - 0x7f, 0x93, 0xc4, 0x09, 0x9b, 0x57, 0xc9, 0xb1, 0xde, 0x5a, 0x1a, 0xed, 0x72, 0xff, 0xfe, 0x4d, - 0x07, 0x4e, 0xaf, 0x90, 0x66, 0xe8, 0xbf, 0xea, 0xa5, 0xf1, 0x92, 0xb4, 0xed, 0x75, 0x3f, 0x11, - 0xe1, 0x61, 0xaa, 0xed, 0x57, 0xfd, 0x04, 0x53, 0xf8, 0x1d, 0xec, 0xb8, 0xec, 0x3e, 0x00, 0x3d, - 0xa0, 0x69, 0x37, 0xe9, 0xd2, 0x48, 0x48, 0x59, 0x80, 0x53, 0x1c, 0xf7, 0x77, 0x1d, 0x18, 0xe4, - 0x8d, 0x50, 0x21, 0xa6, 0x4e, 0x17, 0xda, 0x75, 0x28, 0xb2, 0x7a, 0x62, 0x56, 0x2f, 0x5a, 0x50, - 0x7f, 0x28, 0x39, 0xbe, 0x06, 0xd9, 0xbf, 0x98, 0x33, 0x60, 0xc7, 0x16, 0xef, 0xd6, 0x8c, 0x0a, - 0x15, 0x4d, 0x8f, 0x2d, 0x0c, 0x8a, 0x45, 0xa9, 0xfb, 0xf5, 0x3e, 0x18, 0x52, 0x69, 0xa7, 0x58, - 0x52, 0x80, 0x20, 0x08, 0x13, 0x8f, 0x07, 0x16, 0x70, 0x59, 0xfd, 0x41, 0x7b, 0x69, 0xaf, 0xa6, - 0x67, 0x52, 0xea, 0xdc, 0x7f, 0xa7, 0x0e, 0xa1, 0x5a, 0x09, 0xd6, 0x1b, 0x81, 0x3e, 0x0a, 0x03, - 0x0d, 0x2a, 0x7d, 0xa4, 0xe8, 0x7e, 0xde, 0x62, 0x73, 0x98, 0x58, 0x13, 
0x2d, 0x51, 0x23, 0xc4, - 0x81, 0x58, 0x70, 0x9d, 0x7c, 0x1f, 0x4c, 0x64, 0x5b, 0x7d, 0xa7, 0x8b, 0x7e, 0xc3, 0xfa, 0x35, - 0xc1, 0xbf, 0x29, 0xa4, 0xe7, 0xe1, 0xab, 0xba, 0xcf, 0xc1, 0xc8, 0x0a, 0x49, 0x22, 0xbf, 0xc2, - 0x08, 0xdc, 0x69, 0x72, 0xf5, 0xa4, 0x3f, 0x7c, 0x86, 0x4d, 0x56, 0x4a, 0x33, 0x46, 0xaf, 0x03, - 0xb4, 0xa2, 0x90, 0x9e, 0x5f, 0x49, 0x5b, 0x7e, 0x6c, 0x0b, 0xfa, 0xf0, 0x9a, 0xa2, 0xc9, 0x5d, - 0xce, 0xe9, 0x6f, 0xac, 0xf1, 0x73, 0x5f, 0x84, 0xe2, 0x4a, 0x3b, 0x21, 0xb7, 0x7a, 0x90, 0x58, - 0x87, 0xbd, 0xf9, 0xee, 0x7e, 0x10, 0x46, 0x19, 0xed, 0xab, 0x61, 0x83, 0x6e, 0xab, 0x74, 0x68, - 0x9a, 0xf4, 0x77, 0xd6, 0x29, 0xc0, 0x90, 0x30, 0x2f, 0xa3, 0x4b, 0xa6, 0x1e, 0x36, 0xaa, 0xea, - 0x16, 0x90, 0x9a, 0x10, 0x57, 0x19, 0x14, 0x8b, 0x52, 0xf7, 0x13, 0x05, 0x18, 0x61, 0x15, 0x85, - 0xb8, 0xd9, 0x85, 0xc1, 0x3a, 0xe7, 0x23, 0xc6, 0xd0, 0x42, 0x48, 0x9d, 0xde, 0x7a, 0xed, 0x2c, - 0xc7, 0x01, 0x58, 0xf2, 0xa3, 0xac, 0x77, 0x3c, 0x3f, 0xa1, 0xac, 0x0b, 0xc7, 0xcb, 0xfa, 0x26, - 0x67, 0x83, 0x25, 0x3f, 0xf7, 0x97, 0x81, 0xdd, 0xae, 0x5d, 0x68, 0x78, 0x35, 0x3e, 0x72, 0xe1, - 0x16, 0xa9, 0x0a, 0x99, 0xab, 0x8d, 0x1c, 0x85, 0x62, 0x51, 0xca, 0x6f, 0x2c, 0x26, 0x91, 0xaf, - 0xa2, 0x72, 0xb5, 0x1b, 0x8b, 0x0c, 0x2c, 0x63, 0xb0, 0xab, 0xee, 0x97, 0x0b, 0x00, 0x2c, 0x49, - 0x19, 0xbf, 0x14, 0xfb, 0x8b, 0x50, 0x6c, 0xd5, 0xbd, 0x38, 0xeb, 0x48, 0x2c, 0xae, 0x51, 0xe0, - 0x6d, 0x71, 0xed, 0x97, 0xfd, 0xc0, 0x1c, 0x51, 0x0f, 0x96, 0x2f, 0x1c, 0x1c, 0x2c, 0x8f, 0x5a, - 0x30, 0x18, 0xb6, 0x13, 0xaa, 0xab, 0x8a, 0xcd, 0xde, 0x82, 0x1f, 0x7d, 0x95, 0x13, 0xe4, 0x11, - 0xe6, 0xe2, 0x07, 0x96, 0x6c, 0xd0, 0x33, 0x30, 0xd4, 0x8a, 0xc2, 0x1a, 0xdd, 0xbb, 0xc5, 0xf6, - 0xfe, 0xa0, 0xd4, 0x87, 0xd6, 0x04, 0xfc, 0xb6, 0xf6, 0x3f, 0x56, 0xd8, 0xee, 0x1f, 0x4f, 0xf0, - 0x71, 0x11, 0x73, 0x6f, 0x12, 0x0a, 0xbe, 0xb4, 0x4c, 0x81, 0x20, 0x51, 0xb8, 0x36, 0x8f, 0x0b, - 0x7e, 0x55, 0xad, 0xab, 0x42, 0xd7, 0x75, 0xf5, 0x6e, 0x18, 0xa9, 0xfa, 0x71, 0xab, 0xe1, 0xed, - 0x5e, 0xcf, 
0x31, 0x0b, 0xce, 0xa7, 0x45, 0x58, 0xc7, 0x43, 0x4f, 0x8a, 0xab, 0x11, 0xfd, 0x86, - 0x29, 0x48, 0x5e, 0x8d, 0x48, 0x2f, 0x5d, 0xf3, 0x5b, 0x11, 0xd9, 0xcb, 0xe9, 0xc5, 0x9e, 0x2f, - 0xa7, 0x67, 0x35, 0xb1, 0x81, 0x93, 0xd7, 0xc4, 0xde, 0x0b, 0x63, 0xf2, 0x27, 0x53, 0x8f, 0x4a, - 0x67, 0x59, 0xeb, 0x95, 0xb9, 0x7a, 0x5d, 0x2f, 0xc4, 0x26, 0x6e, 0x3a, 0x69, 0x07, 0x7b, 0x9d, - 0xb4, 0x97, 0x01, 0x36, 0xc2, 0x76, 0x50, 0xf5, 0xa2, 0xdd, 0x6b, 0xf3, 0x22, 0x90, 0x52, 0x29, - 0x7e, 0xb3, 0xaa, 0x04, 0x6b, 0x58, 0xfa, 0x44, 0x1f, 0xbe, 0xc3, 0x44, 0xff, 0x20, 0x0c, 0xb3, - 0xa0, 0x53, 0x52, 0x9d, 0x49, 0x44, 0x88, 0xd1, 0x61, 0xe2, 0x13, 0x95, 0xcc, 0x2d, 0x4b, 0x22, - 0x38, 0xa5, 0x87, 0x3e, 0x04, 0xb0, 0xe9, 0x07, 0x7e, 0x5c, 0x67, 0xd4, 0x47, 0x0e, 0x4d, 0x5d, - 0xf5, 0x73, 0x41, 0x51, 0xc1, 0x1a, 0x45, 0xf4, 0x12, 0x9c, 0x26, 0x71, 0xe2, 0x37, 0xbd, 0x84, - 0x54, 0xd5, 0x65, 0xc2, 0x12, 0xb3, 0x65, 0xaa, 0xb0, 0xdf, 0x2b, 0x59, 0x84, 0xdb, 0x79, 0x40, - 0xdc, 0x49, 0xc8, 0x58, 0x91, 0x93, 0x87, 0x59, 0x91, 0xe8, 0xcf, 0x1d, 0x38, 0x1d, 0x11, 0x1e, - 0x77, 0x12, 0xab, 0x86, 0x9d, 0x63, 0xe2, 0xb8, 0x62, 0x23, 0xff, 0xb7, 0x4a, 0xf4, 0x81, 0xb3, - 0x5c, 0xb8, 0xe2, 0x42, 0x64, 0xef, 0x3b, 0xca, 0x6f, 0xe7, 0x01, 0xdf, 0x7c, 0x7b, 0x6a, 0xaa, - 0x33, 0x0f, 0xbd, 0x22, 0x4e, 0x57, 0xde, 0xdf, 0x7d, 0x7b, 0x6a, 0x42, 0xfe, 0x4e, 0x07, 0xad, - 0xa3, 0x93, 0x74, 0x5b, 0x6d, 0x85, 0xd5, 0x6b, 0x6b, 0x22, 0x16, 0x4c, 0x6d, 0xab, 0x6b, 0x14, - 0x88, 0x79, 0x19, 0x7a, 0x1c, 0x86, 0xaa, 0x1e, 0x69, 0x86, 0x81, 0xca, 0xe4, 0xca, 0xb4, 0xf9, - 0x79, 0x01, 0xc3, 0xaa, 0x94, 0x9e, 0x21, 0x02, 0xb1, 0xa5, 0x94, 0x1e, 0xb0, 0x75, 0x86, 0x90, - 0x9b, 0x14, 0xe7, 0x2a, 0x7f, 0x61, 0xc5, 0x09, 0x35, 0x60, 0xc0, 0x67, 0x86, 0x0a, 0x11, 0x6e, - 0x6a, 0xc1, 0x3a, 0xc2, 0x0d, 0x1f, 0x32, 0xd8, 0x94, 0x89, 0x7e, 0xc1, 0x43, 0xdf, 0x6b, 0x4e, - 0x9d, 0xcc, 0x5e, 0xf3, 0x38, 0x0c, 0x55, 0xea, 0x7e, 0xa3, 0x1a, 0x91, 0xa0, 0x34, 0xc1, 0x4e, - 0xec, 0x6c, 0x24, 0xe6, 0x04, 0x0c, 0xab, 0x52, 
0xf4, 0x37, 0x60, 0x2c, 0x6c, 0x27, 0x4c, 0xb4, - 0xd0, 0x71, 0x8a, 0x4b, 0xa7, 0x19, 0x3a, 0x0b, 0x1e, 0x5a, 0xd5, 0x0b, 0xb0, 0x89, 0x47, 0x45, - 0x7c, 0x3d, 0x8c, 0x59, 0x4e, 0x1a, 0x26, 0xe2, 0xcf, 0x9b, 0x22, 0xfe, 0xaa, 0x56, 0x86, 0x0d, - 0x4c, 0xf4, 0x55, 0x07, 0x4e, 0x37, 0xb3, 0x07, 0xb8, 0xd2, 0x05, 0x36, 0x32, 0x65, 0x1b, 0x8a, - 0x7e, 0x86, 0x34, 0x8f, 0xb1, 0xee, 0x00, 0xe3, 0xce, 0x46, 0xb0, 0xec, 0x50, 0xf1, 0x6e, 0x50, - 0xa9, 0x47, 0x61, 0x60, 0x36, 0xef, 0x7e, 0x5b, 0x77, 0xa2, 0xd8, 0xda, 0xce, 0x63, 0x31, 0x7b, - 0xff, 0xfe, 0xde, 0xd4, 0xb9, 0xdc, 0x22, 0x9c, 0xdf, 0xa8, 0xc9, 0x79, 0x38, 0x9f, 0x2f, 0x1f, - 0xee, 0x74, 0xe2, 0xe8, 0xd3, 0x4f, 0x1c, 0x0b, 0x70, 0x7f, 0xd7, 0x46, 0xd1, 0x9d, 0x46, 0x6a, - 0x9b, 0x8e, 0xb9, 0xd3, 0x74, 0x68, 0x87, 0xe3, 0x30, 0xaa, 0x3f, 0x5c, 0xe0, 0xfe, 0xdf, 0x3e, - 0x80, 0xd4, 0x4e, 0x8e, 0x3c, 0x18, 0xe7, 0x36, 0xf9, 0x6b, 0xf3, 0x47, 0xbe, 0xcd, 0x3d, 0x67, - 0x10, 0xc0, 0x19, 0x82, 0xa8, 0x09, 0x88, 0x43, 0xf8, 0xef, 0xa3, 0xf8, 0x56, 0x99, 0x2b, 0x72, - 0xae, 0x83, 0x08, 0xce, 0x21, 0x4c, 0x7b, 0x94, 0x84, 0x5b, 0x24, 0xb8, 0x81, 0x97, 0x8f, 0x92, - 0x12, 0x80, 0x7b, 0xe3, 0x0c, 0x02, 0x38, 0x43, 0x10, 0xb9, 0x30, 0xc0, 0x6c, 0x33, 0x32, 0x40, - 0x9b, 0x89, 0x17, 0xa6, 0x69, 0xc4, 0x58, 0x94, 0xa0, 0x2f, 0x3b, 0x30, 0x2e, 0x33, 0x1b, 0x30, - 0x6b, 0xa8, 0x0c, 0xcd, 0xbe, 0x61, 0xcb, 0xcf, 0x71, 0x45, 0xa7, 0x9e, 0x06, 0x3e, 0x1a, 0xe0, - 0x18, 0x67, 0x1a, 0xe1, 0xbe, 0x00, 0x67, 0x72, 0xaa, 0x5b, 0x39, 0xd1, 0x7e, 0xc7, 0x81, 0x11, - 0x2d, 0xe1, 0x1e, 0x7a, 0x1d, 0x86, 0xc3, 0xb2, 0xf5, 0x68, 0xbb, 0xd5, 0x72, 0x47, 0xb4, 0x9d, - 0x02, 0xe1, 0x94, 0x61, 0x2f, 0x41, 0x82, 0xb9, 0xd9, 0x01, 0xef, 0x71, 0xb3, 0x0f, 0x1d, 0x24, - 0xf8, 0xab, 0x45, 0x48, 0x29, 0x1d, 0x32, 0xe3, 0x46, 0x1a, 0x52, 0x58, 0x38, 0x30, 0xa4, 0xb0, - 0x0a, 0xa7, 0x3c, 0xe6, 0x4b, 0x3e, 0x62, 0x9e, 0x0d, 0x9e, 0x6f, 0xd5, 0xa4, 0x80, 0xb3, 0x24, - 0x29, 0x97, 0x38, 0xad, 0xca, 0xb8, 0xf4, 0x1f, 0x9a, 0x4b, 0xd9, 0xa4, 0x80, 0xb3, 
0x24, 0xd1, - 0x4b, 0x50, 0xaa, 0xb0, 0x7b, 0xa3, 0xbc, 0x8f, 0xd7, 0x36, 0xaf, 0x87, 0xc9, 0x5a, 0x44, 0x62, - 0x12, 0x24, 0x22, 0xa3, 0xd6, 0xc3, 0x62, 0x14, 0x4a, 0x73, 0x5d, 0xf0, 0x70, 0x57, 0x0a, 0xf4, - 0x98, 0xc2, 0x9c, 0xd1, 0x7e, 0xb2, 0xcb, 0x84, 0x88, 0xf0, 0xd2, 0xab, 0x63, 0x4a, 0x59, 0x2f, - 0xc4, 0x26, 0x2e, 0xfa, 0x15, 0x07, 0xc6, 0x1a, 0xd2, 0x5c, 0x8f, 0xdb, 0x0d, 0x99, 0x1e, 0x12, - 0x5b, 0x99, 0x7e, 0xcb, 0x3a, 0x65, 0xae, 0x4b, 0x18, 0x20, 0x6c, 0xf2, 0xce, 0x26, 0x3d, 0x19, - 0xea, 0x31, 0xe9, 0xc9, 0xf7, 0x1d, 0x98, 0xc8, 0x72, 0x43, 0x5b, 0xf0, 0x50, 0xd3, 0x8b, 0xb6, - 0xae, 0x05, 0x9b, 0x11, 0xbb, 0x88, 0x91, 0xf0, 0xc9, 0x30, 0xb3, 0x99, 0x90, 0x68, 0xde, 0xdb, - 0xe5, 0xee, 0xcf, 0xa2, 0x7a, 0x5f, 0xe8, 0xa1, 0x95, 0x83, 0x90, 0xf1, 0xc1, 0xb4, 0x50, 0x19, - 0xce, 0x51, 0x04, 0x96, 0x13, 0xcd, 0x0f, 0x83, 0x94, 0x49, 0x81, 0x31, 0x51, 0xc1, 0x80, 0x2b, - 0x79, 0x48, 0x38, 0xbf, 0xae, 0x7b, 0x05, 0x06, 0xf8, 0x25, 0xb4, 0xbb, 0xf2, 0x1f, 0xb9, 0xff, - 0xbe, 0x00, 0x52, 0x31, 0xfc, 0xab, 0xed, 0x8e, 0xa3, 0x9b, 0x68, 0xc4, 0x4c, 0x4a, 0xc2, 0xda, - 0xc1, 0x36, 0x51, 0x91, 0x7d, 0x50, 0x94, 0x50, 0x8d, 0x99, 0xdc, 0xf2, 0x93, 0xb9, 0xb0, 0x2a, - 0x6d, 0x1c, 0x4c, 0x63, 0xbe, 0x22, 0x60, 0x58, 0x95, 0xba, 0x9f, 0x74, 0x60, 0x8c, 0xf6, 0xb2, - 0xd1, 0x20, 0x8d, 0x72, 0x42, 0x5a, 0x31, 0x8a, 0xa1, 0x18, 0xd3, 0x7f, 0xec, 0x99, 0x02, 0xd3, - 0x8b, 0x8b, 0xa4, 0xa5, 0x39, 0x6b, 0x28, 0x13, 0xcc, 0x79, 0xb9, 0x6f, 0xf5, 0xc1, 0xb0, 0x1a, - 0xec, 0x1e, 0xec, 0xa9, 0x97, 0xd3, 0xc4, 0xa0, 0x5c, 0x02, 0x97, 0xb4, 0xa4, 0xa0, 0xb7, 0xe9, - 0xd0, 0x05, 0xbb, 0x3c, 0x43, 0x42, 0x9a, 0x21, 0xf4, 0x49, 0xd3, 0xd5, 0x7c, 0x5e, 0x9f, 0x7f, - 0x1a, 0xbe, 0xf0, 0x39, 0xdf, 0xd2, 0x3d, 0xfd, 0xfd, 0xb6, 0x76, 0x33, 0xe5, 0xc6, 0xec, 0xee, - 0xe2, 0xcf, 0xbc, 0x19, 0x53, 0xec, 0xe9, 0xcd, 0x98, 0x27, 0xa0, 0x9f, 0x04, 0xed, 0x26, 0x53, - 0x95, 0x86, 0xd9, 0x11, 0xa1, 0xff, 0x4a, 0xd0, 0x6e, 0x9a, 0x3d, 0x63, 0x28, 0xe8, 0x7d, 0x30, - 0x52, 0x25, 0x71, 0x25, 
0xf2, 0xd9, 0xb5, 0x7f, 0x61, 0xd9, 0x79, 0x90, 0x99, 0xcb, 0x52, 0xb0, - 0x59, 0x51, 0xaf, 0xe0, 0xbe, 0x0a, 0x03, 0x6b, 0x8d, 0x76, 0xcd, 0x0f, 0x50, 0x0b, 0x06, 0x78, - 0x12, 0x00, 0xb1, 0xdb, 0x5b, 0x38, 0x77, 0x72, 0x51, 0xa1, 0x45, 0xa1, 0xf0, 0xfb, 0xab, 0x82, - 0x8f, 0xfb, 0x89, 0x02, 0xd0, 0xa3, 0xf9, 0xe2, 0x1c, 0xfa, 0xdb, 0x1d, 0x4f, 0xa4, 0xfc, 0x5c, - 0xce, 0x13, 0x29, 0x63, 0x0c, 0x39, 0xe7, 0x75, 0x94, 0x06, 0x8c, 0x31, 0xe7, 0x88, 0xdc, 0x03, - 0x85, 0x5a, 0xfd, 0x74, 0x8f, 0xf7, 0xe6, 0xf5, 0xaa, 0x62, 0x47, 0xd0, 0x41, 0xd8, 0x24, 0x8e, - 0x56, 0xe0, 0x0c, 0xcf, 0x2f, 0x39, 0x4f, 0x1a, 0xde, 0x6e, 0x26, 0x8f, 0xd4, 0x03, 0xf2, 0xd5, - 0xab, 0xf9, 0x4e, 0x14, 0x9c, 0x57, 0xcf, 0xfd, 0xbd, 0x7e, 0xd0, 0x5c, 0x12, 0x3d, 0xac, 0x96, - 0x57, 0x32, 0x0e, 0xa8, 0x15, 0x2b, 0x0e, 0x28, 0xe9, 0xd5, 0xe1, 0x12, 0xc8, 0xf4, 0x39, 0xd1, - 0x46, 0xd5, 0x49, 0xa3, 0x25, 0xfa, 0xa8, 0x1a, 0x75, 0x95, 0x34, 0x5a, 0x98, 0x95, 0xa8, 0x0b, - 0x85, 0xfd, 0x5d, 0x2f, 0x14, 0xd6, 0xa1, 0x58, 0xf3, 0xda, 0x35, 0x22, 0x22, 0x30, 0x2d, 0xf8, - 0x1a, 0xd9, 0x15, 0x07, 0xee, 0x6b, 0x64, 0xff, 0x62, 0xce, 0x80, 0x2e, 0xf6, 0xba, 0x0c, 0x49, - 0x11, 0x46, 0x5a, 0x0b, 0x8b, 0x5d, 0x45, 0xb9, 0xf0, 0xc5, 0xae, 0x7e, 0xe2, 0x94, 0x19, 0x6a, - 0xc1, 0x60, 0x85, 0x67, 0xef, 0x10, 0x3a, 0xcb, 0x35, 0x1b, 0x37, 0x26, 0x19, 0x41, 0x6e, 0x4d, - 0x11, 0x3f, 0xb0, 0x64, 0xe3, 0x5e, 0x82, 0x11, 0xed, 0xa5, 0x06, 0xfa, 0x19, 0x54, 0xe2, 0x08, - 0xed, 0x33, 0xcc, 0x7b, 0x89, 0x87, 0x59, 0x89, 0xfb, 0xcd, 0x7e, 0x50, 0xb6, 0x34, 0xfd, 0x7e, - 0x9f, 0x57, 0xd1, 0xd2, 0xdc, 0x18, 0x17, 0xcb, 0xc3, 0x00, 0x8b, 0x52, 0xaa, 0xd7, 0x35, 0x49, - 0x54, 0x53, 0xe7, 0x68, 0x21, 0xae, 0x95, 0x5e, 0xb7, 0xa2, 0x17, 0x62, 0x13, 0x97, 0x2a, 0xe5, - 0x4d, 0xe1, 0xa2, 0xcf, 0x06, 0x40, 0x4b, 0xd7, 0x3d, 0x56, 0x18, 0xe8, 0x93, 0x0e, 0x8c, 0x36, - 0x35, 0x8f, 0xbe, 0x08, 0xc4, 0xb4, 0xe1, 0x50, 0xd2, 0xa8, 0xf2, 0x80, 0x29, 0x1d, 0x82, 0x0d, - 0xae, 0x68, 0x11, 0x4e, 0xc7, 0x24, 0x59, 0xdd, 0x09, 0x48, 
0xa4, 0xee, 0xdd, 0x8b, 0x44, 0x0c, - 0xea, 0xf6, 0x43, 0x39, 0x8b, 0x80, 0x3b, 0xeb, 0xe4, 0xc6, 0xae, 0x16, 0x0f, 0x1d, 0xbb, 0x3a, - 0x0f, 0x13, 0x9b, 0x9e, 0xdf, 0x68, 0x47, 0xa4, 0x6b, 0x04, 0xec, 0x42, 0xa6, 0x1c, 0x77, 0xd4, - 0x60, 0x17, 0x70, 0x1a, 0x5e, 0x2d, 0x2e, 0x0d, 0x6a, 0x17, 0x70, 0x28, 0x00, 0x73, 0xb8, 0xfb, - 0x9b, 0x0e, 0xf0, 0x0c, 0x38, 0x33, 0x9b, 0x9b, 0x7e, 0xe0, 0x27, 0xbb, 0xe8, 0x6b, 0x0e, 0x4c, - 0x04, 0x61, 0x95, 0xcc, 0x04, 0x89, 0x2f, 0x81, 0xf6, 0xd2, 0x92, 0x33, 0x5e, 0xd7, 0x33, 0xe4, - 0x79, 0x3a, 0x85, 0x2c, 0x14, 0x77, 0x34, 0xc3, 0xbd, 0x00, 0xe7, 0x72, 0x09, 0xb8, 0xdf, 0xef, - 0x03, 0x33, 0x91, 0x0f, 0x7a, 0x0e, 0x8a, 0x0d, 0x96, 0x5a, 0xc2, 0x39, 0x62, 0x86, 0x26, 0x36, - 0x56, 0x3c, 0xf7, 0x04, 0xa7, 0x84, 0xe6, 0x61, 0x84, 0x65, 0x07, 0x12, 0x89, 0x3f, 0xf8, 0x8a, - 0x70, 0xd3, 0xd7, 0xd0, 0x54, 0xd1, 0x6d, 0xf3, 0x27, 0xd6, 0xab, 0xa1, 0xd7, 0x60, 0x70, 0x83, - 0xe7, 0x48, 0xb4, 0xe7, 0xf3, 0x13, 0x49, 0x17, 0x99, 0x6e, 0x24, 0x33, 0x30, 0xde, 0x4e, 0xff, - 0xc5, 0x92, 0x23, 0xda, 0x85, 0x21, 0x4f, 0x7e, 0xd3, 0x7e, 0x5b, 0x17, 0x2a, 0x8c, 0xf9, 0x23, - 0x22, 0x66, 0xe4, 0x37, 0x54, 0xec, 0x32, 0xa1, 0x45, 0xc5, 0x9e, 0x42, 0x8b, 0xbe, 0xed, 0x00, - 0xa4, 0x0f, 0x4a, 0xa0, 0x5b, 0x30, 0x14, 0x3f, 0x6d, 0x18, 0x2a, 0x6c, 0xdc, 0xa4, 0x17, 0x14, - 0xb5, 0xdb, 0xa6, 0x02, 0x82, 0x15, 0xb7, 0x3b, 0x19, 0x57, 0x7e, 0xea, 0xc0, 0xd9, 0xbc, 0x87, - 0x2f, 0xee, 0x61, 0x8b, 0x0f, 0x6b, 0x57, 0x11, 0x15, 0xd6, 0x22, 0xb2, 0xe9, 0xdf, 0xca, 0xc9, - 0xd4, 0xcb, 0x0b, 0x70, 0x8a, 0xe3, 0xbe, 0x39, 0x08, 0x8a, 0xf1, 0x31, 0xd9, 0x61, 0x1e, 0xa3, - 0x67, 0xa6, 0x5a, 0xaa, 0x73, 0x29, 0x3c, 0xcc, 0xa0, 0x58, 0x94, 0xd2, 0x73, 0x93, 0x0c, 0x8a, - 0x17, 0x22, 0x9b, 0xcd, 0x42, 0x19, 0x3c, 0x8f, 0x55, 0x69, 0x9e, 0x65, 0xa7, 0x78, 0x22, 0x96, - 0x9d, 0x01, 0xfb, 0x96, 0x9d, 0x27, 0x60, 0x30, 0x0a, 0x1b, 0x64, 0x06, 0x5f, 0x17, 0xa7, 0x81, - 0x34, 0xa8, 0x81, 0x83, 0xb1, 0x2c, 0x3f, 0xa2, 0x6d, 0x03, 0xfd, 0xb6, 0x73, 0x80, 0xf1, 0x68, - 
0xd8, 0xd6, 0x9e, 0x90, 0x9b, 0xd6, 0x8c, 0x1d, 0x6d, 0x8e, 0x62, 0x91, 0xfa, 0xba, 0x03, 0xa7, - 0x49, 0x50, 0x89, 0x76, 0x19, 0x1d, 0x41, 0x4d, 0xf8, 0x9c, 0x6f, 0xd8, 0x58, 0x7c, 0x57, 0xb2, - 0xc4, 0xb9, 0x6b, 0xa7, 0x03, 0x8c, 0x3b, 0x9b, 0x81, 0x56, 0x61, 0xa8, 0xe2, 0x89, 0x19, 0x31, - 0x72, 0x98, 0x19, 0xc1, 0x3d, 0x67, 0x33, 0x62, 0x2a, 0x28, 0x22, 0xee, 0x8f, 0x0b, 0x70, 0x26, - 0xa7, 0x49, 0xec, 0x02, 0x55, 0x93, 0xce, 0xc8, 0x6b, 0xd5, 0xec, 0x7a, 0x5c, 0x12, 0x70, 0xac, - 0x30, 0xd0, 0x1a, 0x9c, 0xdd, 0x6a, 0xc6, 0x29, 0x95, 0xb9, 0x30, 0x48, 0xc8, 0x2d, 0xb9, 0x3a, - 0xa5, 0x3f, 0xfa, 0xec, 0x52, 0x0e, 0x0e, 0xce, 0xad, 0x49, 0xd5, 0x17, 0x12, 0x78, 0x1b, 0x0d, - 0x92, 0x16, 0x89, 0xeb, 0x7f, 0x4a, 0x7d, 0xb9, 0x92, 0x29, 0xc7, 0x1d, 0x35, 0xd0, 0x67, 0x1d, - 0x78, 0x20, 0x26, 0xd1, 0x36, 0x89, 0xca, 0x7e, 0x95, 0xcc, 0xb5, 0xe3, 0x24, 0x6c, 0x92, 0xe8, - 0x88, 0xe6, 0xd2, 0xa9, 0xfd, 0xbd, 0xa9, 0x07, 0xca, 0xdd, 0xa9, 0xe1, 0x83, 0x58, 0xb9, 0x9f, - 0x75, 0x60, 0xbc, 0xcc, 0x0e, 0xd3, 0x4a, 0x97, 0xb6, 0x9d, 0xd8, 0xf2, 0x31, 0x95, 0xb0, 0x22, - 0x23, 0x15, 0xcd, 0x14, 0x13, 0xee, 0xcb, 0x30, 0x51, 0x26, 0x4d, 0xaf, 0x55, 0x67, 0x77, 0x77, - 0x79, 0x3c, 0xd6, 0x25, 0x18, 0x8e, 0x25, 0x2c, 0xfb, 0x96, 0x8d, 0x42, 0xc6, 0x29, 0x0e, 0x7a, - 0x94, 0xc7, 0x8e, 0xc9, 0x1b, 0x40, 0xc3, 0xfc, 0xd4, 0xc1, 0x03, 0xce, 0x62, 0x2c, 0xcb, 0xdc, - 0xb7, 0x1c, 0x18, 0x4d, 0xeb, 0x93, 0x4d, 0x54, 0x83, 0x53, 0x15, 0xed, 0xf6, 0x5c, 0x7a, 0x6f, - 0xa1, 0xf7, 0x8b, 0x76, 0x3c, 0xdf, 0xae, 0x49, 0x04, 0x67, 0xa9, 0x1e, 0x3e, 0xf4, 0xee, 0x0b, - 0x05, 0x38, 0xa5, 0x9a, 0x2a, 0x1c, 0x87, 0x6f, 0x64, 0x23, 0xe4, 0xb0, 0x8d, 0xd4, 0x3b, 0xe6, - 0xd8, 0x1f, 0x10, 0x25, 0xf7, 0x46, 0x36, 0x4a, 0xee, 0x58, 0xd9, 0x77, 0xf8, 0x42, 0xbf, 0x5d, - 0x80, 0x21, 0x95, 0x08, 0xe8, 0x39, 0x28, 0xb2, 0xa3, 0xe4, 0xdd, 0x29, 0xc4, 0xec, 0x58, 0x8a, - 0x39, 0x25, 0x4a, 0x92, 0x45, 0xe1, 0x1c, 0x39, 0x0b, 0xea, 0x30, 0x37, 0x28, 0x7a, 0x51, 0x82, - 0x39, 0x25, 0xb4, 0x04, 0x7d, 0x24, 
0xa8, 0x0a, 0xcd, 0xf8, 0xf0, 0x04, 0xd9, 0xab, 0x53, 0x57, - 0x82, 0x2a, 0xa6, 0x54, 0x58, 0x2a, 0x4e, 0xae, 0x00, 0x65, 0xde, 0x18, 0x11, 0xda, 0x8f, 0x28, - 0x75, 0x7f, 0xa5, 0x0f, 0x06, 0xca, 0xed, 0x0d, 0xaa, 0xe3, 0x7f, 0xcb, 0x81, 0x33, 0x3b, 0x99, - 0x14, 0xc1, 0xe9, 0x1c, 0xbf, 0x61, 0xcf, 0xa8, 0xaa, 0x47, 0x82, 0x29, 0x53, 0x52, 0x4e, 0x21, - 0xce, 0x6b, 0x8e, 0x91, 0xa5, 0xb3, 0xef, 0x58, 0xb2, 0x74, 0xde, 0x3a, 0xe6, 0xab, 0x10, 0x63, - 0xdd, 0xae, 0x41, 0xb8, 0xbf, 0x57, 0x04, 0xe0, 0x5f, 0x63, 0xb5, 0x95, 0xf4, 0x62, 0x26, 0x7b, - 0x06, 0x46, 0x6b, 0x24, 0x20, 0x91, 0x8c, 0xf3, 0xcb, 0x3c, 0x5f, 0xb3, 0xa8, 0x95, 0x61, 0x03, - 0x93, 0x9d, 0x49, 0x82, 0x24, 0xda, 0xe5, 0x7a, 0x6b, 0xf6, 0xba, 0x83, 0x2a, 0xc1, 0x1a, 0x16, - 0x9a, 0x36, 0xbc, 0x18, 0xdc, 0x21, 0x3e, 0x7e, 0x80, 0xd3, 0xe1, 0x7d, 0x30, 0x6e, 0xe6, 0x0e, - 0x11, 0xca, 0x9a, 0x72, 0x60, 0x9b, 0x29, 0x47, 0x70, 0x06, 0x9b, 0x4e, 0xe2, 0x6a, 0xb4, 0x8b, - 0xdb, 0x81, 0xd0, 0xda, 0xd4, 0x24, 0x9e, 0x67, 0x50, 0x2c, 0x4a, 0x59, 0xd2, 0x05, 0xb6, 0x7f, - 0x71, 0xb8, 0x48, 0xdc, 0x90, 0x26, 0x5d, 0xd0, 0xca, 0xb0, 0x81, 0x49, 0x39, 0x08, 0x33, 0x23, - 0x98, 0xcb, 0x24, 0x63, 0x1b, 0x6c, 0xc1, 0x78, 0x68, 0x9a, 0x47, 0xb8, 0x0a, 0xf3, 0xae, 0x1e, - 0xa7, 0x9e, 0x51, 0x97, 0x07, 0x1e, 0x64, 0xac, 0x29, 0x19, 0xfa, 0x54, 0x6d, 0xd5, 0x6f, 0x05, - 0x8c, 0x9a, 0x61, 0xa2, 0x5d, 0x03, 0xf7, 0xd7, 0xe0, 0x6c, 0x2b, 0xac, 0xae, 0x45, 0x7e, 0x18, - 0xf9, 0xc9, 0xee, 0x5c, 0xc3, 0x8b, 0x63, 0x36, 0x31, 0xc6, 0x4c, 0x75, 0x66, 0x2d, 0x07, 0x07, - 0xe7, 0xd6, 0xa4, 0x07, 0x8c, 0x96, 0x00, 0xb2, 0x60, 0xad, 0x22, 0x57, 0xc8, 0x24, 0x22, 0x56, - 0xa5, 0xee, 0x19, 0x38, 0x5d, 0x6e, 0xb7, 0x5a, 0x0d, 0x9f, 0x54, 0x95, 0x97, 0xc0, 0x7d, 0x3f, - 0x9c, 0x12, 0x39, 0x3c, 0x95, 0xf2, 0x70, 0xa8, 0x8c, 0xd3, 0xee, 0x9f, 0x3b, 0x70, 0x2a, 0x13, - 0x1a, 0x83, 0x5e, 0xcb, 0x6e, 0xf9, 0x56, 0x6c, 0x66, 0xfa, 0x66, 0xcf, 0x17, 0x69, 0xae, 0xfa, - 0x50, 0x97, 0x71, 0xed, 0xd6, 0xee, 0x93, 0xb0, 0xe8, 0x6f, 0xbe, 0x23, 
0xe8, 0xc1, 0xf1, 0xee, - 0x67, 0x0a, 0x90, 0x1f, 0x8f, 0x84, 0x3e, 0xda, 0x39, 0x00, 0xcf, 0x59, 0x1c, 0x00, 0x11, 0x10, - 0xd5, 0x7d, 0x0c, 0x02, 0x73, 0x0c, 0x56, 0x2c, 0x8d, 0x81, 0xe0, 0xdb, 0x39, 0x12, 0xff, 0xcb, - 0x81, 0x91, 0xf5, 0xf5, 0x65, 0x65, 0xe2, 0xc2, 0x70, 0x3e, 0xe6, 0xf7, 0xe5, 0x99, 0xdb, 0x76, - 0x2e, 0x6c, 0xb6, 0xb8, 0x17, 0x57, 0x78, 0x97, 0x59, 0x3a, 0xd5, 0x72, 0x2e, 0x06, 0xee, 0x52, - 0x13, 0x5d, 0x83, 0x33, 0x7a, 0x49, 0x59, 0x7b, 0xbd, 0xae, 0x28, 0x72, 0xd4, 0x74, 0x16, 0xe3, - 0xbc, 0x3a, 0x59, 0x52, 0xc2, 0x5a, 0xc9, 0xb6, 0xab, 0x1c, 0x52, 0xa2, 0x18, 0xe7, 0xd5, 0x71, - 0x57, 0x61, 0x64, 0xdd, 0x8b, 0x54, 0xc7, 0x3f, 0x00, 0x13, 0x95, 0xb0, 0x29, 0xad, 0x44, 0xcb, - 0x64, 0x9b, 0x34, 0x44, 0x97, 0xf9, 0x93, 0x11, 0x99, 0x32, 0xdc, 0x81, 0xed, 0xfe, 0xf7, 0x8b, - 0xa0, 0xee, 0xff, 0xf5, 0xb0, 0xc3, 0xb4, 0x54, 0xa4, 0x66, 0xd1, 0x72, 0xa4, 0xa6, 0x92, 0xb5, - 0x99, 0x68, 0xcd, 0x24, 0x8d, 0xd6, 0x1c, 0xb0, 0x1d, 0xad, 0xa9, 0x14, 0xc6, 0x8e, 0x88, 0xcd, - 0xaf, 0x38, 0x30, 0x1a, 0x84, 0x55, 0xa2, 0xdc, 0x6b, 0x83, 0x4c, 0x6b, 0x7d, 0xc9, 0x5e, 0xe0, - 0x3b, 0x8f, 0x3c, 0x14, 0xe4, 0x79, 0x14, 0xb1, 0xda, 0xa2, 0xf4, 0x22, 0x6c, 0xb4, 0x03, 0x2d, - 0x68, 0x76, 0x4b, 0xee, 0x1e, 0x78, 0x30, 0xef, 0xb8, 0x71, 0x47, 0x23, 0xe4, 0x2d, 0x4d, 0x6f, - 0x1a, 0xb6, 0x65, 0x8f, 0x93, 0x97, 0xba, 0x34, 0x2f, 0x87, 0xcc, 0x08, 0x9c, 0xea, 0x53, 0x2e, - 0x0c, 0xf0, 0x70, 0x63, 0x91, 0x0d, 0x89, 0x39, 0xdf, 0x78, 0x28, 0x32, 0x16, 0x25, 0x28, 0x91, - 0x2e, 0xfc, 0x11, 0x5b, 0xf9, 0xfd, 0x8d, 0x10, 0x81, 0x7c, 0x1f, 0x3e, 0x7a, 0x56, 0x3f, 0xc6, - 0x8e, 0xf6, 0x72, 0x8c, 0x1d, 0xeb, 0x7a, 0x84, 0xfd, 0xbc, 0x03, 0xa3, 0x15, 0x2d, 0xdf, 0x7e, - 0xe9, 0x71, 0x5b, 0xef, 0x0a, 0xe7, 0x3d, 0x8b, 0xc0, 0x7d, 0x3a, 0x46, 0x7e, 0x7f, 0x83, 0x3b, - 0x4b, 0x01, 0xc9, 0xce, 0xec, 0x6c, 0xeb, 0xb7, 0x92, 0xf5, 0xc1, 0xb4, 0x01, 0xc8, 0x50, 0x48, - 0x0a, 0xc3, 0x82, 0x17, 0x7a, 0x1d, 0x86, 0x64, 0xc4, 0xba, 0x88, 0xec, 0xc6, 0x36, 0x8c, 0xec, - 0xa6, 0x27, 
0x4f, 0xe6, 0x8d, 0xe3, 0x50, 0xac, 0x38, 0xa2, 0x3a, 0xf4, 0x55, 0xbd, 0x9a, 0x88, - 0xf1, 0x5e, 0xb1, 0x93, 0x97, 0x53, 0xf2, 0x64, 0xc7, 0xab, 0xf9, 0x99, 0x45, 0x4c, 0x59, 0xa0, - 0x5b, 0x69, 0xc2, 0xf2, 0x09, 0x6b, 0xbb, 0xaf, 0xa9, 0x26, 0x71, 0xab, 0x44, 0x47, 0xfe, 0xf3, - 0xaa, 0x70, 0x7e, 0xfe, 0x35, 0xc6, 0x76, 0xc1, 0x4e, 0x62, 0x4f, 0x9e, 0x45, 0x24, 0x75, 0xa0, - 0x52, 0x2e, 0xf5, 0x24, 0x69, 0x95, 0x7e, 0xde, 0x16, 0x17, 0x96, 0x0b, 0x83, 0x3f, 0x01, 0xbd, - 0xbe, 0xbe, 0x86, 0x19, 0x75, 0xd4, 0x80, 0x81, 0x16, 0x8b, 0xcb, 0x28, 0xfd, 0x82, 0xad, 0xbd, - 0x85, 0xc7, 0x79, 0xf0, 0xb9, 0xc9, 0xff, 0xc7, 0x82, 0x07, 0xba, 0x02, 0x83, 0xfc, 0xdd, 0x0d, - 0x1e, 0x63, 0x3f, 0x72, 0x79, 0xb2, 0xfb, 0xeb, 0x1d, 0xe9, 0x46, 0xc1, 0x7f, 0xc7, 0x58, 0xd6, - 0x45, 0x5f, 0x70, 0x60, 0x9c, 0x4a, 0xd4, 0xf4, 0xa1, 0x90, 0x12, 0xb2, 0x25, 0xb3, 0x6e, 0xc4, - 0x54, 0x23, 0x91, 0xb2, 0x46, 0x1d, 0x93, 0xae, 0x19, 0xec, 0x70, 0x86, 0x3d, 0x7a, 0x03, 0x86, - 0x62, 0xbf, 0x4a, 0x2a, 0x5e, 0x14, 0x97, 0xce, 0x1c, 0x4f, 0x53, 0x52, 0x77, 0x8b, 0x60, 0x84, - 0x15, 0xcb, 0xdc, 0xf7, 0xfc, 0xcf, 0xde, 0xe3, 0xf7, 0xfc, 0xff, 0x8e, 0x03, 0xe7, 0x78, 0x9e, - 0xf8, 0xec, 0x23, 0x01, 0xe7, 0x8e, 0x68, 0x5e, 0x61, 0x97, 0x03, 0x66, 0xf2, 0x48, 0xe2, 0x7c, - 0x4e, 0x2c, 0xd1, 0xac, 0xf9, 0xae, 0xcb, 0x79, 0xab, 0x6e, 0xc7, 0xde, 0xdf, 0x72, 0x41, 0x4f, - 0xc1, 0x48, 0x4b, 0x6c, 0x87, 0x7e, 0xdc, 0x64, 0x57, 0x3d, 0xfa, 0xf8, 0x25, 0xbc, 0xb5, 0x14, - 0x8c, 0x75, 0x1c, 0x23, 0xeb, 0xf0, 0x13, 0x07, 0x65, 0x1d, 0x46, 0x37, 0x60, 0x24, 0x09, 0x1b, - 0x22, 0xf1, 0x66, 0x5c, 0x2a, 0xb1, 0x19, 0x78, 0x31, 0x6f, 0x6d, 0xad, 0x2b, 0xb4, 0xf4, 0x24, - 0x9b, 0xc2, 0x62, 0xac, 0xd3, 0x61, 0xe1, 0xb5, 0x22, 0xff, 0x7e, 0xc4, 0x8e, 0xb0, 0xf7, 0x67, - 0xc2, 0x6b, 0xf5, 0x42, 0x6c, 0xe2, 0xa2, 0x45, 0x38, 0xdd, 0xea, 0x38, 0x03, 0xf3, 0x2b, 0x66, - 0x2a, 0xa2, 0xa1, 0xf3, 0x00, 0xdc, 0x59, 0xc7, 0x38, 0xfd, 0x3e, 0x70, 0xd0, 0xe9, 0xb7, 0x4b, - 0x0e, 0xde, 0x07, 0x8f, 0x92, 0x83, 0x17, 0x55, 
0xe1, 0x41, 0xaf, 0x9d, 0x84, 0x2c, 0xdf, 0x8b, - 0x59, 0x85, 0x47, 0x1a, 0x3f, 0xcc, 0x83, 0x97, 0xf7, 0xf7, 0xa6, 0x1e, 0x9c, 0x39, 0x00, 0x0f, - 0x1f, 0x48, 0x05, 0xbd, 0x0a, 0x43, 0x44, 0xe4, 0x11, 0x2e, 0xfd, 0x9c, 0x2d, 0x25, 0xc1, 0xcc, - 0x4c, 0x2c, 0x83, 0x38, 0x39, 0x0c, 0x2b, 0x7e, 0x68, 0x1d, 0x46, 0xea, 0x61, 0x9c, 0xcc, 0x34, - 0x7c, 0x2f, 0x26, 0x71, 0xe9, 0x21, 0x36, 0x69, 0x72, 0x75, 0xaf, 0xab, 0x12, 0x2d, 0x9d, 0x33, - 0x57, 0xd3, 0x9a, 0x58, 0x27, 0x83, 0x08, 0x73, 0x3e, 0xb2, 0x30, 0x6b, 0xe9, 0xc7, 0xb9, 0xc8, - 0x3a, 0xf6, 0x58, 0x1e, 0xe5, 0xb5, 0xb0, 0x5a, 0x36, 0xb1, 0x95, 0xf7, 0x51, 0x07, 0xe2, 0x2c, - 0x4d, 0xf4, 0x0c, 0x8c, 0xb6, 0xc2, 0x6a, 0xb9, 0x45, 0x2a, 0x6b, 0x5e, 0x52, 0xa9, 0x97, 0xa6, - 0x4c, 0xab, 0xdb, 0x9a, 0x56, 0x86, 0x0d, 0x4c, 0xd4, 0x82, 0xc1, 0x26, 0x4f, 0x04, 0x50, 0x7a, - 0xc4, 0xd6, 0xd9, 0x46, 0x64, 0x16, 0xe0, 0xfa, 0x82, 0xf8, 0x81, 0x25, 0x1b, 0xf4, 0x4f, 0x1c, - 0x38, 0x95, 0xb9, 0xbc, 0x54, 0x7a, 0x87, 0x35, 0x95, 0xc5, 0x24, 0x3c, 0xfb, 0x18, 0x1b, 0x3e, - 0x13, 0x78, 0xbb, 0x13, 0x84, 0xb3, 0x2d, 0xe2, 0xe3, 0xc2, 0xb2, 0x79, 0x94, 0x1e, 0xb5, 0x37, - 0x2e, 0x8c, 0xa0, 0x1c, 0x17, 0xf6, 0x03, 0x4b, 0x36, 0xe8, 0x09, 0x18, 0x14, 0x89, 0xf7, 0x4a, - 0x8f, 0x99, 0x1e, 0x64, 0x91, 0x9f, 0x0f, 0xcb, 0xf2, 0xc9, 0xf7, 0xc3, 0xe9, 0x8e, 0xa3, 0xdb, - 0xa1, 0x52, 0x4a, 0xfc, 0x86, 0x03, 0xfa, 0x6d, 0x67, 0xeb, 0x8f, 0x77, 0x3c, 0x03, 0xa3, 0x15, - 0xfe, 0xc4, 0x1f, 0xbf, 0x2f, 0xdd, 0x6f, 0xda, 0x3f, 0xe7, 0xb4, 0x32, 0x6c, 0x60, 0xba, 0x57, - 0x01, 0x75, 0x66, 0x56, 0x3f, 0x52, 0xbe, 0xa2, 0x7f, 0xe6, 0xc0, 0x98, 0xa1, 0x33, 0x58, 0xf7, - 0x11, 0x2e, 0x00, 0x6a, 0xfa, 0x51, 0x14, 0x46, 0xfa, 0x5b, 0x6a, 0x22, 0xa7, 0x01, 0xbb, 0x35, - 0xb6, 0xd2, 0x51, 0x8a, 0x73, 0x6a, 0xb8, 0xff, 0xa2, 0x1f, 0xd2, 0x28, 0x66, 0x95, 0xba, 0xd6, - 0xe9, 0x9a, 0xba, 0xf6, 0x49, 0x18, 0x7a, 0x39, 0x0e, 0x83, 0xb5, 0x34, 0xc1, 0xad, 0xfa, 0x16, - 0xcf, 0x96, 0x57, 0xaf, 0x33, 0x4c, 0x85, 0xc1, 0xb0, 0x5f, 0x59, 0xf0, 0x1b, 0x49, 
0x67, 0x06, - 0xd4, 0x67, 0x9f, 0xe3, 0x70, 0xac, 0x30, 0xd8, 0xb3, 0x6a, 0xdb, 0x44, 0x19, 0xc6, 0xd3, 0x67, - 0xd5, 0xf8, 0xa3, 0x09, 0xac, 0x0c, 0x5d, 0x82, 0x61, 0x65, 0x54, 0x17, 0x96, 0x7a, 0x35, 0x52, - 0xca, 0xf2, 0x8e, 0x53, 0x1c, 0xa6, 0x10, 0x0a, 0x43, 0xac, 0x30, 0xa1, 0x94, 0x6d, 0x1c, 0x4f, - 0x32, 0xa6, 0x5d, 0x2e, 0xdb, 0x25, 0x18, 0x2b, 0x96, 0x79, 0x7e, 0xd2, 0xe1, 0x63, 0xf1, 0x93, - 0x6a, 0x21, 0xf5, 0xc5, 0x5e, 0x43, 0xea, 0xcd, 0xb9, 0x3d, 0xd4, 0xd3, 0xdc, 0xfe, 0x54, 0x1f, - 0x0c, 0x3e, 0x4f, 0x22, 0x96, 0xf8, 0xfb, 0x09, 0x18, 0xdc, 0xe6, 0xff, 0x66, 0xef, 0x63, 0x0a, - 0x0c, 0x2c, 0xcb, 0xe9, 0x77, 0xdb, 0x68, 0xfb, 0x8d, 0xea, 0x7c, 0xba, 0x8a, 0xd3, 0x9c, 0x81, - 0xb2, 0x00, 0xa7, 0x38, 0xb4, 0x42, 0x8d, 0x6a, 0xf6, 0xcd, 0xa6, 0xdf, 0xf1, 0x62, 0xf8, 0xa2, - 0x2c, 0xc0, 0x29, 0x0e, 0x7a, 0x0c, 0x06, 0x6a, 0x7e, 0xb2, 0xee, 0xd5, 0xb2, 0x5e, 0xbe, 0x45, - 0x06, 0xc5, 0xa2, 0x94, 0xb9, 0x89, 0xfc, 0x64, 0x3d, 0x22, 0xcc, 0xb2, 0xdb, 0x91, 0x0e, 0x62, - 0x51, 0x2b, 0xc3, 0x06, 0x26, 0x6b, 0x52, 0x28, 0x7a, 0x26, 0x82, 0x30, 0xd3, 0x26, 0xc9, 0x02, - 0x9c, 0xe2, 0xd0, 0xf9, 0x5f, 0x09, 0x9b, 0x2d, 0xbf, 0x21, 0xc2, 0x83, 0xb5, 0xf9, 0x3f, 0x27, - 0xe0, 0x58, 0x61, 0x50, 0x6c, 0x2a, 0xc2, 0xa8, 0xf8, 0xc9, 0x3e, 0x61, 0xb5, 0x26, 0xe0, 0x58, - 0x61, 0xb8, 0xcf, 0xc3, 0x18, 0x5f, 0xc9, 0x73, 0x0d, 0xcf, 0x6f, 0x2e, 0xce, 0xa1, 0x2b, 0x1d, - 0x21, 0xf5, 0x4f, 0xe4, 0x84, 0xd4, 0x9f, 0x33, 0x2a, 0x75, 0x86, 0xd6, 0xbb, 0x3f, 0x2c, 0xc0, - 0xd0, 0x09, 0xbe, 0x02, 0x78, 0xe2, 0x0f, 0xda, 0xa2, 0x5b, 0x99, 0x17, 0x00, 0xd7, 0x6c, 0xde, - 0x90, 0x39, 0xf0, 0xf5, 0xbf, 0xff, 0x52, 0x80, 0xf3, 0x12, 0x55, 0x9e, 0xe5, 0x16, 0xe7, 0xd8, - 0x13, 0x56, 0xc7, 0x3f, 0xd0, 0x91, 0x31, 0xd0, 0x6b, 0xf6, 0x4e, 0xa3, 0x8b, 0x73, 0x5d, 0x87, - 0xfa, 0xd5, 0xcc, 0x50, 0x63, 0xab, 0x5c, 0x0f, 0x1e, 0xec, 0xbf, 0x70, 0x60, 0x32, 0x7f, 0xb0, - 0x4f, 0xe0, 0xd1, 0xc5, 0x37, 0xcc, 0x47, 0x17, 0x7f, 0xc9, 0xde, 0x14, 0x33, 0xbb, 0xd2, 0xe5, - 0xf9, 0xc5, 0x3f, 0x73, 
0xe0, 0xac, 0xac, 0xc0, 0x76, 0xcf, 0x59, 0x3f, 0x60, 0x81, 0x28, 0xc7, - 0x3f, 0xcd, 0x5e, 0x37, 0xa6, 0xd9, 0x8b, 0xf6, 0x3a, 0xae, 0xf7, 0xa3, 0xeb, 0x63, 0xd5, 0x7f, - 0xea, 0x40, 0x29, 0xaf, 0xc2, 0x09, 0x7c, 0xf2, 0xd7, 0xcc, 0x4f, 0xfe, 0xfc, 0xf1, 0xf4, 0xbc, - 0xfb, 0x07, 0x2f, 0x75, 0x1b, 0x28, 0xd4, 0x90, 0x7a, 0x95, 0x63, 0xcb, 0x47, 0xcb, 0x59, 0xe4, - 0x2b, 0x68, 0x0d, 0x18, 0x88, 0x59, 0xd4, 0x86, 0x98, 0x02, 0x57, 0x6d, 0x68, 0x5b, 0x94, 0x9e, - 0xb0, 0xb1, 0xb3, 0xff, 0xb1, 0xe0, 0xe1, 0xfe, 0x66, 0x01, 0x2e, 0xa8, 0xc7, 0x54, 0xc9, 0x36, - 0x69, 0xa4, 0xeb, 0x83, 0x3d, 0x93, 0xe0, 0xa9, 0x9f, 0xf6, 0x9e, 0x49, 0x48, 0x59, 0xa4, 0x6b, - 0x21, 0x85, 0x61, 0x8d, 0x27, 0x2a, 0xc3, 0x39, 0xf6, 0xac, 0xc1, 0x82, 0x1f, 0x78, 0x0d, 0xff, - 0x55, 0x12, 0x61, 0xd2, 0x0c, 0xb7, 0xbd, 0x86, 0xd0, 0xd4, 0xd5, 0x95, 0xdc, 0x85, 0x3c, 0x24, - 0x9c, 0x5f, 0xb7, 0xe3, 0xc4, 0xdd, 0xd7, 0xeb, 0x89, 0xdb, 0xfd, 0x13, 0x07, 0x46, 0x4f, 0xf0, - 0xe9, 0xd9, 0xd0, 0x5c, 0x12, 0xcf, 0xda, 0x5b, 0x12, 0x5d, 0x96, 0xc1, 0x5e, 0x11, 0x3a, 0x5e, - 0xe3, 0x44, 0x9f, 0x76, 0x54, 0x5c, 0x0b, 0x8f, 0xfd, 0xfb, 0x90, 0xbd, 0x76, 0x1c, 0x26, 0x8f, - 0x23, 0xfa, 0x7a, 0x26, 0xb9, 0x65, 0xc1, 0x56, 0x86, 0xa6, 0x8e, 0xd6, 0x1c, 0x21, 0xc9, 0xe5, - 0x57, 0x1c, 0x00, 0xde, 0x4e, 0x91, 0x1b, 0x9b, 0xb6, 0x6d, 0xe3, 0xd8, 0x46, 0x8a, 0x32, 0xe1, - 0x4d, 0x53, 0x4b, 0x28, 0x2d, 0xc0, 0x5a, 0x4b, 0xee, 0x22, 0x7b, 0xe5, 0x5d, 0x27, 0xce, 0xfc, - 0x82, 0x03, 0xa7, 0x32, 0xcd, 0xcd, 0xa9, 0xbf, 0x69, 0xbe, 0xd2, 0x67, 0x41, 0xb3, 0x32, 0x33, - 0x26, 0xeb, 0xc6, 0x93, 0xff, 0xe6, 0x82, 0xf1, 0x8c, 0x31, 0x7a, 0x0d, 0x86, 0xa5, 0xe5, 0x43, - 0x4e, 0x6f, 0x9b, 0xaf, 0x95, 0xaa, 0xe3, 0x8d, 0x84, 0xc4, 0x38, 0xe5, 0x97, 0x09, 0x9b, 0x2b, - 0xf4, 0x14, 0x36, 0x77, 0x6f, 0xdf, 0x3a, 0xcd, 0xb7, 0x4b, 0xf7, 0x1f, 0x8b, 0x5d, 0xfa, 0x41, - 0xeb, 0x76, 0xe9, 0x87, 0x4e, 0xd8, 0x2e, 0xad, 0x39, 0x09, 0x8b, 0x77, 0xe1, 0x24, 0x7c, 0x0d, - 0xce, 0x6e, 0xa7, 0x87, 0x4e, 0x35, 0x93, 0x44, 0x5e, 0xa0, 
0x27, 0x72, 0xad, 0xd1, 0xf4, 0x00, - 0x1d, 0x27, 0x24, 0x48, 0xb4, 0xe3, 0x6a, 0x1a, 0xb1, 0xf7, 0x7c, 0x0e, 0x39, 0x9c, 0xcb, 0x24, - 0xeb, 0xed, 0x19, 0xec, 0xc1, 0xdb, 0xf3, 0x96, 0x03, 0xe7, 0xbc, 0x8e, 0x3b, 0x5c, 0x98, 0x6c, - 0x8a, 0x90, 0x93, 0x9b, 0xf6, 0x54, 0x08, 0x83, 0xbc, 0x70, 0xab, 0xe5, 0x15, 0xe1, 0xfc, 0x06, - 0xa1, 0x47, 0x53, 0xd7, 0x3b, 0x8f, 0xf3, 0xcc, 0xf7, 0x93, 0x7f, 0x3d, 0x1b, 0xcf, 0x03, 0x6c, - 0xe8, 0x3f, 0x62, 0xf7, 0xb4, 0x6d, 0x21, 0xa6, 0x67, 0xe4, 0x2e, 0x62, 0x7a, 0x32, 0xae, 0xb7, - 0x51, 0x4b, 0xae, 0xb7, 0x00, 0x26, 0xfc, 0xa6, 0x57, 0x23, 0x6b, 0xed, 0x46, 0x83, 0xdf, 0x01, - 0x91, 0xef, 0xc9, 0xe6, 0x5a, 0xf0, 0x96, 0xc3, 0x8a, 0xd7, 0xc8, 0x3e, 0xdb, 0xad, 0xee, 0xba, - 0x5c, 0xcb, 0x50, 0xc2, 0x1d, 0xb4, 0xe9, 0x84, 0x65, 0x09, 0xea, 0x48, 0x42, 0x47, 0x9b, 0x05, - 0x8e, 0x0c, 0xf1, 0x09, 0x7b, 0x35, 0x05, 0x63, 0x1d, 0x07, 0x2d, 0xc1, 0x70, 0x35, 0x88, 0xc5, - 0x75, 0xd4, 0x53, 0x4c, 0x98, 0xbd, 0x93, 0x8a, 0xc0, 0xf9, 0xeb, 0x65, 0x75, 0x11, 0xf5, 0xc1, - 0x9c, 0x8c, 0x8b, 0xaa, 0x1c, 0xa7, 0xf5, 0xd1, 0x0a, 0x23, 0x26, 0x1e, 0xdb, 0xe2, 0xf1, 0x1c, - 0x0f, 0x77, 0x71, 0x18, 0xcd, 0x5f, 0x97, 0xcf, 0x85, 0x8d, 0x09, 0x76, 0xe2, 0xd5, 0xac, 0x94, - 0x82, 0xf6, 0xae, 0xef, 0xe9, 0x03, 0xdf, 0xf5, 0x65, 0xa9, 0x56, 0x93, 0x86, 0x72, 0x0f, 0x5f, - 0xb4, 0x96, 0x6a, 0x35, 0x8d, 0x94, 0x14, 0xa9, 0x56, 0x53, 0x00, 0xd6, 0x59, 0xa2, 0xd5, 0x6e, - 0x6e, 0xf2, 0x33, 0x4c, 0x68, 0x1c, 0xde, 0xe9, 0xad, 0xfb, 0x4b, 0xcf, 0x1e, 0xe8, 0x2f, 0xed, - 0xf0, 0xef, 0x9e, 0x3b, 0x84, 0x7f, 0xb7, 0xce, 0x92, 0x60, 0x2e, 0xce, 0x09, 0x97, 0xba, 0x85, - 0xf3, 0x1d, 0x4b, 0xbb, 0xc1, 0x23, 0x4f, 0xd9, 0xbf, 0x98, 0x33, 0xe8, 0x1a, 0x50, 0x7d, 0xe1, - 0xc8, 0x01, 0xd5, 0x54, 0x3c, 0xa7, 0x70, 0x96, 0x4d, 0xb5, 0x28, 0xc4, 0x73, 0x0a, 0xc6, 0x3a, - 0x4e, 0xd6, 0x5b, 0x7a, 0xff, 0xb1, 0x79, 0x4b, 0x27, 0x4f, 0xc0, 0x5b, 0xfa, 0x40, 0xcf, 0xde, - 0xd2, 0x5b, 0x70, 0xa6, 0x15, 0x56, 0xe7, 0xfd, 0x38, 0x6a, 0xb3, 0x4b, 0x71, 0xb3, 0xed, 0x6a, - 
0x8d, 0x24, 0xcc, 0xdd, 0x3a, 0x72, 0xf9, 0x9d, 0x7a, 0x23, 0x5b, 0x6c, 0x21, 0xcb, 0x35, 0x9a, - 0xa9, 0xc0, 0x4c, 0x27, 0x2c, 0xea, 0x36, 0xa7, 0x10, 0xe7, 0xb1, 0xd0, 0xfd, 0xb4, 0x0f, 0x9f, - 0x8c, 0x9f, 0xf6, 0x03, 0x30, 0x14, 0xd7, 0xdb, 0x49, 0x35, 0xdc, 0x09, 0x98, 0x33, 0x7e, 0x78, - 0xf6, 0x1d, 0xca, 0x94, 0x2d, 0xe0, 0xb7, 0xf7, 0xa6, 0x26, 0xe4, 0xff, 0x9a, 0x15, 0x5b, 0x40, - 0xd0, 0x37, 0xba, 0xdc, 0xdf, 0x71, 0x8f, 0xf3, 0xfe, 0xce, 0x85, 0x43, 0xdd, 0xdd, 0xc9, 0x73, - 0x46, 0x3f, 0xf2, 0x33, 0xe7, 0x8c, 0xfe, 0x9a, 0x03, 0x63, 0xdb, 0xba, 0xcb, 0x40, 0x38, 0xcc, - 0x2d, 0x04, 0xee, 0x18, 0x9e, 0x88, 0x59, 0x97, 0xca, 0x39, 0x03, 0x74, 0x3b, 0x0b, 0xc0, 0x66, - 0x4b, 0x72, 0x82, 0x8a, 0x1e, 0xbd, 0x57, 0x41, 0x45, 0x6f, 0x30, 0x39, 0x26, 0x0f, 0xb9, 0xcc, - 0x8b, 0x6e, 0x37, 0xa6, 0x58, 0xca, 0x44, 0x15, 0x52, 0xac, 0xf3, 0x43, 0x9f, 0x77, 0x60, 0x42, - 0x9e, 0xcb, 0x84, 0xcb, 0x2f, 0x16, 0x51, 0x91, 0x36, 0x8f, 0x83, 0x2c, 0xac, 0x7e, 0x3d, 0xc3, - 0x07, 0x77, 0x70, 0xa6, 0x52, 0x5d, 0x05, 0xa1, 0xd5, 0x62, 0x16, 0xfc, 0x2b, 0x74, 0x98, 0x99, - 0x14, 0x8c, 0x75, 0x1c, 0xf4, 0x4d, 0xf5, 0x58, 0xff, 0x13, 0x4c, 0xa0, 0xbf, 0x60, 0x59, 0x37, - 0xb5, 0xf1, 0x62, 0x3f, 0xfa, 0x92, 0x03, 0x13, 0x3b, 0x19, 0x83, 0x86, 0x08, 0x0b, 0xc5, 0xf6, - 0x4d, 0x25, 0x7c, 0xb8, 0xb3, 0x50, 0xdc, 0xd1, 0x02, 0xf4, 0x39, 0xd3, 0xd0, 0xc9, 0xe3, 0x47, - 0x2d, 0x0e, 0x60, 0xc6, 0xb0, 0xca, 0xaf, 0xb9, 0xe5, 0x5b, 0x3c, 0xef, 0x3a, 0x3e, 0x64, 0x92, - 0x76, 0x26, 0xfd, 0x58, 0x39, 0x55, 0x89, 0x69, 0x6f, 0xb1, 0xb0, 0xd8, 0x8d, 0xcf, 0xaf, 0x9b, - 0x5b, 0xbe, 0x74, 0x1e, 0xc6, 0x4d, 0xdf, 0x1e, 0x7a, 0x97, 0xf9, 0x0e, 0xc4, 0xc5, 0x6c, 0x4a, - 0xfd, 0x31, 0x89, 0x6f, 0xa4, 0xd5, 0x37, 0xf2, 0xde, 0x17, 0x8e, 0x35, 0xef, 0x7d, 0xdf, 0xc9, - 0xe4, 0xbd, 0x9f, 0x38, 0x8e, 0xbc, 0xf7, 0xa7, 0x0f, 0x95, 0xf7, 0x5e, 0x7b, 0x77, 0xa0, 0xff, - 0x0e, 0xef, 0x0e, 0xcc, 0xc0, 0x29, 0x79, 0xf7, 0x87, 0x88, 0xd4, 0xe2, 0xdc, 0xed, 0x7f, 0x41, - 0x54, 0x39, 0x35, 0x67, 0x16, 0xe3, 
0x2c, 0x3e, 0x5d, 0x64, 0xc5, 0x80, 0xd5, 0x1c, 0xb0, 0xf5, - 0x28, 0x91, 0x39, 0xb5, 0xd8, 0xf1, 0x59, 0x88, 0x28, 0x19, 0xed, 0x5c, 0x64, 0xb0, 0xdb, 0xf2, - 0x1f, 0xcc, 0x5b, 0x80, 0x5e, 0x82, 0x52, 0xb8, 0xb9, 0xd9, 0x08, 0xbd, 0x6a, 0x9a, 0x9c, 0x5f, - 0xc6, 0x25, 0xf0, 0xbb, 0x9b, 0x2a, 0x97, 0xeb, 0x6a, 0x17, 0x3c, 0xdc, 0x95, 0x02, 0x7a, 0x8b, - 0x2a, 0x26, 0x49, 0x18, 0x91, 0x6a, 0x6a, 0xab, 0x19, 0x66, 0x7d, 0x26, 0xd6, 0xfb, 0x5c, 0x36, - 0xf9, 0xf0, 0xde, 0xab, 0x8f, 0x92, 0x29, 0xc5, 0xd9, 0x66, 0xa1, 0x08, 0xce, 0xb7, 0xf2, 0x4c, - 0x45, 0xb1, 0xb8, 0xb1, 0x74, 0x90, 0xc1, 0x4a, 0x2e, 0xdd, 0xf3, 0xb9, 0xc6, 0xa6, 0x18, 0x77, - 0xa1, 0xac, 0x27, 0xd0, 0x1f, 0x3a, 0x99, 0x04, 0xfa, 0x1f, 0x03, 0xa8, 0xc8, 0x54, 0x5e, 0xd2, - 0xf8, 0xb0, 0x64, 0xe5, 0x2a, 0x0d, 0xa7, 0xa9, 0xbd, 0x59, 0xaa, 0xd8, 0x60, 0x8d, 0x25, 0xfa, - 0x3f, 0xb9, 0x2f, 0x4c, 0x70, 0x0b, 0x4b, 0xcd, 0xfa, 0x9c, 0xf8, 0x99, 0x7b, 0x65, 0xe2, 0x9f, - 0x3a, 0x30, 0xc9, 0x67, 0x5e, 0x56, 0xb9, 0xa7, 0xaa, 0x85, 0xb8, 0xdb, 0x63, 0x3b, 0x74, 0x85, - 0x45, 0xf1, 0x95, 0x0d, 0xae, 0xcc, 0xd1, 0x7d, 0x40, 0x4b, 0xd0, 0x57, 0x72, 0x8e, 0x14, 0xa7, - 0x6c, 0xd9, 0x2c, 0xf3, 0xdf, 0x09, 0x38, 0xb3, 0xdf, 0xcb, 0x29, 0xe2, 0x9f, 0x77, 0x35, 0xa9, - 0x22, 0xd6, 0xbc, 0x5f, 0x3e, 0x26, 0x93, 0xaa, 0xfe, 0x98, 0xc1, 0xa1, 0x0c, 0xab, 0x5f, 0x70, - 0x60, 0xc2, 0xcb, 0x84, 0x9a, 0x30, 0x3b, 0x90, 0x15, 0x9b, 0xd4, 0x4c, 0x94, 0xc6, 0xaf, 0x30, - 0x25, 0x2f, 0x1b, 0xd5, 0x82, 0x3b, 0x98, 0xa3, 0x1f, 0x3a, 0xf0, 0x40, 0xe2, 0xc5, 0x5b, 0x3c, - 0x55, 0x70, 0x9c, 0xde, 0xd5, 0x15, 0x8d, 0x3b, 0xcb, 0x56, 0xe3, 0x2b, 0xd6, 0x57, 0xe3, 0x7a, - 0x77, 0x9e, 0x7c, 0x5d, 0x3e, 0x22, 0xd6, 0xe5, 0x03, 0x07, 0x60, 0xe2, 0x83, 0x9a, 0x3e, 0xf9, - 0x69, 0x87, 0x3f, 0x29, 0xd5, 0x55, 0xe5, 0xdb, 0x30, 0x55, 0xbe, 0x65, 0x9b, 0x8f, 0xda, 0xe8, - 0xba, 0xe7, 0xaf, 0x39, 0x70, 0x36, 0x6f, 0x47, 0xca, 0x69, 0xd2, 0x47, 0xcc, 0x26, 0x59, 0x3c, - 0x65, 0xe9, 0x0d, 0xb2, 0xf2, 0xa6, 0xc6, 0xe4, 0x75, 0x78, 0xf8, 0x4e, 
0x5f, 0xf1, 0x4e, 0xf4, - 0x86, 0x74, 0xb5, 0xf8, 0x4f, 0x87, 0x35, 0x2f, 0x64, 0x42, 0x5a, 0xd6, 0x63, 0xb8, 0x03, 0x18, - 0xf0, 0x83, 0x86, 0x1f, 0x10, 0x71, 0x5f, 0xd3, 0xe6, 0x19, 0x56, 0xbc, 0x89, 0x43, 0xa9, 0x63, - 0xc1, 0xe5, 0x1e, 0x3b, 0x25, 0xb3, 0xaf, 0x8c, 0xf5, 0x9f, 0xfc, 0x2b, 0x63, 0x3b, 0x30, 0xbc, - 0xe3, 0x27, 0x75, 0x16, 0x4c, 0x21, 0x7c, 0x7d, 0x16, 0xee, 0x39, 0x52, 0x72, 0x69, 0xdf, 0x6f, - 0x4a, 0x06, 0x38, 0xe5, 0x85, 0x2e, 0x71, 0xc6, 0x2c, 0x72, 0x3b, 0x1b, 0x52, 0x7b, 0x53, 0x16, - 0xe0, 0x14, 0x87, 0x0e, 0xd6, 0x28, 0xfd, 0x25, 0xf3, 0x19, 0x89, 0xb4, 0xbb, 0x36, 0xd2, 0x29, - 0x0a, 0x8a, 0xfc, 0x36, 0xf1, 0x4d, 0x8d, 0x07, 0x36, 0x38, 0xaa, 0xcc, 0xc7, 0x43, 0x5d, 0x33, - 0x1f, 0xbf, 0xce, 0x14, 0xb6, 0xc4, 0x0f, 0xda, 0x64, 0x35, 0x10, 0xf1, 0xde, 0xcb, 0x76, 0xee, - 0x3e, 0x73, 0x9a, 0xfc, 0x08, 0x9e, 0xfe, 0xc6, 0x1a, 0x3f, 0xcd, 0xe5, 0x32, 0x72, 0xa0, 0xcb, - 0x25, 0x35, 0xb9, 0x8c, 0x5a, 0x37, 0xb9, 0x24, 0xa4, 0x65, 0xc5, 0xe4, 0xf2, 0x33, 0x65, 0x0e, - 0xf8, 0x0b, 0x07, 0x90, 0xd2, 0xbb, 0x94, 0x40, 0x3d, 0x81, 0xa0, 0xca, 0x8f, 0x3b, 0x00, 0x81, - 0x7a, 0x8b, 0xd2, 0xee, 0x2e, 0xc8, 0x69, 0xa6, 0x0d, 0x48, 0x61, 0x58, 0xe3, 0xe9, 0xfe, 0x0f, - 0x27, 0x8d, 0x5d, 0x4e, 0xfb, 0x7e, 0x02, 0x41, 0x64, 0xbb, 0x66, 0x10, 0xd9, 0xba, 0x45, 0xd3, - 0xbd, 0xea, 0x46, 0x97, 0x70, 0xb2, 0x9f, 0x14, 0xe0, 0x94, 0x8e, 0x5c, 0x26, 0x27, 0xf1, 0xb1, - 0x77, 0x8c, 0x08, 0xda, 0x1b, 0x76, 0xfb, 0x5b, 0x16, 0x1e, 0xa0, 0xbc, 0x68, 0xed, 0x8f, 0x65, - 0xa2, 0xb5, 0x6f, 0xda, 0x67, 0x7d, 0x70, 0xc8, 0xf6, 0x7f, 0x75, 0xe0, 0x4c, 0xa6, 0xc6, 0x09, - 0x4c, 0xb0, 0x6d, 0x73, 0x82, 0x3d, 0x67, 0xbd, 0xd7, 0x5d, 0x66, 0xd7, 0xb7, 0x0a, 0x1d, 0xbd, - 0x65, 0x87, 0xb8, 0x4f, 0x39, 0x50, 0xa4, 0xda, 0xb2, 0x8c, 0xe7, 0xfa, 0xc8, 0xb1, 0xcc, 0x00, - 0xa6, 0xd7, 0x0b, 0xe9, 0xac, 0xda, 0xc7, 0x60, 0x98, 0x73, 0x9f, 0xfc, 0xa4, 0x03, 0x90, 0x22, - 0xdd, 0x2b, 0x15, 0xd8, 0xfd, 0x4e, 0x01, 0xce, 0xe5, 0x4e, 0x23, 0xf4, 0x19, 0x65, 0x91, 0x73, - 0x6c, 0x47, 
0x2b, 0x1a, 0x8c, 0x74, 0xc3, 0xdc, 0x98, 0x61, 0x98, 0x13, 0xf6, 0xb8, 0x7b, 0x75, - 0x80, 0x11, 0x62, 0x5a, 0x1b, 0xac, 0x1f, 0x3b, 0x69, 0x00, 0xac, 0xca, 0x6b, 0xf4, 0x97, 0xf0, - 0x12, 0x8f, 0xfb, 0x13, 0xed, 0x86, 0x83, 0xec, 0xe8, 0x09, 0xc8, 0x8a, 0x1d, 0x53, 0x56, 0x60, - 0xfb, 0x7e, 0xe4, 0x2e, 0xc2, 0xe2, 0x15, 0xc8, 0x73, 0x2c, 0xf7, 0x96, 0x14, 0xd1, 0xb8, 0x0e, - 0x5b, 0xe8, 0xf9, 0x3a, 0xec, 0x18, 0x8c, 0xbc, 0xe8, 0xb7, 0x94, 0x0f, 0x74, 0xfa, 0xbb, 0x3f, - 0xba, 0x78, 0xdf, 0xf7, 0x7e, 0x74, 0xf1, 0xbe, 0x1f, 0xfe, 0xe8, 0xe2, 0x7d, 0x1f, 0xdf, 0xbf, - 0xe8, 0x7c, 0x77, 0xff, 0xa2, 0xf3, 0xbd, 0xfd, 0x8b, 0xce, 0x0f, 0xf7, 0x2f, 0x3a, 0xff, 0x61, - 0xff, 0xa2, 0xf3, 0xf7, 0xfe, 0xe3, 0xc5, 0xfb, 0x5e, 0x1c, 0x92, 0x1d, 0xfb, 0xff, 0x01, 0x00, - 0x00, 0xff, 0xff, 0x03, 0xd7, 0x47, 0xef, 0x2e, 0xd5, 0x00, 0x00, -} - -func (m *Amount) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Amount) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Amount) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ArchiveStrategy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ArchiveStrategy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ArchiveStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Zip != nil { - { - size, err := 
m.Zip.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.None != nil { - { - size, err := m.None.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Tar != nil { - { - size, err := m.Tar.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Arguments) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Arguments) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Arguments) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Artifacts) > 0 { - for iNdEx := len(m.Artifacts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Artifacts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Parameters) > 0 { - for iNdEx := len(m.Parameters) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Parameters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ArtGCStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ArtGCStatus) MarshalTo(dAtA []byte) (int, error) { - size := 
m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ArtGCStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i-- - if m.NotSpecified { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - if len(m.PodsRecouped) > 0 { - keysForPodsRecouped := make([]string, 0, len(m.PodsRecouped)) - for k := range m.PodsRecouped { - keysForPodsRecouped = append(keysForPodsRecouped, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForPodsRecouped) - for iNdEx := len(keysForPodsRecouped) - 1; iNdEx >= 0; iNdEx-- { - v := m.PodsRecouped[string(keysForPodsRecouped[iNdEx])] - baseI := i - i-- - if v { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - i -= len(keysForPodsRecouped[iNdEx]) - copy(dAtA[i:], keysForPodsRecouped[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPodsRecouped[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.StrategiesProcessed) > 0 { - keysForStrategiesProcessed := make([]string, 0, len(m.StrategiesProcessed)) - for k := range m.StrategiesProcessed { - keysForStrategiesProcessed = append(keysForStrategiesProcessed, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForStrategiesProcessed) - for iNdEx := len(keysForStrategiesProcessed) - 1; iNdEx >= 0; iNdEx-- { - v := m.StrategiesProcessed[ArtifactGCStrategy(keysForStrategiesProcessed[iNdEx])] - baseI := i - i-- - if v { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - i -= len(keysForStrategiesProcessed[iNdEx]) - copy(dAtA[i:], keysForStrategiesProcessed[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForStrategiesProcessed[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Artifact) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = 
make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Artifact) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Artifact) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i-- - if m.Deleted { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x68 - if m.ArtifactGC != nil { - { - size, err := m.ArtifactGC.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x62 - } - i -= len(m.FromExpression) - copy(dAtA[i:], m.FromExpression) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FromExpression))) - i-- - dAtA[i] = 0x5a - i-- - if m.RecurseMode { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x50 - i -= len(m.SubPath) - copy(dAtA[i:], m.SubPath) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubPath))) - i-- - dAtA[i] = 0x4a - i-- - if m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x40 - if m.Archive != nil { - { - size, err := m.Archive.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - i -= len(m.GlobalName) - copy(dAtA[i:], m.GlobalName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.GlobalName))) - i-- - dAtA[i] = 0x32 - { - size, err := m.ArtifactLocation.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - i -= len(m.From) - copy(dAtA[i:], m.From) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.From))) - i-- - dAtA[i] = 0x22 - if m.Mode != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode)) - i-- - dAtA[i] = 0x18 - } - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ArtifactGC) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ArtifactGC) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ArtifactGC) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.ServiceAccountName) - copy(dAtA[i:], m.ServiceAccountName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) - i-- - dAtA[i] = 0x1a - if m.PodMetadata != nil { - { - size, err := m.PodMetadata.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(m.Strategy) - copy(dAtA[i:], m.Strategy) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Strategy))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ArtifactGCSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ArtifactGCSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ArtifactGCSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ArtifactsByNode) > 0 { - keysForArtifactsByNode := make([]string, 0, len(m.ArtifactsByNode)) - for k := range m.ArtifactsByNode { - keysForArtifactsByNode = append(keysForArtifactsByNode, string(k)) - } - 
github_com_gogo_protobuf_sortkeys.Strings(keysForArtifactsByNode) - for iNdEx := len(keysForArtifactsByNode) - 1; iNdEx >= 0; iNdEx-- { - v := m.ArtifactsByNode[string(keysForArtifactsByNode[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForArtifactsByNode[iNdEx]) - copy(dAtA[i:], keysForArtifactsByNode[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForArtifactsByNode[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ArtifactGCStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ArtifactGCStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ArtifactGCStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ArtifactResultsByNode) > 0 { - keysForArtifactResultsByNode := make([]string, 0, len(m.ArtifactResultsByNode)) - for k := range m.ArtifactResultsByNode { - keysForArtifactResultsByNode = append(keysForArtifactResultsByNode, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForArtifactResultsByNode) - for iNdEx := len(keysForArtifactResultsByNode) - 1; iNdEx >= 0; iNdEx-- { - v := m.ArtifactResultsByNode[string(keysForArtifactResultsByNode[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForArtifactResultsByNode[iNdEx]) - copy(dAtA[i:], keysForArtifactResultsByNode[iNdEx]) - i = encodeVarintGenerated(dAtA, 
i, uint64(len(keysForArtifactResultsByNode[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ArtifactLocation) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ArtifactLocation) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ArtifactLocation) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Azure != nil { - { - size, err := m.Azure.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - } - if m.GCS != nil { - { - size, err := m.GCS.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - if m.OSS != nil { - { - size, err := m.OSS.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - if m.Raw != nil { - { - size, err := m.Raw.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - if m.HDFS != nil { - { - size, err := m.HDFS.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if m.Artifactory != nil { - { - size, err := m.Artifactory.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.HTTP != nil { - { - size, err := 
m.HTTP.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.Git != nil { - { - size, err := m.Git.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.S3 != nil { - { - size, err := m.S3.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.ArchiveLogs != nil { - i-- - if *m.ArchiveLogs { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *ArtifactNodeSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ArtifactNodeSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ArtifactNodeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Artifacts) > 0 { - keysForArtifacts := make([]string, 0, len(m.Artifacts)) - for k := range m.Artifacts { - keysForArtifacts = append(keysForArtifacts, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForArtifacts) - for iNdEx := len(keysForArtifacts) - 1; iNdEx >= 0; iNdEx-- { - v := m.Artifacts[string(keysForArtifacts[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForArtifacts[iNdEx]) - copy(dAtA[i:], keysForArtifacts[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForArtifacts[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, 
uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if m.ArchiveLocation != nil { - { - size, err := m.ArchiveLocation.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ArtifactPaths) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ArtifactPaths) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ArtifactPaths) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Artifact.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ArtifactRepository) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ArtifactRepository) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ArtifactRepository) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Azure != nil { - { - size, err := m.Azure.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - if m.GCS != nil { - { - size, err := m.GCS.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if m.OSS != nil { - { - size, err := m.OSS.MarshalToSizedBuffer(dAtA[:i]) - 
if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.HDFS != nil { - { - size, err := m.HDFS.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.Artifactory != nil { - { - size, err := m.Artifactory.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.S3 != nil { - { - size, err := m.S3.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.ArchiveLogs != nil { - i-- - if *m.ArchiveLogs { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *ArtifactRepositoryRef) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ArtifactRepositoryRef) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ArtifactRepositoryRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x12 - i -= len(m.ConfigMap) - copy(dAtA[i:], m.ConfigMap) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConfigMap))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ArtifactRepositoryRefStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ArtifactRepositoryRefStatus) 
MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ArtifactRepositoryRefStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ArtifactRepository != nil { - { - size, err := m.ArtifactRepository.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - i-- - if m.Default { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0x12 - { - size, err := m.ArtifactRepositoryRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ArtifactResult) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ArtifactResult) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ArtifactResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Error != nil { - i -= len(*m.Error) - copy(dAtA[i:], *m.Error) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Error))) - i-- - dAtA[i] = 0x1a - } - i-- - if m.Success { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ArtifactResultNodeStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err 
!= nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ArtifactResultNodeStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ArtifactResultNodeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ArtifactResults) > 0 { - keysForArtifactResults := make([]string, 0, len(m.ArtifactResults)) - for k := range m.ArtifactResults { - keysForArtifactResults = append(keysForArtifactResults, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForArtifactResults) - for iNdEx := len(keysForArtifactResults) - 1; iNdEx >= 0; iNdEx-- { - v := m.ArtifactResults[string(keysForArtifactResults[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForArtifactResults[iNdEx]) - copy(dAtA[i:], keysForArtifactResults[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForArtifactResults[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ArtifactSearchQuery) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ArtifactSearchQuery) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ArtifactSearchQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.NodeTypes) > 0 { - keysForNodeTypes := make([]string, 0, len(m.NodeTypes)) - for k := range m.NodeTypes { - keysForNodeTypes = append(keysForNodeTypes, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForNodeTypes) - for iNdEx 
:= len(keysForNodeTypes) - 1; iNdEx >= 0; iNdEx-- { - v := m.NodeTypes[NodeType(keysForNodeTypes[iNdEx])] - baseI := i - i-- - if v { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - i -= len(keysForNodeTypes[iNdEx]) - copy(dAtA[i:], keysForNodeTypes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodeTypes[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x32 - } - } - if m.Deleted != nil { - i-- - if *m.Deleted { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - i -= len(m.NodeId) - copy(dAtA[i:], m.NodeId) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeId))) - i-- - dAtA[i] = 0x22 - i -= len(m.TemplateName) - copy(dAtA[i:], m.TemplateName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TemplateName))) - i-- - dAtA[i] = 0x1a - i -= len(m.ArtifactName) - copy(dAtA[i:], m.ArtifactName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ArtifactName))) - i-- - dAtA[i] = 0x12 - if len(m.ArtifactGCStrategies) > 0 { - keysForArtifactGCStrategies := make([]string, 0, len(m.ArtifactGCStrategies)) - for k := range m.ArtifactGCStrategies { - keysForArtifactGCStrategies = append(keysForArtifactGCStrategies, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForArtifactGCStrategies) - for iNdEx := len(keysForArtifactGCStrategies) - 1; iNdEx >= 0; iNdEx-- { - v := m.ArtifactGCStrategies[ArtifactGCStrategy(keysForArtifactGCStrategies[iNdEx])] - baseI := i - i-- - if v { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - i -= len(keysForArtifactGCStrategies[iNdEx]) - copy(dAtA[i:], keysForArtifactGCStrategies[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForArtifactGCStrategies[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ArtifactSearchResult) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA 
= make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ArtifactSearchResult) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ArtifactSearchResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.NodeID) - copy(dAtA[i:], m.NodeID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeID))) - i-- - dAtA[i] = 0x12 - { - size, err := m.Artifact.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ArtifactoryArtifact) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ArtifactoryArtifact) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ArtifactoryArtifact) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ArtifactoryAuth.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(m.URL) - copy(dAtA[i:], m.URL) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ArtifactoryArtifactRepository) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ArtifactoryArtifactRepository) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m 
*ArtifactoryArtifactRepository) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.KeyFormat) - copy(dAtA[i:], m.KeyFormat) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.KeyFormat))) - i-- - dAtA[i] = 0x1a - i -= len(m.RepoURL) - copy(dAtA[i:], m.RepoURL) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RepoURL))) - i-- - dAtA[i] = 0x12 - { - size, err := m.ArtifactoryAuth.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ArtifactoryAuth) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ArtifactoryAuth) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ArtifactoryAuth) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.PasswordSecret != nil { - { - size, err := m.PasswordSecret.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.UsernameSecret != nil { - { - size, err := m.UsernameSecret.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AzureArtifact) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AzureArtifact) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AzureArtifact) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Blob) - copy(dAtA[i:], m.Blob) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Blob))) - i-- - dAtA[i] = 0x12 - { - size, err := m.AzureBlobContainer.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *AzureArtifactRepository) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AzureArtifactRepository) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AzureArtifactRepository) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.BlobNameFormat) - copy(dAtA[i:], m.BlobNameFormat) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.BlobNameFormat))) - i-- - dAtA[i] = 0x12 - { - size, err := m.AzureBlobContainer.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *AzureBlobContainer) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AzureBlobContainer) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AzureBlobContainer) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i-- - if m.UseSDKCreds { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - if m.AccountKeySecret != nil { - { - size, err := 
m.AccountKeySecret.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - i -= len(m.Container) - copy(dAtA[i:], m.Container) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) - i-- - dAtA[i] = 0x12 - i -= len(m.Endpoint) - copy(dAtA[i:], m.Endpoint) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Endpoint))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Backoff) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Backoff) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Backoff) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.MaxDuration) - copy(dAtA[i:], m.MaxDuration) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MaxDuration))) - i-- - dAtA[i] = 0x1a - if m.Factor != nil { - { - size, err := m.Factor.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(m.Duration) - copy(dAtA[i:], m.Duration) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Duration))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *BasicAuth) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BasicAuth) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BasicAuth) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.PasswordSecret != nil { - { - size, err := 
m.PasswordSecret.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.UsernameSecret != nil { - { - size, err := m.UsernameSecret.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Cache) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Cache) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Cache) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ConfigMap != nil { - { - size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ClientCertAuth) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClientCertAuth) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ClientCertAuth) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ClientKeySecret != nil { - { - size, err := m.ClientKeySecret.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.ClientCertSecret != nil { - { - size, err := m.ClientCertSecret.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - 
i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ClusterWorkflowTemplate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClusterWorkflowTemplate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ClusterWorkflowTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ClusterWorkflowTemplateList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClusterWorkflowTemplateList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ClusterWorkflowTemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - 
} - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Column) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Column) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Column) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x1a - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Condition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Condition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Condition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x1a - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ContainerNode) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } 
- return dAtA[:n], nil -} - -func (m *ContainerNode) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ContainerNode) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Dependencies) > 0 { - for iNdEx := len(m.Dependencies) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Dependencies[iNdEx]) - copy(dAtA[i:], m.Dependencies[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Dependencies[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ContainerSetRetryStrategy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ContainerSetRetryStrategy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ContainerSetRetryStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Retries != nil { - { - size, err := m.Retries.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(m.Duration) - copy(dAtA[i:], m.Duration) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Duration))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ContainerSetTemplate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ContainerSetTemplate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ContainerSetTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.RetryStrategy != nil { - { - size, err := m.RetryStrategy.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if len(m.Containers) > 0 { - for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Containers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if len(m.VolumeMounts) > 0 { - for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - return len(dAtA) - i, nil -} - -func (m *ContinueOn) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ContinueOn) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ContinueOn) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i-- - if m.Failed { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - i-- - if m.Error { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *Counter) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Counter) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - 
return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Counter) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CreateS3BucketOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CreateS3BucketOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CreateS3BucketOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i-- - if m.ObjectLocking { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - return len(dAtA) - i, nil -} - -func (m *CronWorkflow) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CronWorkflow) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CronWorkflow) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return 
len(dAtA) - i, nil -} - -func (m *CronWorkflowList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CronWorkflowList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CronWorkflowList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CronWorkflowSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CronWorkflowSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CronWorkflowSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.WorkflowMetadata != nil { - { - size, err := m.WorkflowMetadata.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - i -= len(m.Timezone) - copy(dAtA[i:], m.Timezone) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Timezone))) - i-- - dAtA[i] = 0x42 - if m.FailedJobsHistoryLimit != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedJobsHistoryLimit)) - i-- - dAtA[i] = 0x38 
- } - if m.SuccessfulJobsHistoryLimit != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulJobsHistoryLimit)) - i-- - dAtA[i] = 0x30 - } - if m.StartingDeadlineSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.StartingDeadlineSeconds)) - i-- - dAtA[i] = 0x28 - } - i-- - if m.Suspend { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - i -= len(m.ConcurrencyPolicy) - copy(dAtA[i:], m.ConcurrencyPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConcurrencyPolicy))) - i-- - dAtA[i] = 0x1a - i -= len(m.Schedule) - copy(dAtA[i:], m.Schedule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedule))) - i-- - dAtA[i] = 0x12 - { - size, err := m.WorkflowSpec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CronWorkflowStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CronWorkflowStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CronWorkflowStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.LastScheduledTime != nil { - { - size, err := m.LastScheduledTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Active) > 0 { - for iNdEx := len(m.Active) - 1; iNdEx >= 0; 
iNdEx-- { - { - size, err := m.Active[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *DAGTask) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DAGTask) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DAGTask) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Inline != nil { - { - size, err := m.Inline.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x72 - } - if len(m.Hooks) > 0 { - keysForHooks := make([]string, 0, len(m.Hooks)) - for k := range m.Hooks { - keysForHooks = append(keysForHooks, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForHooks) - for iNdEx := len(keysForHooks) - 1; iNdEx >= 0; iNdEx-- { - v := m.Hooks[LifecycleEvent(keysForHooks[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForHooks[iNdEx]) - copy(dAtA[i:], keysForHooks[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForHooks[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x6a - } - } - i -= len(m.Depends) - copy(dAtA[i:], m.Depends) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Depends))) - i-- - dAtA[i] = 0x62 - i -= len(m.OnExit) - copy(dAtA[i:], m.OnExit) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.OnExit))) - i-- - dAtA[i] = 0x5a - if m.ContinueOn != nil { - { - size, err := 
m.ContinueOn.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - } - i -= len(m.When) - copy(dAtA[i:], m.When) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.When))) - i-- - dAtA[i] = 0x4a - if m.WithSequence != nil { - { - size, err := m.WithSequence.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - i -= len(m.WithParam) - copy(dAtA[i:], m.WithParam) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.WithParam))) - i-- - dAtA[i] = 0x3a - if len(m.WithItems) > 0 { - for iNdEx := len(m.WithItems) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.WithItems[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - } - if len(m.Dependencies) > 0 { - for iNdEx := len(m.Dependencies) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Dependencies[iNdEx]) - copy(dAtA[i:], m.Dependencies[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Dependencies[iNdEx]))) - i-- - dAtA[i] = 0x2a - } - } - if m.TemplateRef != nil { - { - size, err := m.TemplateRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - { - size, err := m.Arguments.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - i -= len(m.Template) - copy(dAtA[i:], m.Template) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Template))) - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *DAGTemplate) Marshal() (dAtA []byte, err error) { - 
size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DAGTemplate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DAGTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.FailFast != nil { - i-- - if *m.FailFast { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if len(m.Tasks) > 0 { - for iNdEx := len(m.Tasks) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Tasks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Target) - copy(dAtA[i:], m.Target) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Target))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Data) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Data) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Data) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Transformation) > 0 { - for iNdEx := len(m.Transformation) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Transformation[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *DataSource) Marshal() (dAtA []byte, err 
error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DataSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DataSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ArtifactPaths != nil { - { - size, err := m.ArtifactPaths.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Event) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Event) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Selector) - copy(dAtA[i:], m.Selector) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Selector))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ExecutorConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExecutorConfig) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExecutorConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.ServiceAccountName) - copy(dAtA[i:], m.ServiceAccountName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *GCSArtifact) 
Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GCSArtifact) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GCSArtifact) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x12 - { - size, err := m.GCSBucket.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *GCSArtifactRepository) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GCSArtifactRepository) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GCSArtifactRepository) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.KeyFormat) - copy(dAtA[i:], m.KeyFormat) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.KeyFormat))) - i-- - dAtA[i] = 0x12 - { - size, err := m.GCSBucket.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *GCSBucket) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GCSBucket) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} 
- -func (m *GCSBucket) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ServiceAccountKeySecret != nil { - { - size, err := m.ServiceAccountKeySecret.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(m.Bucket) - copy(dAtA[i:], m.Bucket) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Bucket))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Gauge) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Gauge) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Gauge) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Operation) - copy(dAtA[i:], m.Operation) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) - i-- - dAtA[i] = 0x1a - if m.Realtime != nil { - i-- - if *m.Realtime { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *GitArtifact) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GitArtifact) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GitArtifact) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Branch) - copy(dAtA[i:], m.Branch) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Branch))) - i-- - dAtA[i] = 
0x5a - i-- - if m.SingleBranch { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x50 - i-- - if m.DisableSubmodules { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x48 - i-- - if m.InsecureIgnoreHostKey { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x40 - if m.SSHPrivateKeySecret != nil { - { - size, err := m.SSHPrivateKeySecret.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - if m.PasswordSecret != nil { - { - size, err := m.PasswordSecret.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if m.UsernameSecret != nil { - { - size, err := m.UsernameSecret.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if len(m.Fetch) > 0 { - for iNdEx := len(m.Fetch) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Fetch[iNdEx]) - copy(dAtA[i:], m.Fetch[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Fetch[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } - if m.Depth != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Depth)) - i-- - dAtA[i] = 0x18 - } - i -= len(m.Revision) - copy(dAtA[i:], m.Revision) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Revision))) - i-- - dAtA[i] = 0x12 - i -= len(m.Repo) - copy(dAtA[i:], m.Repo) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Repo))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *HDFSArtifact) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HDFSArtifact) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m 
*HDFSArtifact) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i-- - if m.Force { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0x12 - { - size, err := m.HDFSConfig.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *HDFSArtifactRepository) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HDFSArtifactRepository) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HDFSArtifactRepository) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i-- - if m.Force { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - i -= len(m.PathFormat) - copy(dAtA[i:], m.PathFormat) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathFormat))) - i-- - dAtA[i] = 0x12 - { - size, err := m.HDFSConfig.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *HDFSConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HDFSConfig) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HDFSConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.HDFSUser) - 
copy(dAtA[i:], m.HDFSUser) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.HDFSUser))) - i-- - dAtA[i] = 0x1a - if len(m.Addresses) > 0 { - for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Addresses[iNdEx]) - copy(dAtA[i:], m.Addresses[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Addresses[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.HDFSKrbConfig.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *HDFSKrbConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HDFSKrbConfig) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HDFSKrbConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.KrbServicePrincipalName) - copy(dAtA[i:], m.KrbServicePrincipalName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.KrbServicePrincipalName))) - i-- - dAtA[i] = 0x32 - if m.KrbConfigConfigMap != nil { - { - size, err := m.KrbConfigConfigMap.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - i -= len(m.KrbRealm) - copy(dAtA[i:], m.KrbRealm) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.KrbRealm))) - i-- - dAtA[i] = 0x22 - i -= len(m.KrbUsername) - copy(dAtA[i:], m.KrbUsername) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.KrbUsername))) - i-- - dAtA[i] = 0x1a - if m.KrbKeytabSecret != nil { - { - size, err := m.KrbKeytabSecret.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 
0x12 - } - if m.KrbCCacheSecret != nil { - { - size, err := m.KrbCCacheSecret.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *HTTP) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HTTP) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HTTP) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.BodyFrom != nil { - { - size, err := m.BodyFrom.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - i-- - if m.InsecureSkipVerify { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x38 - i -= len(m.SuccessCondition) - copy(dAtA[i:], m.SuccessCondition) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SuccessCondition))) - i-- - dAtA[i] = 0x32 - i -= len(m.Body) - copy(dAtA[i:], m.Body) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Body))) - i-- - dAtA[i] = 0x2a - if m.TimeoutSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) - i-- - dAtA[i] = 0x20 - } - if len(m.Headers) > 0 { - for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Headers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - i -= len(m.URL) - copy(dAtA[i:], m.URL) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) - i-- - dAtA[i] = 0x12 - i -= len(m.Method) - copy(dAtA[i:], m.Method) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Method))) - i-- - dAtA[i] = 0xa - return 
len(dAtA) - i, nil -} - -func (m *HTTPArtifact) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HTTPArtifact) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HTTPArtifact) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Auth != nil { - { - size, err := m.Auth.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if len(m.Headers) > 0 { - for iNdEx := len(m.Headers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Headers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.URL) - copy(dAtA[i:], m.URL) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *HTTPAuth) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HTTPAuth) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HTTPAuth) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.BasicAuth.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.OAuth2.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, 
err := m.ClientCert.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *HTTPBodySource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HTTPBodySource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HTTPBodySource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Bytes != nil { - i -= len(m.Bytes) - copy(dAtA[i:], m.Bytes) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Bytes))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *HTTPHeader) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HTTPHeader) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HTTPHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ValueFrom != nil { - { - size, err := m.ValueFrom.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *HTTPHeaderSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HTTPHeaderSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HTTPHeaderSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.SecretKeyRef != nil { - { - size, err := m.SecretKeyRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Header) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Header) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Header) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Histogram) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Histogram) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Buckets) > 0 { - for iNdEx := len(m.Buckets) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Buckets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 
0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x1a - return len(dAtA) - i, nil -} - -func (m *Inputs) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Inputs) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Inputs) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Artifacts) > 0 { - for iNdEx := len(m.Artifacts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Artifacts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Parameters) > 0 { - for iNdEx := len(m.Parameters) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Parameters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Item) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Item) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Item) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Value != nil { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *LabelKeys) 
Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LabelKeys) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LabelKeys) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Items[iNdEx]) - copy(dAtA[i:], m.Items[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Items[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *LabelValueFrom) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LabelValueFrom) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LabelValueFrom) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Expression) - copy(dAtA[i:], m.Expression) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *LabelValues) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LabelValues) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LabelValues) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Items[iNdEx]) - copy(dAtA[i:], m.Items[iNdEx]) - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Items[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *LifecycleHook) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LifecycleHook) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LifecycleHook) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Expression) - copy(dAtA[i:], m.Expression) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) - i-- - dAtA[i] = 0x22 - if m.TemplateRef != nil { - { - size, err := m.TemplateRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - { - size, err := m.Arguments.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(m.Template) - copy(dAtA[i:], m.Template) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Template))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Link) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Link) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Link) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.URL) - copy(dAtA[i:], m.URL) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) - i-- - dAtA[i] = 0x1a - i -= len(m.Scope) - copy(dAtA[i:], m.Scope) - i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Scope))) - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ManifestFrom) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ManifestFrom) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ManifestFrom) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Artifact != nil { - { - size, err := m.Artifact.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MemoizationStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemoizationStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemoizationStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.CacheName) - copy(dAtA[i:], m.CacheName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CacheName))) - i-- - dAtA[i] = 0x1a - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x12 - i-- - if m.Hit { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *Memoize) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return 
dAtA[:n], nil -} - -func (m *Memoize) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Memoize) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.MaxAge) - copy(dAtA[i:], m.MaxAge) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MaxAge))) - i-- - dAtA[i] = 0x1a - if m.Cache != nil { - { - size, err := m.Cache.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Metadata) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Metadata) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Metadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Labels) > 0 { - keysForLabels := make([]string, 0, len(m.Labels)) - for k := range m.Labels { - keysForLabels = append(keysForLabels, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - for iNdEx := len(keysForLabels) - 1; iNdEx >= 0; iNdEx-- { - v := m.Labels[string(keysForLabels[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForLabels[iNdEx]) - copy(dAtA[i:], keysForLabels[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLabels[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Annotations) > 0 { - keysForAnnotations := make([]string, 0, 
len(m.Annotations)) - for k := range m.Annotations { - keysForAnnotations = append(keysForAnnotations, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) - for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- { - v := m.Annotations[string(keysForAnnotations[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForAnnotations[iNdEx]) - copy(dAtA[i:], keysForAnnotations[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *MetricLabel) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricLabel) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricLabel) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x12 - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Metrics) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Metrics) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Metrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Prometheus) > 0 { - for iNdEx := len(m.Prometheus) - 
1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Prometheus[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Mutex) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Mutex) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Mutex) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MutexHolding) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MutexHolding) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MutexHolding) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Holder) - copy(dAtA[i:], m.Holder) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Holder))) - i-- - dAtA[i] = 0x12 - i -= len(m.Mutex) - copy(dAtA[i:], m.Mutex) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Mutex))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MutexStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - 
-func (m *MutexStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MutexStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Waiting) > 0 { - for iNdEx := len(m.Waiting) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Waiting[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Holding) > 0 { - for iNdEx := len(m.Holding) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Holding[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *NodeFlag) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NodeFlag) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodeFlag) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i-- - if m.Retried { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - i-- - if m.Hooked { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *NodeResult) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NodeResult) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodeResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= 
len(m.Progress) - copy(dAtA[i:], m.Progress) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Progress))) - i-- - dAtA[i] = 0x22 - if m.Outputs != nil { - { - size, err := m.Outputs.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x12 - i -= len(m.Phase) - copy(dAtA[i:], m.Phase) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *NodeStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.NodeFlag != nil { - { - size, err := m.NodeFlag.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xda - } - i -= len(m.Progress) - copy(dAtA[i:], m.Progress) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Progress))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xd2 - if m.SynchronizationStatus != nil { - { - size, err := m.SynchronizationStatus.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xca - } - i = encodeVarintGenerated(dAtA, i, uint64(m.EstimatedDuration)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xc0 - if m.MemoizationStatus != nil { - { - size, err := m.MemoizationStatus.MarshalToSizedBuffer(dAtA[:i]) - if err != 
nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xba - } - i -= len(m.HostNodeName) - copy(dAtA[i:], m.HostNodeName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostNodeName))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb2 - if len(m.ResourcesDuration) > 0 { - keysForResourcesDuration := make([]string, 0, len(m.ResourcesDuration)) - for k := range m.ResourcesDuration { - keysForResourcesDuration = append(keysForResourcesDuration, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForResourcesDuration) - for iNdEx := len(keysForResourcesDuration) - 1; iNdEx >= 0; iNdEx-- { - v := m.ResourcesDuration[k8s_io_api_core_v1.ResourceName(keysForResourcesDuration[iNdEx])] - baseI := i - i = encodeVarintGenerated(dAtA, i, uint64(v)) - i-- - dAtA[i] = 0x10 - i -= len(keysForResourcesDuration[iNdEx]) - copy(dAtA[i:], keysForResourcesDuration[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForResourcesDuration[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xaa - } - } - i -= len(m.TemplateScope) - copy(dAtA[i:], m.TemplateScope) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TemplateScope))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa2 - if len(m.OutboundNodes) > 0 { - for iNdEx := len(m.OutboundNodes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.OutboundNodes[iNdEx]) - copy(dAtA[i:], m.OutboundNodes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.OutboundNodes[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x8a - } - } - if len(m.Children) > 0 { - for iNdEx := len(m.Children) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Children[iNdEx]) - copy(dAtA[i:], m.Children[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Children[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x82 - } - } - if m.Outputs != nil { - { - size, err := m.Outputs.MarshalToSizedBuffer(dAtA[:i]) - if 
err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x7a - } - if m.Inputs != nil { - { - size, err := m.Inputs.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x72 - } - if m.Daemoned != nil { - i-- - if *m.Daemoned { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x68 - } - i -= len(m.PodIP) - copy(dAtA[i:], m.PodIP) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodIP))) - i-- - dAtA[i] = 0x62 - { - size, err := m.FinishedAt.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a - { - size, err := m.StartedAt.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x4a - i -= len(m.BoundaryID) - copy(dAtA[i:], m.BoundaryID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.BoundaryID))) - i-- - dAtA[i] = 0x42 - i -= len(m.Phase) - copy(dAtA[i:], m.Phase) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i-- - dAtA[i] = 0x3a - if m.TemplateRef != nil { - { - size, err := m.TemplateRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - i -= len(m.TemplateName) - copy(dAtA[i:], m.TemplateName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TemplateName))) - i-- - dAtA[i] = 0x2a - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0x22 - i -= len(m.DisplayName) - copy(dAtA[i:], m.DisplayName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DisplayName))) - 
i-- - dAtA[i] = 0x1a - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *NodeSynchronizationStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NodeSynchronizationStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodeSynchronizationStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Waiting) - copy(dAtA[i:], m.Waiting) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Waiting))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *NoneStrategy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NoneStrategy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NoneStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *OAuth2Auth) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *OAuth2Auth) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *OAuth2Auth) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.EndpointParams) > 0 { - for iNdEx := 
len(m.EndpointParams) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.EndpointParams[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - } - if len(m.Scopes) > 0 { - for iNdEx := len(m.Scopes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Scopes[iNdEx]) - copy(dAtA[i:], m.Scopes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scopes[iNdEx]))) - i-- - dAtA[i] = 0x2a - } - } - if m.TokenURLSecret != nil { - { - size, err := m.TokenURLSecret.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.ClientSecretSecret != nil { - { - size, err := m.ClientSecretSecret.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.ClientIDSecret != nil { - { - size, err := m.ClientIDSecret.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *OAuth2EndpointParam) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *OAuth2EndpointParam) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *OAuth2EndpointParam) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x12 - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, 
nil -} - -func (m *OSSArtifact) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *OSSArtifact) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *OSSArtifact) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x12 - { - size, err := m.OSSBucket.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *OSSArtifactRepository) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *OSSArtifactRepository) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *OSSArtifactRepository) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.KeyFormat) - copy(dAtA[i:], m.KeyFormat) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.KeyFormat))) - i-- - dAtA[i] = 0x12 - { - size, err := m.OSSBucket.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *OSSBucket) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *OSSBucket) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *OSSBucket) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i-- - if m.UseSDKCreds { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x40 - if m.LifecycleRule != nil { - { - size, err := m.LifecycleRule.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - i -= len(m.SecurityToken) - copy(dAtA[i:], m.SecurityToken) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecurityToken))) - i-- - dAtA[i] = 0x32 - i-- - if m.CreateBucketIfNotPresent { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - if m.SecretKeySecret != nil { - { - size, err := m.SecretKeySecret.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.AccessKeySecret != nil { - { - size, err := m.AccessKeySecret.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - i -= len(m.Bucket) - copy(dAtA[i:], m.Bucket) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Bucket))) - i-- - dAtA[i] = 0x12 - i -= len(m.Endpoint) - copy(dAtA[i:], m.Endpoint) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Endpoint))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *OSSLifecycleRule) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *OSSLifecycleRule) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *OSSLifecycleRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i = 
encodeVarintGenerated(dAtA, i, uint64(m.MarkDeletionAfterDays)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.MarkInfrequentAccessAfterDays)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *Object) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Object) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Object) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Value != nil { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Outputs) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Outputs) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Outputs) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ExitCode != nil { - i -= len(*m.ExitCode) - copy(dAtA[i:], *m.ExitCode) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ExitCode))) - i-- - dAtA[i] = 0x22 - } - if m.Result != nil { - i -= len(*m.Result) - copy(dAtA[i:], *m.Result) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Result))) - i-- - dAtA[i] = 0x1a - } - if len(m.Artifacts) > 0 { - for iNdEx := len(m.Artifacts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Artifacts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Parameters) > 0 { - for iNdEx := 
len(m.Parameters) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Parameters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ParallelSteps) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ParallelSteps) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ParallelSteps) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Steps) > 0 { - for iNdEx := len(m.Steps) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Steps[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Parameter) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Parameter) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Parameter) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Description != nil { - i -= len(*m.Description) - copy(dAtA[i:], *m.Description) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Description))) - i-- - dAtA[i] = 0x3a - } - if len(m.Enum) > 0 { - for iNdEx := len(m.Enum) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Enum[iNdEx]) - copy(dAtA[i:], m.Enum[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Enum[iNdEx]))) - i-- - dAtA[i] = 0x32 - } - } - i -= len(m.GlobalName) - copy(dAtA[i:], 
m.GlobalName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.GlobalName))) - i-- - dAtA[i] = 0x2a - if m.ValueFrom != nil { - { - size, err := m.ValueFrom.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.Value != nil { - i -= len(*m.Value) - copy(dAtA[i:], *m.Value) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Value))) - i-- - dAtA[i] = 0x1a - } - if m.Default != nil { - i -= len(*m.Default) - copy(dAtA[i:], *m.Default) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Default))) - i-- - dAtA[i] = 0x12 - } - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Plugin) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Plugin) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Plugin) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *PodGC) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodGC) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodGC) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.DeleteDelayDuration) - copy(dAtA[i:], m.DeleteDelayDuration) 
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeleteDelayDuration))) - i-- - dAtA[i] = 0x1a - if m.LabelSelector != nil { - { - size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(m.Strategy) - copy(dAtA[i:], m.Strategy) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Strategy))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Prometheus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Prometheus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Prometheus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Counter != nil { - { - size, err := m.Counter.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - if m.Histogram != nil { - { - size, err := m.Histogram.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if m.Gauge != nil { - { - size, err := m.Gauge.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - i -= len(m.When) - copy(dAtA[i:], m.When) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.When))) - i-- - dAtA[i] = 0x22 - i -= len(m.Help) - copy(dAtA[i:], m.Help) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Help))) - i-- - dAtA[i] = 0x1a - if len(m.Labels) > 0 { - for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := 
m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *RawArtifact) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RawArtifact) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RawArtifact) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceTemplate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceTemplate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ManifestFrom != nil { - { - size, err := m.ManifestFrom.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - if len(m.Flags) > 0 { - for iNdEx := len(m.Flags) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Flags[iNdEx]) - copy(dAtA[i:], m.Flags[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Flags[iNdEx]))) - i-- - dAtA[i] = 0x3a - } - } - i -= len(m.FailureCondition) - copy(dAtA[i:], m.FailureCondition) - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.FailureCondition))) - i-- - dAtA[i] = 0x32 - i -= len(m.SuccessCondition) - copy(dAtA[i:], m.SuccessCondition) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SuccessCondition))) - i-- - dAtA[i] = 0x2a - i-- - if m.SetOwnerReference { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - i -= len(m.Manifest) - copy(dAtA[i:], m.Manifest) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Manifest))) - i-- - dAtA[i] = 0x1a - i -= len(m.MergeStrategy) - copy(dAtA[i:], m.MergeStrategy) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MergeStrategy))) - i-- - dAtA[i] = 0x12 - i -= len(m.Action) - copy(dAtA[i:], m.Action) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *RetryAffinity) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RetryAffinity) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RetryAffinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.NodeAntiAffinity != nil { - { - size, err := m.NodeAntiAffinity.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *RetryNodeAntiAffinity) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RetryNodeAntiAffinity) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RetryNodeAntiAffinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i 
:= len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *RetryStrategy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RetryStrategy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RetryStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Expression) - copy(dAtA[i:], m.Expression) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) - i-- - dAtA[i] = 0x2a - if m.Affinity != nil { - { - size, err := m.Affinity.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.Backoff != nil { - { - size, err := m.Backoff.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - i -= len(m.RetryPolicy) - copy(dAtA[i:], m.RetryPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RetryPolicy))) - i-- - dAtA[i] = 0x12 - if m.Limit != nil { - { - size, err := m.Limit.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *S3Artifact) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *S3Artifact) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *S3Artifact) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= 
len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x12 - { - size, err := m.S3Bucket.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *S3ArtifactRepository) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *S3ArtifactRepository) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *S3ArtifactRepository) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.KeyPrefix) - copy(dAtA[i:], m.KeyPrefix) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.KeyPrefix))) - i-- - dAtA[i] = 0x1a - i -= len(m.KeyFormat) - copy(dAtA[i:], m.KeyFormat) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.KeyFormat))) - i-- - dAtA[i] = 0x12 - { - size, err := m.S3Bucket.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *S3Bucket) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *S3Bucket) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *S3Bucket) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.CASecret != nil { - { - size, err := m.CASecret.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - 
dAtA[i] = 0x5a - } - if m.EncryptionOptions != nil { - { - size, err := m.EncryptionOptions.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - } - if m.CreateBucketIfNotPresent != nil { - { - size, err := m.CreateBucketIfNotPresent.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - i-- - if m.UseSDKCreds { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x40 - i -= len(m.RoleARN) - copy(dAtA[i:], m.RoleARN) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RoleARN))) - i-- - dAtA[i] = 0x3a - if m.SecretKeySecret != nil { - { - size, err := m.SecretKeySecret.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if m.AccessKeySecret != nil { - { - size, err := m.AccessKeySecret.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.Insecure != nil { - i-- - if *m.Insecure { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - i -= len(m.Region) - copy(dAtA[i:], m.Region) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Region))) - i-- - dAtA[i] = 0x1a - i -= len(m.Bucket) - copy(dAtA[i:], m.Bucket) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Bucket))) - i-- - dAtA[i] = 0x12 - i -= len(m.Endpoint) - copy(dAtA[i:], m.Endpoint) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Endpoint))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *S3EncryptionOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m 
*S3EncryptionOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *S3EncryptionOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ServerSideCustomerKeySecret != nil { - { - size, err := m.ServerSideCustomerKeySecret.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - i-- - if m.EnableEncryption { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - i -= len(m.KmsEncryptionContext) - copy(dAtA[i:], m.KmsEncryptionContext) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.KmsEncryptionContext))) - i-- - dAtA[i] = 0x12 - i -= len(m.KmsKeyId) - copy(dAtA[i:], m.KmsKeyId) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.KmsKeyId))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ScriptTemplate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ScriptTemplate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ScriptTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Source) - copy(dAtA[i:], m.Source) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Source))) - i-- - dAtA[i] = 0x12 - { - size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *SemaphoreHolding) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return 
dAtA[:n], nil -} - -func (m *SemaphoreHolding) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SemaphoreHolding) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Holders) > 0 { - for iNdEx := len(m.Holders) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Holders[iNdEx]) - copy(dAtA[i:], m.Holders[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Holders[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Semaphore) - copy(dAtA[i:], m.Semaphore) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Semaphore))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *SemaphoreRef) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SemaphoreRef) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SemaphoreRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0x12 - if m.ConfigMapKeyRef != nil { - { - size, err := m.ConfigMapKeyRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *SemaphoreStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SemaphoreStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SemaphoreStatus) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Waiting) > 0 { - for iNdEx := len(m.Waiting) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Waiting[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Holding) > 0 { - for iNdEx := len(m.Holding) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Holding[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Sequence) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Sequence) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Sequence) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Format) - copy(dAtA[i:], m.Format) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Format))) - i-- - dAtA[i] = 0x22 - if m.End != nil { - { - size, err := m.End.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.Start != nil { - { - size, err := m.Start.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Count != nil { - { - size, err := m.Count.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Submit) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = 
make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Submit) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Submit) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if m.Arguments != nil { - { - size, err := m.Arguments.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.WorkflowTemplateRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *SubmitOpts) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SubmitOpts) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SubmitOpts) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Priority != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority)) - i-- - dAtA[i] = 0x70 - } - i -= len(m.PodPriorityClassName) - copy(dAtA[i:], m.PodPriorityClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodPriorityClassName))) - i-- - dAtA[i] = 0x6a - i -= len(m.Annotations) - copy(dAtA[i:], m.Annotations) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Annotations))) - i-- - dAtA[i] = 0x62 - if m.OwnerReference != nil { - { - size, err := 
m.OwnerReference.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a - } - i -= len(m.Labels) - copy(dAtA[i:], m.Labels) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Labels))) - i-- - dAtA[i] = 0x52 - i-- - if m.ServerDryRun { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x48 - i-- - if m.DryRun { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x40 - i -= len(m.ServiceAccount) - copy(dAtA[i:], m.ServiceAccount) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccount))) - i-- - dAtA[i] = 0x3a - if len(m.Parameters) > 0 { - for iNdEx := len(m.Parameters) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Parameters[iNdEx]) - copy(dAtA[i:], m.Parameters[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Parameters[iNdEx]))) - i-- - dAtA[i] = 0x2a - } - } - i -= len(m.Entrypoint) - copy(dAtA[i:], m.Entrypoint) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Entrypoint))) - i-- - dAtA[i] = 0x22 - i -= len(m.GenerateName) - copy(dAtA[i:], m.GenerateName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.GenerateName))) - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *SuppliedValueFrom) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SuppliedValueFrom) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SuppliedValueFrom) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *SuspendTemplate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err 
:= m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SuspendTemplate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SuspendTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Duration) - copy(dAtA[i:], m.Duration) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Duration))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Synchronization) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Synchronization) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Synchronization) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Mutex != nil { - { - size, err := m.Mutex.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Semaphore != nil { - { - size, err := m.Semaphore.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *SynchronizationStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SynchronizationStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SynchronizationStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Mutex != nil { 
- { - size, err := m.Mutex.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Semaphore != nil { - { - size, err := m.Semaphore.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *TTLStrategy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TTLStrategy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TTLStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.SecondsAfterFailure != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.SecondsAfterFailure)) - i-- - dAtA[i] = 0x18 - } - if m.SecondsAfterSuccess != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.SecondsAfterSuccess)) - i-- - dAtA[i] = 0x10 - } - if m.SecondsAfterCompletion != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.SecondsAfterCompletion)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *TarStrategy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TarStrategy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TarStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.CompressionLevel != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.CompressionLevel)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Template) 
Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Template) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Template) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Plugin != nil { - { - size, err := m.Plugin.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xda - } - if m.HTTP != nil { - { - size, err := m.HTTP.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xd2 - } - if m.FailFast != nil { - i-- - if *m.FailFast { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xc8 - } - if m.ContainerSet != nil { - { - size, err := m.ContainerSet.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xc2 - } - if m.Data != nil { - { - size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xba - } - i -= len(m.Timeout) - copy(dAtA[i:], m.Timeout) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Timeout))) - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xb2 - if m.Memoize != nil { - { - size, err := m.Memoize.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xaa - } - if m.Synchronization != nil { - { - size, err := 
m.Synchronization.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xa2 - } - if m.Metrics != nil { - { - size, err := m.Metrics.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x9a - } - if m.Executor != nil { - { - size, err := m.Executor.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x8a - } - if m.AutomountServiceAccountToken != nil { - i-- - if *m.AutomountServiceAccountToken { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x80 - } - i -= len(m.PodSpecPatch) - copy(dAtA[i:], m.PodSpecPatch) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodSpecPatch))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xfa - if m.SecurityContext != nil { - { - size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xf2 - } - if len(m.HostAliases) > 0 { - for iNdEx := len(m.HostAliases) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.HostAliases[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xea - } - } - i -= len(m.ServiceAccountName) - copy(dAtA[i:], m.ServiceAccountName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xe2 - if m.Priority != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xd8 - } - i -= len(m.PriorityClassName) - copy(dAtA[i:], m.PriorityClassName) 
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.PriorityClassName))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xd2 - i -= len(m.SchedulerName) - copy(dAtA[i:], m.SchedulerName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SchedulerName))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xca - if len(m.Tolerations) > 0 { - for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xc2 - } - } - if m.Parallelism != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Parallelism)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb8 - } - if m.RetryStrategy != nil { - { - size, err := m.RetryStrategy.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb2 - } - if m.ActiveDeadlineSeconds != nil { - { - size, err := m.ActiveDeadlineSeconds.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xaa - } - if m.ArchiveLocation != nil { - { - size, err := m.ArchiveLocation.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa2 - } - if len(m.Sidecars) > 0 { - for iNdEx := len(m.Sidecars) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Sidecars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x9a - } - } - if len(m.InitContainers) > 0 { - for iNdEx := len(m.InitContainers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.InitContainers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) 
- if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x92 - } - } - if len(m.Volumes) > 0 { - for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x8a - } - } - if m.Suspend != nil { - { - size, err := m.Suspend.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x82 - } - if m.DAG != nil { - { - size, err := m.DAG.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x7a - } - if m.Resource != nil { - { - size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x72 - } - if m.Script != nil { - { - size, err := m.Script.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x6a - } - if m.Container != nil { - { - size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x62 - } - if len(m.Steps) > 0 { - for iNdEx := len(m.Steps) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Steps[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a - } - } - if m.Daemon != nil { - i-- - if *m.Daemon { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x50 - } - { - size, err := 
m.Metadata.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - if m.Affinity != nil { - { - size, err := m.Affinity.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - if len(m.NodeSelector) > 0 { - keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) - for k := range m.NodeSelector { - keysForNodeSelector = append(keysForNodeSelector, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) - for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- { - v := m.NodeSelector[string(keysForNodeSelector[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForNodeSelector[iNdEx]) - copy(dAtA[i:], keysForNodeSelector[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodeSelector[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x3a - } - } - { - size, err := m.Outputs.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - { - size, err := m.Inputs.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *TemplateRef) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TemplateRef) MarshalTo(dAtA []byte) (int, error) { - size := 
m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TemplateRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i-- - if m.ClusterScope { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - i -= len(m.Template) - copy(dAtA[i:], m.Template) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Template))) - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *TransformationStep) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TransformationStep) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TransformationStep) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Expression) - copy(dAtA[i:], m.Expression) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *UserContainer) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UserContainer) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UserContainer) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.MirrorVolumeMounts != nil { - i-- - if *m.MirrorVolumeMounts { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - { - size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, 
uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ValueFrom) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ValueFrom) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ValueFrom) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ConfigMapKeyRef != nil { - { - size, err := m.ConfigMapKeyRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - i -= len(m.Expression) - copy(dAtA[i:], m.Expression) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) - i-- - dAtA[i] = 0x42 - i -= len(m.Event) - copy(dAtA[i:], m.Event) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Event))) - i-- - dAtA[i] = 0x3a - if m.Supplied != nil { - { - size, err := m.Supplied.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if m.Default != nil { - i -= len(*m.Default) - copy(dAtA[i:], *m.Default) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Default))) - i-- - dAtA[i] = 0x2a - } - i -= len(m.Parameter) - copy(dAtA[i:], m.Parameter) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Parameter))) - i-- - dAtA[i] = 0x22 - i -= len(m.JQFilter) - copy(dAtA[i:], m.JQFilter) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.JQFilter))) - i-- - dAtA[i] = 0x1a - i -= len(m.JSONPath) - copy(dAtA[i:], m.JSONPath) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.JSONPath))) - i-- - dAtA[i] = 0x12 - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func 
(m *Version) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Version) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Version) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Platform) - copy(dAtA[i:], m.Platform) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Platform))) - i-- - dAtA[i] = 0x42 - i -= len(m.Compiler) - copy(dAtA[i:], m.Compiler) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Compiler))) - i-- - dAtA[i] = 0x3a - i -= len(m.GoVersion) - copy(dAtA[i:], m.GoVersion) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.GoVersion))) - i-- - dAtA[i] = 0x32 - i -= len(m.GitTreeState) - copy(dAtA[i:], m.GitTreeState) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.GitTreeState))) - i-- - dAtA[i] = 0x2a - i -= len(m.GitTag) - copy(dAtA[i:], m.GitTag) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.GitTag))) - i-- - dAtA[i] = 0x22 - i -= len(m.GitCommit) - copy(dAtA[i:], m.GitCommit) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.GitCommit))) - i-- - dAtA[i] = 0x1a - i -= len(m.BuildDate) - copy(dAtA[i:], m.BuildDate) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.BuildDate))) - i-- - dAtA[i] = 0x12 - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *VolumeClaimGC) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VolumeClaimGC) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *VolumeClaimGC) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Strategy) - copy(dAtA[i:], m.Strategy) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Strategy))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Workflow) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Workflow) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Workflow) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *WorkflowArtifactGCTask) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WorkflowArtifactGCTask) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WorkflowArtifactGCTask) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err 
:= m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *WorkflowArtifactGCTaskList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WorkflowArtifactGCTaskList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WorkflowArtifactGCTaskList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *WorkflowEventBinding) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WorkflowEventBinding) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WorkflowEventBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } 
- i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *WorkflowEventBindingList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WorkflowEventBindingList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WorkflowEventBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *WorkflowEventBindingSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WorkflowEventBindingSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WorkflowEventBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Submit != nil { - { - size, err := m.Submit.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, 
i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Event.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *WorkflowLevelArtifactGC) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WorkflowLevelArtifactGC) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WorkflowLevelArtifactGC) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.PodSpecPatch) - copy(dAtA[i:], m.PodSpecPatch) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodSpecPatch))) - i-- - dAtA[i] = 0x1a - i-- - if m.ForceFinalizerRemoval { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - { - size, err := m.ArtifactGC.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *WorkflowList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WorkflowList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WorkflowList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } 
- { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *WorkflowMetadata) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WorkflowMetadata) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WorkflowMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.LabelsFrom) > 0 { - keysForLabelsFrom := make([]string, 0, len(m.LabelsFrom)) - for k := range m.LabelsFrom { - keysForLabelsFrom = append(keysForLabelsFrom, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabelsFrom) - for iNdEx := len(keysForLabelsFrom) - 1; iNdEx >= 0; iNdEx-- { - v := m.LabelsFrom[string(keysForLabelsFrom[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForLabelsFrom[iNdEx]) - copy(dAtA[i:], keysForLabelsFrom[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLabelsFrom[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1a - } - } - if len(m.Annotations) > 0 { - keysForAnnotations := make([]string, 0, len(m.Annotations)) - for k := range m.Annotations { - keysForAnnotations = append(keysForAnnotations, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) - for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- { - v := m.Annotations[string(keysForAnnotations[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, 
uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForAnnotations[iNdEx]) - copy(dAtA[i:], keysForAnnotations[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Labels) > 0 { - keysForLabels := make([]string, 0, len(m.Labels)) - for k := range m.Labels { - keysForLabels = append(keysForLabels, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - for iNdEx := len(keysForLabels) - 1; iNdEx >= 0; iNdEx-- { - v := m.Labels[string(keysForLabels[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForLabels[iNdEx]) - copy(dAtA[i:], keysForLabels[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLabels[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *WorkflowSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WorkflowSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WorkflowSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ArtifactGC != nil { - { - size, err := m.ArtifactGC.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xda - } - if m.WorkflowMetadata != nil { - { - size, err := m.WorkflowMetadata.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] 
= 0xd2 - } - if len(m.Hooks) > 0 { - keysForHooks := make([]string, 0, len(m.Hooks)) - for k := range m.Hooks { - keysForHooks = append(keysForHooks, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForHooks) - for iNdEx := len(keysForHooks) - 1; iNdEx >= 0; iNdEx-- { - v := m.Hooks[LifecycleEvent(keysForHooks[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForHooks[iNdEx]) - copy(dAtA[i:], keysForHooks[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForHooks[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xca - } - } - if m.ArchiveLogs != nil { - i-- - if *m.ArchiveLogs { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xc0 - } - if m.TemplateDefaults != nil { - { - size, err := m.TemplateDefaults.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xba - } - if m.PodMetadata != nil { - { - size, err := m.PodMetadata.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xb2 - } - if m.RetryStrategy != nil { - { - size, err := m.RetryStrategy.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xaa - } - if m.VolumeClaimGC != nil { - { - size, err := m.VolumeClaimGC.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xa2 - } - if m.Synchronization != nil { - { - size, err 
:= m.Synchronization.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x9a - } - if m.WorkflowTemplateRef != nil { - { - size, err := m.WorkflowTemplateRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x92 - } - i -= len(m.Shutdown) - copy(dAtA[i:], m.Shutdown) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Shutdown))) - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x8a - if m.Metrics != nil { - { - size, err := m.Metrics.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x82 - } - if m.PodDisruptionBudget != nil { - { - size, err := m.PodDisruptionBudget.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xfa - } - if m.TTLStrategy != nil { - { - size, err := m.TTLStrategy.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xf2 - } - if m.Executor != nil { - { - size, err := m.Executor.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xea - } - if m.AutomountServiceAccountToken != nil { - i-- - if *m.AutomountServiceAccountToken { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xe0 - } - i -= len(m.PodSpecPatch) - copy(dAtA[i:], m.PodSpecPatch) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodSpecPatch))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xda - if m.SecurityContext != nil { - 
{ - size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xd2 - } - if len(m.HostAliases) > 0 { - for iNdEx := len(m.HostAliases) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.HostAliases[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xca - } - } - if m.PodPriority != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.PodPriority)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xc0 - } - i -= len(m.PodPriorityClassName) - copy(dAtA[i:], m.PodPriorityClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodPriorityClassName))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xba - if m.PodGC != nil { - { - size, err := m.PodGC.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb2 - } - i -= len(m.SchedulerName) - copy(dAtA[i:], m.SchedulerName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SchedulerName))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xaa - if m.Priority != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa0 - } - if m.ActiveDeadlineSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x98 - } - i -= len(m.OnExit) - copy(dAtA[i:], m.OnExit) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.OnExit))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x8a - if m.DNSConfig != nil { - { - size, err := m.DNSConfig.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x82 - } - if m.DNSPolicy != nil { - i -= 
len(*m.DNSPolicy) - copy(dAtA[i:], *m.DNSPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DNSPolicy))) - i-- - dAtA[i] = 0x7a - } - if m.HostNetwork != nil { - i-- - if *m.HostNetwork { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x70 - } - if len(m.ImagePullSecrets) > 0 { - for iNdEx := len(m.ImagePullSecrets) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ImagePullSecrets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x6a - } - } - if len(m.Tolerations) > 0 { - for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x62 - } - } - if m.Affinity != nil { - { - size, err := m.Affinity.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a - } - if len(m.NodeSelector) > 0 { - keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) - for k := range m.NodeSelector { - keysForNodeSelector = append(keysForNodeSelector, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) - for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- { - v := m.NodeSelector[string(keysForNodeSelector[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForNodeSelector[iNdEx]) - copy(dAtA[i:], keysForNodeSelector[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodeSelector[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x52 - } - } - if m.Suspend != nil { - i-- - if *m.Suspend { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x48 - } - if 
m.ArtifactRepositoryRef != nil { - { - size, err := m.ArtifactRepositoryRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - if m.Parallelism != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Parallelism)) - i-- - dAtA[i] = 0x38 - } - if len(m.VolumeClaimTemplates) > 0 { - for iNdEx := len(m.VolumeClaimTemplates) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.VolumeClaimTemplates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - } - if len(m.Volumes) > 0 { - for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - i -= len(m.ServiceAccountName) - copy(dAtA[i:], m.ServiceAccountName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) - i-- - dAtA[i] = 0x22 - { - size, err := m.Arguments.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - i -= len(m.Entrypoint) - copy(dAtA[i:], m.Entrypoint) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Entrypoint))) - i-- - dAtA[i] = 0x12 - if len(m.Templates) > 0 { - for iNdEx := len(m.Templates) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Templates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *WorkflowStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return 
dAtA[:n], nil -} - -func (m *WorkflowStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WorkflowStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.TaskResultsCompletionStatus) > 0 { - keysForTaskResultsCompletionStatus := make([]string, 0, len(m.TaskResultsCompletionStatus)) - for k := range m.TaskResultsCompletionStatus { - keysForTaskResultsCompletionStatus = append(keysForTaskResultsCompletionStatus, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForTaskResultsCompletionStatus) - for iNdEx := len(keysForTaskResultsCompletionStatus) - 1; iNdEx >= 0; iNdEx-- { - v := m.TaskResultsCompletionStatus[string(keysForTaskResultsCompletionStatus[iNdEx])] - baseI := i - i-- - if v { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - i -= len(keysForTaskResultsCompletionStatus[iNdEx]) - copy(dAtA[i:], keysForTaskResultsCompletionStatus[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForTaskResultsCompletionStatus[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa2 - } - } - if m.ArtifactGCStatus != nil { - { - size, err := m.ArtifactGCStatus.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x9a - } - if m.ArtifactRepositoryRef != nil { - { - size, err := m.ArtifactRepositoryRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x92 - } - i -= len(m.Progress) - copy(dAtA[i:], m.Progress) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Progress))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x8a - i = encodeVarintGenerated(dAtA, i, uint64(m.EstimatedDuration)) - i-- - 
dAtA[i] = 0x1 - i-- - dAtA[i] = 0x80 - if m.Synchronization != nil { - { - size, err := m.Synchronization.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x7a - } - if m.StoredWorkflowSpec != nil { - { - size, err := m.StoredWorkflowSpec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x72 - } - if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x6a - } - } - if len(m.ResourcesDuration) > 0 { - keysForResourcesDuration := make([]string, 0, len(m.ResourcesDuration)) - for k := range m.ResourcesDuration { - keysForResourcesDuration = append(keysForResourcesDuration, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForResourcesDuration) - for iNdEx := len(keysForResourcesDuration) - 1; iNdEx >= 0; iNdEx-- { - v := m.ResourcesDuration[k8s_io_api_core_v1.ResourceName(keysForResourcesDuration[iNdEx])] - baseI := i - i = encodeVarintGenerated(dAtA, i, uint64(v)) - i-- - dAtA[i] = 0x10 - i -= len(keysForResourcesDuration[iNdEx]) - copy(dAtA[i:], keysForResourcesDuration[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForResourcesDuration[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x62 - } - } - i -= len(m.OffloadNodeStatusVersion) - copy(dAtA[i:], m.OffloadNodeStatusVersion) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.OffloadNodeStatusVersion))) - i-- - dAtA[i] = 0x52 - if len(m.StoredTemplates) > 0 { - keysForStoredTemplates := make([]string, 0, len(m.StoredTemplates)) - for k := range m.StoredTemplates { - keysForStoredTemplates = 
append(keysForStoredTemplates, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForStoredTemplates) - for iNdEx := len(keysForStoredTemplates) - 1; iNdEx >= 0; iNdEx-- { - v := m.StoredTemplates[string(keysForStoredTemplates[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForStoredTemplates[iNdEx]) - copy(dAtA[i:], keysForStoredTemplates[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForStoredTemplates[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x4a - } - } - if m.Outputs != nil { - { - size, err := m.Outputs.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - if len(m.PersistentVolumeClaims) > 0 { - for iNdEx := len(m.PersistentVolumeClaims) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.PersistentVolumeClaims[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - } - if len(m.Nodes) > 0 { - keysForNodes := make([]string, 0, len(m.Nodes)) - for k := range m.Nodes { - keysForNodes = append(keysForNodes, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForNodes) - for iNdEx := len(keysForNodes) - 1; iNdEx >= 0; iNdEx-- { - v := m.Nodes[string(keysForNodes[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForNodes[iNdEx]) - copy(dAtA[i:], keysForNodes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodes[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - 
i-- - dAtA[i] = 0x32 - } - } - i -= len(m.CompressedNodes) - copy(dAtA[i:], m.CompressedNodes) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CompressedNodes))) - i-- - dAtA[i] = 0x2a - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x22 - { - size, err := m.FinishedAt.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.StartedAt.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(m.Phase) - copy(dAtA[i:], m.Phase) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *WorkflowStep) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WorkflowStep) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WorkflowStep) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Inline != nil { - { - size, err := m.Inline.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x6a - } - if len(m.Hooks) > 0 { - keysForHooks := make([]string, 0, len(m.Hooks)) - for k := range m.Hooks { - keysForHooks = append(keysForHooks, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForHooks) - for iNdEx := len(keysForHooks) - 1; iNdEx >= 0; iNdEx-- { - v := m.Hooks[LifecycleEvent(keysForHooks[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForHooks[iNdEx]) - copy(dAtA[i:], keysForHooks[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForHooks[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x62 - } - } - i -= len(m.OnExit) - copy(dAtA[i:], m.OnExit) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.OnExit))) - i-- - dAtA[i] = 0x5a - if m.ContinueOn != nil { - { - size, err := m.ContinueOn.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - i -= len(m.When) - copy(dAtA[i:], m.When) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.When))) - i-- - dAtA[i] = 0x42 - if m.WithSequence != nil { - { - size, err := m.WithSequence.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - i -= len(m.WithParam) - copy(dAtA[i:], m.WithParam) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.WithParam))) - i-- - dAtA[i] = 0x32 - if len(m.WithItems) > 0 { - for iNdEx := len(m.WithItems) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.WithItems[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - if m.TemplateRef != nil { - { - size, err := m.TemplateRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - { - size, err := m.Arguments.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - i -= len(m.Template) - copy(dAtA[i:], m.Template) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Template))) - i-- - dAtA[i] 
= 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *WorkflowTaskResult) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WorkflowTaskResult) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WorkflowTaskResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.NodeResult.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *WorkflowTaskResultList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WorkflowTaskResultList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WorkflowTaskResultList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, 
uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *WorkflowTaskSet) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WorkflowTaskSet) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WorkflowTaskSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *WorkflowTaskSetList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WorkflowTaskSetList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WorkflowTaskSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != 
nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *WorkflowTaskSetSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WorkflowTaskSetSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WorkflowTaskSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Tasks) > 0 { - keysForTasks := make([]string, 0, len(m.Tasks)) - for k := range m.Tasks { - keysForTasks = append(keysForTasks, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForTasks) - for iNdEx := len(keysForTasks) - 1; iNdEx >= 0; iNdEx-- { - v := m.Tasks[string(keysForTasks[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForTasks[iNdEx]) - copy(dAtA[i:], keysForTasks[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForTasks[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *WorkflowTaskSetStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WorkflowTaskSetStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WorkflowTaskSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Nodes) > 0 { - keysForNodes := make([]string, 0, 
len(m.Nodes)) - for k := range m.Nodes { - keysForNodes = append(keysForNodes, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForNodes) - for iNdEx := len(keysForNodes) - 1; iNdEx >= 0; iNdEx-- { - v := m.Nodes[string(keysForNodes[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForNodes[iNdEx]) - copy(dAtA[i:], keysForNodes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodes[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *WorkflowTemplate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WorkflowTemplate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WorkflowTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *WorkflowTemplateList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WorkflowTemplateList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - 
-func (m *WorkflowTemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *WorkflowTemplateRef) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WorkflowTemplateRef) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WorkflowTemplateRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i-- - if m.ClusterScope { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ZipStrategy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ZipStrategy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ZipStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 
1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Amount) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ArchiveStrategy) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Tar != nil { - l = m.Tar.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.None != nil { - l = m.None.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Zip != nil { - l = m.Zip.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *Arguments) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Parameters) > 0 { - for _, e := range m.Parameters { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Artifacts) > 0 { - for _, e := range m.Artifacts { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ArtGCStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.StrategiesProcessed) > 0 { - for k, v := range m.StrategiesProcessed { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + 1 - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.PodsRecouped) > 0 { - for k, v := range m.PodsRecouped { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + 1 - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - n += 2 - return n -} - -func (m *Artifact) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - if m.Mode != nil { - n += 1 + sovGenerated(uint64(*m.Mode)) - } - l = len(m.From) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ArtifactLocation.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.GlobalName) - n += 1 + l + 
sovGenerated(uint64(l)) - if m.Archive != nil { - l = m.Archive.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - l = len(m.SubPath) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - l = len(m.FromExpression) - n += 1 + l + sovGenerated(uint64(l)) - if m.ArtifactGC != nil { - l = m.ArtifactGC.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - return n -} - -func (m *ArtifactGC) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Strategy) - n += 1 + l + sovGenerated(uint64(l)) - if m.PodMetadata != nil { - l = m.PodMetadata.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.ServiceAccountName) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ArtifactGCSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ArtifactsByNode) > 0 { - for k, v := range m.ArtifactsByNode { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *ArtifactGCStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ArtifactResultsByNode) > 0 { - for k, v := range m.ArtifactResultsByNode { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *ArtifactLocation) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ArchiveLogs != nil { - n += 2 - } - if m.S3 != nil { - l = m.S3.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Git != nil { - l = m.Git.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.HTTP != nil { - l = m.HTTP.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Artifactory != nil { - l = m.Artifactory.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.HDFS != nil { - l = 
m.HDFS.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Raw != nil { - l = m.Raw.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.OSS != nil { - l = m.OSS.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.GCS != nil { - l = m.GCS.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Azure != nil { - l = m.Azure.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ArtifactNodeSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ArchiveLocation != nil { - l = m.ArchiveLocation.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Artifacts) > 0 { - for k, v := range m.Artifacts { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *ArtifactPaths) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Artifact.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ArtifactRepository) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ArchiveLogs != nil { - n += 2 - } - if m.S3 != nil { - l = m.S3.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Artifactory != nil { - l = m.Artifactory.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.HDFS != nil { - l = m.HDFS.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.OSS != nil { - l = m.OSS.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.GCS != nil { - l = m.GCS.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Azure != nil { - l = m.Azure.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ArtifactRepositoryRef) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ConfigMap) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ArtifactRepositoryRefStatus) Size() 
(n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ArtifactRepositoryRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.ArtifactRepository != nil { - l = m.ArtifactRepository.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ArtifactResult) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.Error != nil { - l = len(*m.Error) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ArtifactResultNodeStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ArtifactResults) > 0 { - for k, v := range m.ArtifactResults { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *ArtifactSearchQuery) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ArtifactGCStrategies) > 0 { - for k, v := range m.ArtifactGCStrategies { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + 1 - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = len(m.ArtifactName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.TemplateName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.NodeId) - n += 1 + l + sovGenerated(uint64(l)) - if m.Deleted != nil { - n += 2 - } - if len(m.NodeTypes) > 0 { - for k, v := range m.NodeTypes { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + 1 - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *ArtifactSearchResult) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Artifact.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.NodeID) - n += 1 + l + sovGenerated(uint64(l)) 
- return n -} - -func (m *ArtifactoryArtifact) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.URL) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ArtifactoryAuth.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ArtifactoryArtifactRepository) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ArtifactoryAuth.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.RepoURL) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.KeyFormat) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ArtifactoryAuth) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.UsernameSecret != nil { - l = m.UsernameSecret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.PasswordSecret != nil { - l = m.PasswordSecret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *AzureArtifact) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.AzureBlobContainer.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Blob) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *AzureArtifactRepository) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.AzureBlobContainer.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.BlobNameFormat) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *AzureBlobContainer) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Endpoint) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Container) - n += 1 + l + sovGenerated(uint64(l)) - if m.AccountKeySecret != nil { - l = m.AccountKeySecret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - return n -} - -func (m *Backoff) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Duration) - n += 1 + l + sovGenerated(uint64(l)) - if m.Factor != nil { - l = m.Factor.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = 
len(m.MaxDuration) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *BasicAuth) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.UsernameSecret != nil { - l = m.UsernameSecret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.PasswordSecret != nil { - l = m.PasswordSecret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *Cache) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ConfigMap != nil { - l = m.ConfigMap.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ClientCertAuth) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ClientCertSecret != nil { - l = m.ClientCertSecret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ClientKeySecret != nil { - l = m.ClientKeySecret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ClusterWorkflowTemplate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ClusterWorkflowTemplateList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *Column) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Condition) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n 
-} - -func (m *ContainerNode) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Container.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Dependencies) > 0 { - for _, s := range m.Dependencies { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ContainerSetRetryStrategy) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Duration) - n += 1 + l + sovGenerated(uint64(l)) - if m.Retries != nil { - l = m.Retries.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ContainerSetTemplate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.VolumeMounts) > 0 { - for _, e := range m.VolumeMounts { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Containers) > 0 { - for _, e := range m.Containers { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.RetryStrategy != nil { - l = m.RetryStrategy.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ContinueOn) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 2 - n += 2 - return n -} - -func (m *Counter) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *CreateS3BucketOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 2 - return n -} - -func (m *CronWorkflow) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *CronWorkflowList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + 
sovGenerated(uint64(l)) - } - } - return n -} - -func (m *CronWorkflowSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.WorkflowSpec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Schedule) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ConcurrencyPolicy) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.StartingDeadlineSeconds != nil { - n += 1 + sovGenerated(uint64(*m.StartingDeadlineSeconds)) - } - if m.SuccessfulJobsHistoryLimit != nil { - n += 1 + sovGenerated(uint64(*m.SuccessfulJobsHistoryLimit)) - } - if m.FailedJobsHistoryLimit != nil { - n += 1 + sovGenerated(uint64(*m.FailedJobsHistoryLimit)) - } - l = len(m.Timezone) - n += 1 + l + sovGenerated(uint64(l)) - if m.WorkflowMetadata != nil { - l = m.WorkflowMetadata.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *CronWorkflowStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Active) > 0 { - for _, e := range m.Active { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.LastScheduledTime != nil { - l = m.LastScheduledTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *DAGTask) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Template) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Arguments.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.TemplateRef != nil { - l = m.TemplateRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Dependencies) > 0 { - for _, s := range m.Dependencies { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.WithItems) > 0 { - for _, e := range m.WithItems { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.WithParam) - n += 1 + l + 
sovGenerated(uint64(l)) - if m.WithSequence != nil { - l = m.WithSequence.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.When) - n += 1 + l + sovGenerated(uint64(l)) - if m.ContinueOn != nil { - l = m.ContinueOn.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.OnExit) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Depends) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Hooks) > 0 { - for k, v := range m.Hooks { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if m.Inline != nil { - l = m.Inline.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *DAGTemplate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Target) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Tasks) > 0 { - for _, e := range m.Tasks { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.FailFast != nil { - n += 2 - } - return n -} - -func (m *Data) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Source.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Transformation) > 0 { - for _, e := range m.Transformation { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *DataSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ArtifactPaths != nil { - l = m.ArtifactPaths.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *Event) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Selector) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ExecutorConfig) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ServiceAccountName) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *GCSArtifact) Size() (n int) { - if m == nil { - return 0 - } - var l int - 
_ = l - l = m.GCSBucket.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *GCSArtifactRepository) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.GCSBucket.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.KeyFormat) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *GCSBucket) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Bucket) - n += 1 + l + sovGenerated(uint64(l)) - if m.ServiceAccountKeySecret != nil { - l = m.ServiceAccountKeySecret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *Gauge) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - if m.Realtime != nil { - n += 2 - } - l = len(m.Operation) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *GitArtifact) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Repo) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Revision) - n += 1 + l + sovGenerated(uint64(l)) - if m.Depth != nil { - n += 1 + sovGenerated(uint64(*m.Depth)) - } - if len(m.Fetch) > 0 { - for _, s := range m.Fetch { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.UsernameSecret != nil { - l = m.UsernameSecret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.PasswordSecret != nil { - l = m.PasswordSecret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.SSHPrivateKeySecret != nil { - l = m.SSHPrivateKeySecret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - n += 2 - n += 2 - l = len(m.Branch) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *HDFSArtifact) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.HDFSConfig.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n -} - -func (m 
*HDFSArtifactRepository) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.HDFSConfig.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.PathFormat) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n -} - -func (m *HDFSConfig) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.HDFSKrbConfig.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Addresses) > 0 { - for _, s := range m.Addresses { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.HDFSUser) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *HDFSKrbConfig) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.KrbCCacheSecret != nil { - l = m.KrbCCacheSecret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.KrbKeytabSecret != nil { - l = m.KrbKeytabSecret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.KrbUsername) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.KrbRealm) - n += 1 + l + sovGenerated(uint64(l)) - if m.KrbConfigConfigMap != nil { - l = m.KrbConfigConfigMap.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.KrbServicePrincipalName) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *HTTP) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Method) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.URL) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Headers) > 0 { - for _, e := range m.Headers { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.TimeoutSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) - } - l = len(m.Body) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SuccessCondition) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.BodyFrom != nil { - l = m.BodyFrom.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *HTTPArtifact) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.URL) - n 
+= 1 + l + sovGenerated(uint64(l)) - if len(m.Headers) > 0 { - for _, e := range m.Headers { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.Auth != nil { - l = m.Auth.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *HTTPAuth) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ClientCert.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.OAuth2.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.BasicAuth.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *HTTPBodySource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Bytes != nil { - l = len(m.Bytes) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *HTTPHeader) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - if m.ValueFrom != nil { - l = m.ValueFrom.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *HTTPHeaderSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.SecretKeyRef != nil { - l = m.SecretKeyRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *Header) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Histogram) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Buckets) > 0 { - for _, e := range m.Buckets { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *Inputs) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Parameters) > 0 { - for _, e := range m.Parameters { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if 
len(m.Artifacts) > 0 { - for _, e := range m.Artifacts { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *Item) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Value != nil { - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *LabelKeys) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Items) > 0 { - for _, s := range m.Items { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *LabelValueFrom) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Expression) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *LabelValues) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Items) > 0 { - for _, s := range m.Items { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *LifecycleHook) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Template) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Arguments.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.TemplateRef != nil { - l = m.TemplateRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.Expression) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Link) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Scope) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.URL) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ManifestFrom) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Artifact != nil { - l = m.Artifact.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *MemoizationStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 2 - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.CacheName) - n += 1 + l + 
sovGenerated(uint64(l)) - return n -} - -func (m *Memoize) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - if m.Cache != nil { - l = m.Cache.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.MaxAge) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Metadata) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Annotations) > 0 { - for k, v := range m.Annotations { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *MetricLabel) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Metrics) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Prometheus) > 0 { - for _, e := range m.Prometheus { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *Mutex) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *MutexHolding) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Mutex) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Holder) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *MutexStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Holding) > 0 { - for _, e := range m.Holding { - l = 
e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Waiting) > 0 { - for _, e := range m.Waiting { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *NodeFlag) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 2 - n += 2 - return n -} - -func (m *NodeResult) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Phase) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - if m.Outputs != nil { - l = m.Outputs.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.Progress) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *NodeStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DisplayName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.TemplateName) - n += 1 + l + sovGenerated(uint64(l)) - if m.TemplateRef != nil { - l = m.TemplateRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.Phase) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.BoundaryID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = m.StartedAt.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.FinishedAt.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.PodIP) - n += 1 + l + sovGenerated(uint64(l)) - if m.Daemoned != nil { - n += 2 - } - if m.Inputs != nil { - l = m.Inputs.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Outputs != nil { - l = m.Outputs.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Children) > 0 { - for _, s := range m.Children { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } - } - if len(m.OutboundNodes) > 0 { - for _, s := range m.OutboundNodes { - l = len(s) - n += 2 + l + 
sovGenerated(uint64(l)) - } - } - l = len(m.TemplateScope) - n += 2 + l + sovGenerated(uint64(l)) - if len(m.ResourcesDuration) > 0 { - for k, v := range m.ResourcesDuration { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + sovGenerated(uint64(v)) - n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) - } - } - l = len(m.HostNodeName) - n += 2 + l + sovGenerated(uint64(l)) - if m.MemoizationStatus != nil { - l = m.MemoizationStatus.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - n += 2 + sovGenerated(uint64(m.EstimatedDuration)) - if m.SynchronizationStatus != nil { - l = m.SynchronizationStatus.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - l = len(m.Progress) - n += 2 + l + sovGenerated(uint64(l)) - if m.NodeFlag != nil { - l = m.NodeFlag.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *NodeSynchronizationStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Waiting) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *NoneStrategy) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *OAuth2Auth) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ClientIDSecret != nil { - l = m.ClientIDSecret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ClientSecretSecret != nil { - l = m.ClientSecretSecret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.TokenURLSecret != nil { - l = m.TokenURLSecret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Scopes) > 0 { - for _, s := range m.Scopes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.EndpointParams) > 0 { - for _, e := range m.EndpointParams { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *OAuth2EndpointParam) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = 
len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *OSSArtifact) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.OSSBucket.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *OSSArtifactRepository) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.OSSBucket.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.KeyFormat) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *OSSBucket) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Endpoint) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Bucket) - n += 1 + l + sovGenerated(uint64(l)) - if m.AccessKeySecret != nil { - l = m.AccessKeySecret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.SecretKeySecret != nil { - l = m.SecretKeySecret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - l = len(m.SecurityToken) - n += 1 + l + sovGenerated(uint64(l)) - if m.LifecycleRule != nil { - l = m.LifecycleRule.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - return n -} - -func (m *OSSLifecycleRule) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.MarkInfrequentAccessAfterDays)) - n += 1 + sovGenerated(uint64(m.MarkDeletionAfterDays)) - return n -} - -func (m *Object) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Value != nil { - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *Outputs) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Parameters) > 0 { - for _, e := range m.Parameters { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Artifacts) > 0 { - for _, e := range m.Artifacts { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.Result != nil { - l = len(*m.Result) - n += 1 + l + 
sovGenerated(uint64(l)) - } - if m.ExitCode != nil { - l = len(*m.ExitCode) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ParallelSteps) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Steps) > 0 { - for _, e := range m.Steps { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *Parameter) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if m.Default != nil { - l = len(*m.Default) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Value != nil { - l = len(*m.Value) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ValueFrom != nil { - l = m.ValueFrom.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.GlobalName) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Enum) > 0 { - for _, s := range m.Enum { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.Description != nil { - l = len(*m.Description) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *Plugin) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Object.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodGC) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Strategy) - n += 1 + l + sovGenerated(uint64(l)) - if m.LabelSelector != nil { - l = m.LabelSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.DeleteDelayDuration) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Prometheus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Labels) > 0 { - for _, e := range m.Labels { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.Help) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.When) - n += 1 + l + sovGenerated(uint64(l)) - if m.Gauge != nil { - l = m.Gauge.Size() - n += 1 + l + 
sovGenerated(uint64(l)) - } - if m.Histogram != nil { - l = m.Histogram.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Counter != nil { - l = m.Counter.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *RawArtifact) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Data) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ResourceTemplate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Action) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.MergeStrategy) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Manifest) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - l = len(m.SuccessCondition) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FailureCondition) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Flags) > 0 { - for _, s := range m.Flags { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.ManifestFrom != nil { - l = m.ManifestFrom.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *RetryAffinity) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.NodeAntiAffinity != nil { - l = m.NodeAntiAffinity.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *RetryNodeAntiAffinity) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *RetryStrategy) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Limit != nil { - l = m.Limit.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.RetryPolicy) - n += 1 + l + sovGenerated(uint64(l)) - if m.Backoff != nil { - l = m.Backoff.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Affinity != nil { - l = m.Affinity.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.Expression) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *S3Artifact) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = 
m.S3Bucket.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *S3ArtifactRepository) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.S3Bucket.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.KeyFormat) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.KeyPrefix) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *S3Bucket) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Endpoint) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Bucket) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Region) - n += 1 + l + sovGenerated(uint64(l)) - if m.Insecure != nil { - n += 2 - } - if m.AccessKeySecret != nil { - l = m.AccessKeySecret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.SecretKeySecret != nil { - l = m.SecretKeySecret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.RoleARN) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.CreateBucketIfNotPresent != nil { - l = m.CreateBucketIfNotPresent.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.EncryptionOptions != nil { - l = m.EncryptionOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.CASecret != nil { - l = m.CASecret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *S3EncryptionOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.KmsKeyId) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.KmsEncryptionContext) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.ServerSideCustomerKeySecret != nil { - l = m.ServerSideCustomerKeySecret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ScriptTemplate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Container.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Source) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m 
*SemaphoreHolding) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Semaphore) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Holders) > 0 { - for _, s := range m.Holders { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *SemaphoreRef) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ConfigMapKeyRef != nil { - l = m.ConfigMapKeyRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *SemaphoreStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Holding) > 0 { - for _, e := range m.Holding { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Waiting) > 0 { - for _, e := range m.Waiting { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *Sequence) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Count != nil { - l = m.Count.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Start != nil { - l = m.Start.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.End != nil { - l = m.End.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.Format) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Submit) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.WorkflowTemplateRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Arguments != nil { - l = m.Arguments.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *SubmitOpts) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.GenerateName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Entrypoint) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Parameters) > 0 { - for _, s := range m.Parameters 
{ - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.ServiceAccount) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - n += 2 - l = len(m.Labels) - n += 1 + l + sovGenerated(uint64(l)) - if m.OwnerReference != nil { - l = m.OwnerReference.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.Annotations) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.PodPriorityClassName) - n += 1 + l + sovGenerated(uint64(l)) - if m.Priority != nil { - n += 1 + sovGenerated(uint64(*m.Priority)) - } - return n -} - -func (m *SuppliedValueFrom) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *SuspendTemplate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Duration) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Synchronization) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Semaphore != nil { - l = m.Semaphore.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Mutex != nil { - l = m.Mutex.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *SynchronizationStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Semaphore != nil { - l = m.Semaphore.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Mutex != nil { - l = m.Mutex.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *TTLStrategy) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.SecondsAfterCompletion != nil { - n += 1 + sovGenerated(uint64(*m.SecondsAfterCompletion)) - } - if m.SecondsAfterSuccess != nil { - n += 1 + sovGenerated(uint64(*m.SecondsAfterSuccess)) - } - if m.SecondsAfterFailure != nil { - n += 1 + sovGenerated(uint64(*m.SecondsAfterFailure)) - } - return n -} - -func (m *TarStrategy) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.CompressionLevel != nil { - n += 1 + sovGenerated(uint64(*m.CompressionLevel)) - } - 
return n -} - -func (m *Template) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Inputs.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Outputs.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.NodeSelector) > 0 { - for k, v := range m.NodeSelector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if m.Affinity != nil { - l = m.Affinity.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = m.Metadata.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Daemon != nil { - n += 2 - } - if len(m.Steps) > 0 { - for _, e := range m.Steps { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.Container != nil { - l = m.Container.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Script != nil { - l = m.Script.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Resource != nil { - l = m.Resource.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.DAG != nil { - l = m.DAG.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Suspend != nil { - l = m.Suspend.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if len(m.Volumes) > 0 { - for _, e := range m.Volumes { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if len(m.InitContainers) > 0 { - for _, e := range m.InitContainers { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if len(m.Sidecars) > 0 { - for _, e := range m.Sidecars { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if m.ArchiveLocation != nil { - l = m.ArchiveLocation.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.ActiveDeadlineSeconds != nil { - l = m.ActiveDeadlineSeconds.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.RetryStrategy != nil { - l = m.RetryStrategy.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if 
m.Parallelism != nil { - n += 2 + sovGenerated(uint64(*m.Parallelism)) - } - if len(m.Tolerations) > 0 { - for _, e := range m.Tolerations { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - l = len(m.SchedulerName) - n += 2 + l + sovGenerated(uint64(l)) - l = len(m.PriorityClassName) - n += 2 + l + sovGenerated(uint64(l)) - if m.Priority != nil { - n += 2 + sovGenerated(uint64(*m.Priority)) - } - l = len(m.ServiceAccountName) - n += 2 + l + sovGenerated(uint64(l)) - if len(m.HostAliases) > 0 { - for _, e := range m.HostAliases { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if m.SecurityContext != nil { - l = m.SecurityContext.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - l = len(m.PodSpecPatch) - n += 2 + l + sovGenerated(uint64(l)) - if m.AutomountServiceAccountToken != nil { - n += 3 - } - if m.Executor != nil { - l = m.Executor.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.Metrics != nil { - l = m.Metrics.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.Synchronization != nil { - l = m.Synchronization.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.Memoize != nil { - l = m.Memoize.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - l = len(m.Timeout) - n += 2 + l + sovGenerated(uint64(l)) - if m.Data != nil { - l = m.Data.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.ContainerSet != nil { - l = m.ContainerSet.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.FailFast != nil { - n += 3 - } - if m.HTTP != nil { - l = m.HTTP.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.Plugin != nil { - l = m.Plugin.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *TemplateRef) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Template) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n -} - -func (m *TransformationStep) Size() (n int) { - if m == nil { - return 0 - 
} - var l int - _ = l - l = len(m.Expression) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *UserContainer) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Container.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.MirrorVolumeMounts != nil { - n += 2 - } - return n -} - -func (m *ValueFrom) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.JSONPath) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.JQFilter) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Parameter) - n += 1 + l + sovGenerated(uint64(l)) - if m.Default != nil { - l = len(*m.Default) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Supplied != nil { - l = m.Supplied.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.Event) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Expression) - n += 1 + l + sovGenerated(uint64(l)) - if m.ConfigMapKeyRef != nil { - l = m.ConfigMapKeyRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *Version) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Version) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.BuildDate) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.GitCommit) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.GitTag) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.GitTreeState) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.GoVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Compiler) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Platform) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *VolumeClaimGC) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Strategy) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Workflow) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = 
m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *WorkflowArtifactGCTask) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *WorkflowArtifactGCTaskList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *WorkflowEventBinding) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *WorkflowEventBindingList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *WorkflowEventBindingSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Event.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Submit != nil { - l = m.Submit.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *WorkflowLevelArtifactGC) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ArtifactGC.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - l = len(m.PodSpecPatch) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *WorkflowList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range 
m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *WorkflowMetadata) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Annotations) > 0 { - for k, v := range m.Annotations { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.LabelsFrom) > 0 { - for k, v := range m.LabelsFrom { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *WorkflowSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Templates) > 0 { - for _, e := range m.Templates { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.Entrypoint) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Arguments.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ServiceAccountName) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Volumes) > 0 { - for _, e := range m.Volumes { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.VolumeClaimTemplates) > 0 { - for _, e := range m.VolumeClaimTemplates { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.Parallelism != nil { - n += 1 + sovGenerated(uint64(*m.Parallelism)) - } - if m.ArtifactRepositoryRef != nil { - l = m.ArtifactRepositoryRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Suspend != nil { - n += 2 - } - if len(m.NodeSelector) > 0 { - for k, v := range m.NodeSelector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + 
sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if m.Affinity != nil { - l = m.Affinity.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Tolerations) > 0 { - for _, e := range m.Tolerations { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.ImagePullSecrets) > 0 { - for _, e := range m.ImagePullSecrets { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.HostNetwork != nil { - n += 2 - } - if m.DNSPolicy != nil { - l = len(*m.DNSPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.DNSConfig != nil { - l = m.DNSConfig.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - l = len(m.OnExit) - n += 2 + l + sovGenerated(uint64(l)) - if m.ActiveDeadlineSeconds != nil { - n += 2 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) - } - if m.Priority != nil { - n += 2 + sovGenerated(uint64(*m.Priority)) - } - l = len(m.SchedulerName) - n += 2 + l + sovGenerated(uint64(l)) - if m.PodGC != nil { - l = m.PodGC.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - l = len(m.PodPriorityClassName) - n += 2 + l + sovGenerated(uint64(l)) - if m.PodPriority != nil { - n += 2 + sovGenerated(uint64(*m.PodPriority)) - } - if len(m.HostAliases) > 0 { - for _, e := range m.HostAliases { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if m.SecurityContext != nil { - l = m.SecurityContext.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - l = len(m.PodSpecPatch) - n += 2 + l + sovGenerated(uint64(l)) - if m.AutomountServiceAccountToken != nil { - n += 3 - } - if m.Executor != nil { - l = m.Executor.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.TTLStrategy != nil { - l = m.TTLStrategy.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.PodDisruptionBudget != nil { - l = m.PodDisruptionBudget.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.Metrics != nil { - l = m.Metrics.Size() - n += 2 + l + 
sovGenerated(uint64(l)) - } - l = len(m.Shutdown) - n += 2 + l + sovGenerated(uint64(l)) - if m.WorkflowTemplateRef != nil { - l = m.WorkflowTemplateRef.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.Synchronization != nil { - l = m.Synchronization.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.VolumeClaimGC != nil { - l = m.VolumeClaimGC.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.RetryStrategy != nil { - l = m.RetryStrategy.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.PodMetadata != nil { - l = m.PodMetadata.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.TemplateDefaults != nil { - l = m.TemplateDefaults.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.ArchiveLogs != nil { - n += 3 - } - if len(m.Hooks) > 0 { - for k, v := range m.Hooks { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) - } - } - if m.WorkflowMetadata != nil { - l = m.WorkflowMetadata.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.ArtifactGC != nil { - l = m.ArtifactGC.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *WorkflowStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Phase) - n += 1 + l + sovGenerated(uint64(l)) - l = m.StartedAt.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.FinishedAt.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.CompressedNodes) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Nodes) > 0 { - for k, v := range m.Nodes { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.PersistentVolumeClaims) > 0 { - for _, e := range m.PersistentVolumeClaims { - l = e.Size() - n += 1 + 
l + sovGenerated(uint64(l)) - } - } - if m.Outputs != nil { - l = m.Outputs.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.StoredTemplates) > 0 { - for k, v := range m.StoredTemplates { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = len(m.OffloadNodeStatusVersion) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.ResourcesDuration) > 0 { - for k, v := range m.ResourcesDuration { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + sovGenerated(uint64(v)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.StoredWorkflowSpec != nil { - l = m.StoredWorkflowSpec.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Synchronization != nil { - l = m.Synchronization.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 + sovGenerated(uint64(m.EstimatedDuration)) - l = len(m.Progress) - n += 2 + l + sovGenerated(uint64(l)) - if m.ArtifactRepositoryRef != nil { - l = m.ArtifactRepositoryRef.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.ArtifactGCStatus != nil { - l = m.ArtifactGCStatus.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if len(m.TaskResultsCompletionStatus) > 0 { - for k, v := range m.TaskResultsCompletionStatus { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + 1 - n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *WorkflowStep) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Template) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Arguments.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.TemplateRef != nil { - l = 
m.TemplateRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.WithItems) > 0 { - for _, e := range m.WithItems { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.WithParam) - n += 1 + l + sovGenerated(uint64(l)) - if m.WithSequence != nil { - l = m.WithSequence.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.When) - n += 1 + l + sovGenerated(uint64(l)) - if m.ContinueOn != nil { - l = m.ContinueOn.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.OnExit) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Hooks) > 0 { - for k, v := range m.Hooks { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if m.Inline != nil { - l = m.Inline.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *WorkflowTaskResult) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.NodeResult.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *WorkflowTaskResultList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *WorkflowTaskSet) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *WorkflowTaskSetList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + 
sovGenerated(uint64(l)) - } - } - return n -} - -func (m *WorkflowTaskSetSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Tasks) > 0 { - for k, v := range m.Tasks { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *WorkflowTaskSetStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Nodes) > 0 { - for k, v := range m.Nodes { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *WorkflowTemplate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *WorkflowTemplateList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *WorkflowTemplateRef) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n -} - -func (m *ZipStrategy) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Amount) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Amount{`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `}`, - }, "") - return s -} -func (this 
*ArchiveStrategy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ArchiveStrategy{`, - `Tar:` + strings.Replace(this.Tar.String(), "TarStrategy", "TarStrategy", 1) + `,`, - `None:` + strings.Replace(this.None.String(), "NoneStrategy", "NoneStrategy", 1) + `,`, - `Zip:` + strings.Replace(this.Zip.String(), "ZipStrategy", "ZipStrategy", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Arguments) String() string { - if this == nil { - return "nil" - } - repeatedStringForParameters := "[]Parameter{" - for _, f := range this.Parameters { - repeatedStringForParameters += strings.Replace(strings.Replace(f.String(), "Parameter", "Parameter", 1), `&`, ``, 1) + "," - } - repeatedStringForParameters += "}" - repeatedStringForArtifacts := "[]Artifact{" - for _, f := range this.Artifacts { - repeatedStringForArtifacts += strings.Replace(strings.Replace(f.String(), "Artifact", "Artifact", 1), `&`, ``, 1) + "," - } - repeatedStringForArtifacts += "}" - s := strings.Join([]string{`&Arguments{`, - `Parameters:` + repeatedStringForParameters + `,`, - `Artifacts:` + repeatedStringForArtifacts + `,`, - `}`, - }, "") - return s -} -func (this *ArtGCStatus) String() string { - if this == nil { - return "nil" - } - keysForStrategiesProcessed := make([]string, 0, len(this.StrategiesProcessed)) - for k := range this.StrategiesProcessed { - keysForStrategiesProcessed = append(keysForStrategiesProcessed, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForStrategiesProcessed) - mapStringForStrategiesProcessed := "map[ArtifactGCStrategy]bool{" - for _, k := range keysForStrategiesProcessed { - mapStringForStrategiesProcessed += fmt.Sprintf("%v: %v,", k, this.StrategiesProcessed[ArtifactGCStrategy(k)]) - } - mapStringForStrategiesProcessed += "}" - keysForPodsRecouped := make([]string, 0, len(this.PodsRecouped)) - for k := range this.PodsRecouped { - keysForPodsRecouped = append(keysForPodsRecouped, k) - } - 
github_com_gogo_protobuf_sortkeys.Strings(keysForPodsRecouped) - mapStringForPodsRecouped := "map[string]bool{" - for _, k := range keysForPodsRecouped { - mapStringForPodsRecouped += fmt.Sprintf("%v: %v,", k, this.PodsRecouped[k]) - } - mapStringForPodsRecouped += "}" - s := strings.Join([]string{`&ArtGCStatus{`, - `StrategiesProcessed:` + mapStringForStrategiesProcessed + `,`, - `PodsRecouped:` + mapStringForPodsRecouped + `,`, - `NotSpecified:` + fmt.Sprintf("%v", this.NotSpecified) + `,`, - `}`, - }, "") - return s -} -func (this *Artifact) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Artifact{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Mode:` + valueToStringGenerated(this.Mode) + `,`, - `From:` + fmt.Sprintf("%v", this.From) + `,`, - `ArtifactLocation:` + strings.Replace(strings.Replace(this.ArtifactLocation.String(), "ArtifactLocation", "ArtifactLocation", 1), `&`, ``, 1) + `,`, - `GlobalName:` + fmt.Sprintf("%v", this.GlobalName) + `,`, - `Archive:` + strings.Replace(this.Archive.String(), "ArchiveStrategy", "ArchiveStrategy", 1) + `,`, - `Optional:` + fmt.Sprintf("%v", this.Optional) + `,`, - `SubPath:` + fmt.Sprintf("%v", this.SubPath) + `,`, - `RecurseMode:` + fmt.Sprintf("%v", this.RecurseMode) + `,`, - `FromExpression:` + fmt.Sprintf("%v", this.FromExpression) + `,`, - `ArtifactGC:` + strings.Replace(this.ArtifactGC.String(), "ArtifactGC", "ArtifactGC", 1) + `,`, - `Deleted:` + fmt.Sprintf("%v", this.Deleted) + `,`, - `}`, - }, "") - return s -} -func (this *ArtifactGC) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ArtifactGC{`, - `Strategy:` + fmt.Sprintf("%v", this.Strategy) + `,`, - `PodMetadata:` + strings.Replace(this.PodMetadata.String(), "Metadata", "Metadata", 1) + `,`, - `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, - `}`, - }, "") - return s -} -func (this *ArtifactGCSpec) 
String() string { - if this == nil { - return "nil" - } - keysForArtifactsByNode := make([]string, 0, len(this.ArtifactsByNode)) - for k := range this.ArtifactsByNode { - keysForArtifactsByNode = append(keysForArtifactsByNode, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForArtifactsByNode) - mapStringForArtifactsByNode := "map[string]ArtifactNodeSpec{" - for _, k := range keysForArtifactsByNode { - mapStringForArtifactsByNode += fmt.Sprintf("%v: %v,", k, this.ArtifactsByNode[k]) - } - mapStringForArtifactsByNode += "}" - s := strings.Join([]string{`&ArtifactGCSpec{`, - `ArtifactsByNode:` + mapStringForArtifactsByNode + `,`, - `}`, - }, "") - return s -} -func (this *ArtifactGCStatus) String() string { - if this == nil { - return "nil" - } - keysForArtifactResultsByNode := make([]string, 0, len(this.ArtifactResultsByNode)) - for k := range this.ArtifactResultsByNode { - keysForArtifactResultsByNode = append(keysForArtifactResultsByNode, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForArtifactResultsByNode) - mapStringForArtifactResultsByNode := "map[string]ArtifactResultNodeStatus{" - for _, k := range keysForArtifactResultsByNode { - mapStringForArtifactResultsByNode += fmt.Sprintf("%v: %v,", k, this.ArtifactResultsByNode[k]) - } - mapStringForArtifactResultsByNode += "}" - s := strings.Join([]string{`&ArtifactGCStatus{`, - `ArtifactResultsByNode:` + mapStringForArtifactResultsByNode + `,`, - `}`, - }, "") - return s -} -func (this *ArtifactLocation) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ArtifactLocation{`, - `ArchiveLogs:` + valueToStringGenerated(this.ArchiveLogs) + `,`, - `S3:` + strings.Replace(this.S3.String(), "S3Artifact", "S3Artifact", 1) + `,`, - `Git:` + strings.Replace(this.Git.String(), "GitArtifact", "GitArtifact", 1) + `,`, - `HTTP:` + strings.Replace(this.HTTP.String(), "HTTPArtifact", "HTTPArtifact", 1) + `,`, - `Artifactory:` + strings.Replace(this.Artifactory.String(), 
"ArtifactoryArtifact", "ArtifactoryArtifact", 1) + `,`, - `HDFS:` + strings.Replace(this.HDFS.String(), "HDFSArtifact", "HDFSArtifact", 1) + `,`, - `Raw:` + strings.Replace(this.Raw.String(), "RawArtifact", "RawArtifact", 1) + `,`, - `OSS:` + strings.Replace(this.OSS.String(), "OSSArtifact", "OSSArtifact", 1) + `,`, - `GCS:` + strings.Replace(this.GCS.String(), "GCSArtifact", "GCSArtifact", 1) + `,`, - `Azure:` + strings.Replace(this.Azure.String(), "AzureArtifact", "AzureArtifact", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ArtifactNodeSpec) String() string { - if this == nil { - return "nil" - } - keysForArtifacts := make([]string, 0, len(this.Artifacts)) - for k := range this.Artifacts { - keysForArtifacts = append(keysForArtifacts, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForArtifacts) - mapStringForArtifacts := "map[string]Artifact{" - for _, k := range keysForArtifacts { - mapStringForArtifacts += fmt.Sprintf("%v: %v,", k, this.Artifacts[k]) - } - mapStringForArtifacts += "}" - s := strings.Join([]string{`&ArtifactNodeSpec{`, - `ArchiveLocation:` + strings.Replace(this.ArchiveLocation.String(), "ArtifactLocation", "ArtifactLocation", 1) + `,`, - `Artifacts:` + mapStringForArtifacts + `,`, - `}`, - }, "") - return s -} -func (this *ArtifactPaths) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ArtifactPaths{`, - `Artifact:` + strings.Replace(strings.Replace(this.Artifact.String(), "Artifact", "Artifact", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ArtifactRepository) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ArtifactRepository{`, - `ArchiveLogs:` + valueToStringGenerated(this.ArchiveLogs) + `,`, - `S3:` + strings.Replace(this.S3.String(), "S3ArtifactRepository", "S3ArtifactRepository", 1) + `,`, - `Artifactory:` + strings.Replace(this.Artifactory.String(), "ArtifactoryArtifactRepository", "ArtifactoryArtifactRepository", 1) + `,`, - 
`HDFS:` + strings.Replace(this.HDFS.String(), "HDFSArtifactRepository", "HDFSArtifactRepository", 1) + `,`, - `OSS:` + strings.Replace(this.OSS.String(), "OSSArtifactRepository", "OSSArtifactRepository", 1) + `,`, - `GCS:` + strings.Replace(this.GCS.String(), "GCSArtifactRepository", "GCSArtifactRepository", 1) + `,`, - `Azure:` + strings.Replace(this.Azure.String(), "AzureArtifactRepository", "AzureArtifactRepository", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ArtifactResult) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ArtifactResult{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Success:` + fmt.Sprintf("%v", this.Success) + `,`, - `Error:` + valueToStringGenerated(this.Error) + `,`, - `}`, - }, "") - return s -} -func (this *ArtifactResultNodeStatus) String() string { - if this == nil { - return "nil" - } - keysForArtifactResults := make([]string, 0, len(this.ArtifactResults)) - for k := range this.ArtifactResults { - keysForArtifactResults = append(keysForArtifactResults, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForArtifactResults) - mapStringForArtifactResults := "map[string]ArtifactResult{" - for _, k := range keysForArtifactResults { - mapStringForArtifactResults += fmt.Sprintf("%v: %v,", k, this.ArtifactResults[k]) - } - mapStringForArtifactResults += "}" - s := strings.Join([]string{`&ArtifactResultNodeStatus{`, - `ArtifactResults:` + mapStringForArtifactResults + `,`, - `}`, - }, "") - return s -} -func (this *ArtifactSearchQuery) String() string { - if this == nil { - return "nil" - } - keysForArtifactGCStrategies := make([]string, 0, len(this.ArtifactGCStrategies)) - for k := range this.ArtifactGCStrategies { - keysForArtifactGCStrategies = append(keysForArtifactGCStrategies, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForArtifactGCStrategies) - mapStringForArtifactGCStrategies := "map[ArtifactGCStrategy]bool{" - for _, k := range keysForArtifactGCStrategies { - 
mapStringForArtifactGCStrategies += fmt.Sprintf("%v: %v,", k, this.ArtifactGCStrategies[ArtifactGCStrategy(k)]) - } - mapStringForArtifactGCStrategies += "}" - keysForNodeTypes := make([]string, 0, len(this.NodeTypes)) - for k := range this.NodeTypes { - keysForNodeTypes = append(keysForNodeTypes, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForNodeTypes) - mapStringForNodeTypes := "map[NodeType]bool{" - for _, k := range keysForNodeTypes { - mapStringForNodeTypes += fmt.Sprintf("%v: %v,", k, this.NodeTypes[NodeType(k)]) - } - mapStringForNodeTypes += "}" - s := strings.Join([]string{`&ArtifactSearchQuery{`, - `ArtifactGCStrategies:` + mapStringForArtifactGCStrategies + `,`, - `ArtifactName:` + fmt.Sprintf("%v", this.ArtifactName) + `,`, - `TemplateName:` + fmt.Sprintf("%v", this.TemplateName) + `,`, - `NodeId:` + fmt.Sprintf("%v", this.NodeId) + `,`, - `Deleted:` + valueToStringGenerated(this.Deleted) + `,`, - `NodeTypes:` + mapStringForNodeTypes + `,`, - `}`, - }, "") - return s -} -func (this *ArtifactSearchResult) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ArtifactSearchResult{`, - `Artifact:` + strings.Replace(strings.Replace(this.Artifact.String(), "Artifact", "Artifact", 1), `&`, ``, 1) + `,`, - `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, - `}`, - }, "") - return s -} -func (this *ArtifactoryArtifact) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ArtifactoryArtifact{`, - `URL:` + fmt.Sprintf("%v", this.URL) + `,`, - `ArtifactoryAuth:` + strings.Replace(strings.Replace(this.ArtifactoryAuth.String(), "ArtifactoryAuth", "ArtifactoryAuth", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ArtifactoryArtifactRepository) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ArtifactoryArtifactRepository{`, - `ArtifactoryAuth:` + strings.Replace(strings.Replace(this.ArtifactoryAuth.String(), "ArtifactoryAuth", 
"ArtifactoryAuth", 1), `&`, ``, 1) + `,`, - `RepoURL:` + fmt.Sprintf("%v", this.RepoURL) + `,`, - `KeyFormat:` + fmt.Sprintf("%v", this.KeyFormat) + `,`, - `}`, - }, "") - return s -} -func (this *ArtifactoryAuth) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ArtifactoryAuth{`, - `UsernameSecret:` + strings.Replace(fmt.Sprintf("%v", this.UsernameSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `PasswordSecret:` + strings.Replace(fmt.Sprintf("%v", this.PasswordSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *AzureArtifact) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AzureArtifact{`, - `AzureBlobContainer:` + strings.Replace(strings.Replace(this.AzureBlobContainer.String(), "AzureBlobContainer", "AzureBlobContainer", 1), `&`, ``, 1) + `,`, - `Blob:` + fmt.Sprintf("%v", this.Blob) + `,`, - `}`, - }, "") - return s -} -func (this *AzureArtifactRepository) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AzureArtifactRepository{`, - `AzureBlobContainer:` + strings.Replace(strings.Replace(this.AzureBlobContainer.String(), "AzureBlobContainer", "AzureBlobContainer", 1), `&`, ``, 1) + `,`, - `BlobNameFormat:` + fmt.Sprintf("%v", this.BlobNameFormat) + `,`, - `}`, - }, "") - return s -} -func (this *AzureBlobContainer) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AzureBlobContainer{`, - `Endpoint:` + fmt.Sprintf("%v", this.Endpoint) + `,`, - `Container:` + fmt.Sprintf("%v", this.Container) + `,`, - `AccountKeySecret:` + strings.Replace(fmt.Sprintf("%v", this.AccountKeySecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `UseSDKCreds:` + fmt.Sprintf("%v", this.UseSDKCreds) + `,`, - `}`, - }, "") - return s -} -func (this *Backoff) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Backoff{`, 
- `Duration:` + fmt.Sprintf("%v", this.Duration) + `,`, - `Factor:` + strings.Replace(fmt.Sprintf("%v", this.Factor), "IntOrString", "intstr.IntOrString", 1) + `,`, - `MaxDuration:` + fmt.Sprintf("%v", this.MaxDuration) + `,`, - `}`, - }, "") - return s -} -func (this *BasicAuth) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&BasicAuth{`, - `UsernameSecret:` + strings.Replace(fmt.Sprintf("%v", this.UsernameSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `PasswordSecret:` + strings.Replace(fmt.Sprintf("%v", this.PasswordSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Cache) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Cache{`, - `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapKeySelector", "v1.ConfigMapKeySelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClientCertAuth) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClientCertAuth{`, - `ClientCertSecret:` + strings.Replace(fmt.Sprintf("%v", this.ClientCertSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `ClientKeySecret:` + strings.Replace(fmt.Sprintf("%v", this.ClientKeySecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterWorkflowTemplate) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterWorkflowTemplate{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "WorkflowSpec", "WorkflowSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterWorkflowTemplateList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]ClusterWorkflowTemplate{" - for _, f := 
range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterWorkflowTemplate", "ClusterWorkflowTemplate", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ClusterWorkflowTemplateList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *Column) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Column{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `}`, - }, "") - return s -} -func (this *Condition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Condition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerNode) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ContainerNode{`, - `Container:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Container), "Container", "v1.Container", 1), `&`, ``, 1) + `,`, - `Dependencies:` + fmt.Sprintf("%v", this.Dependencies) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerSetRetryStrategy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ContainerSetRetryStrategy{`, - `Duration:` + fmt.Sprintf("%v", this.Duration) + `,`, - `Retries:` + strings.Replace(fmt.Sprintf("%v", this.Retries), "IntOrString", "intstr.IntOrString", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerSetTemplate) String() string { - if this == nil { - return "nil" - } - repeatedStringForVolumeMounts := "[]VolumeMount{" - for _, f := range this.VolumeMounts { - repeatedStringForVolumeMounts 
+= fmt.Sprintf("%v", f) + "," - } - repeatedStringForVolumeMounts += "}" - repeatedStringForContainers := "[]ContainerNode{" - for _, f := range this.Containers { - repeatedStringForContainers += strings.Replace(strings.Replace(f.String(), "ContainerNode", "ContainerNode", 1), `&`, ``, 1) + "," - } - repeatedStringForContainers += "}" - s := strings.Join([]string{`&ContainerSetTemplate{`, - `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, - `Containers:` + repeatedStringForContainers + `,`, - `RetryStrategy:` + strings.Replace(this.RetryStrategy.String(), "ContainerSetRetryStrategy", "ContainerSetRetryStrategy", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ContinueOn) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ContinueOn{`, - `Error:` + fmt.Sprintf("%v", this.Error) + `,`, - `Failed:` + fmt.Sprintf("%v", this.Failed) + `,`, - `}`, - }, "") - return s -} -func (this *Counter) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Counter{`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `}`, - }, "") - return s -} -func (this *CreateS3BucketOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CreateS3BucketOptions{`, - `ObjectLocking:` + fmt.Sprintf("%v", this.ObjectLocking) + `,`, - `}`, - }, "") - return s -} -func (this *CronWorkflow) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CronWorkflow{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CronWorkflowSpec", "CronWorkflowSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CronWorkflowStatus", "CronWorkflowStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *CronWorkflowList) String() string { - if this == nil { 
- return "nil" - } - repeatedStringForItems := "[]CronWorkflow{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CronWorkflow", "CronWorkflow", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&CronWorkflowList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *CronWorkflowSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CronWorkflowSpec{`, - `WorkflowSpec:` + strings.Replace(strings.Replace(this.WorkflowSpec.String(), "WorkflowSpec", "WorkflowSpec", 1), `&`, ``, 1) + `,`, - `Schedule:` + fmt.Sprintf("%v", this.Schedule) + `,`, - `ConcurrencyPolicy:` + fmt.Sprintf("%v", this.ConcurrencyPolicy) + `,`, - `Suspend:` + fmt.Sprintf("%v", this.Suspend) + `,`, - `StartingDeadlineSeconds:` + valueToStringGenerated(this.StartingDeadlineSeconds) + `,`, - `SuccessfulJobsHistoryLimit:` + valueToStringGenerated(this.SuccessfulJobsHistoryLimit) + `,`, - `FailedJobsHistoryLimit:` + valueToStringGenerated(this.FailedJobsHistoryLimit) + `,`, - `Timezone:` + fmt.Sprintf("%v", this.Timezone) + `,`, - `WorkflowMetadata:` + strings.Replace(fmt.Sprintf("%v", this.WorkflowMetadata), "ObjectMeta", "v11.ObjectMeta", 1) + `,`, - `}`, - }, "") - return s -} -func (this *CronWorkflowStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForActive := "[]ObjectReference{" - for _, f := range this.Active { - repeatedStringForActive += fmt.Sprintf("%v", f) + "," - } - repeatedStringForActive += "}" - repeatedStringForConditions := "[]Condition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "Condition", "Condition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" - s := 
strings.Join([]string{`&CronWorkflowStatus{`, - `Active:` + repeatedStringForActive + `,`, - `LastScheduledTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduledTime), "Time", "v11.Time", 1) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, - `}`, - }, "") - return s -} -func (this *DAGTask) String() string { - if this == nil { - return "nil" - } - repeatedStringForWithItems := "[]Item{" - for _, f := range this.WithItems { - repeatedStringForWithItems += fmt.Sprintf("%v", f) + "," - } - repeatedStringForWithItems += "}" - keysForHooks := make([]string, 0, len(this.Hooks)) - for k := range this.Hooks { - keysForHooks = append(keysForHooks, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForHooks) - mapStringForHooks := "LifecycleHooks{" - for _, k := range keysForHooks { - mapStringForHooks += fmt.Sprintf("%v: %v,", k, this.Hooks[LifecycleEvent(k)]) - } - mapStringForHooks += "}" - s := strings.Join([]string{`&DAGTask{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Template:` + fmt.Sprintf("%v", this.Template) + `,`, - `Arguments:` + strings.Replace(strings.Replace(this.Arguments.String(), "Arguments", "Arguments", 1), `&`, ``, 1) + `,`, - `TemplateRef:` + strings.Replace(this.TemplateRef.String(), "TemplateRef", "TemplateRef", 1) + `,`, - `Dependencies:` + fmt.Sprintf("%v", this.Dependencies) + `,`, - `WithItems:` + repeatedStringForWithItems + `,`, - `WithParam:` + fmt.Sprintf("%v", this.WithParam) + `,`, - `WithSequence:` + strings.Replace(this.WithSequence.String(), "Sequence", "Sequence", 1) + `,`, - `When:` + fmt.Sprintf("%v", this.When) + `,`, - `ContinueOn:` + strings.Replace(this.ContinueOn.String(), "ContinueOn", "ContinueOn", 1) + `,`, - `OnExit:` + fmt.Sprintf("%v", this.OnExit) + `,`, - `Depends:` + fmt.Sprintf("%v", this.Depends) + `,`, - `Hooks:` + mapStringForHooks + `,`, - `Inline:` + strings.Replace(this.Inline.String(), "Template", "Template", 1) + `,`, - `}`, - }, "") - return s -} -func (this *DAGTemplate) 
String() string { - if this == nil { - return "nil" - } - repeatedStringForTasks := "[]DAGTask{" - for _, f := range this.Tasks { - repeatedStringForTasks += strings.Replace(strings.Replace(f.String(), "DAGTask", "DAGTask", 1), `&`, ``, 1) + "," - } - repeatedStringForTasks += "}" - s := strings.Join([]string{`&DAGTemplate{`, - `Target:` + fmt.Sprintf("%v", this.Target) + `,`, - `Tasks:` + repeatedStringForTasks + `,`, - `FailFast:` + valueToStringGenerated(this.FailFast) + `,`, - `}`, - }, "") - return s -} -func (this *Data) String() string { - if this == nil { - return "nil" - } - repeatedStringForTransformation := "[]TransformationStep{" - for _, f := range this.Transformation { - repeatedStringForTransformation += strings.Replace(strings.Replace(f.String(), "TransformationStep", "TransformationStep", 1), `&`, ``, 1) + "," - } - repeatedStringForTransformation += "}" - s := strings.Join([]string{`&Data{`, - `Source:` + strings.Replace(strings.Replace(this.Source.String(), "DataSource", "DataSource", 1), `&`, ``, 1) + `,`, - `Transformation:` + repeatedStringForTransformation + `,`, - `}`, - }, "") - return s -} -func (this *DataSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DataSource{`, - `ArtifactPaths:` + strings.Replace(this.ArtifactPaths.String(), "ArtifactPaths", "ArtifactPaths", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Event) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Event{`, - `Selector:` + fmt.Sprintf("%v", this.Selector) + `,`, - `}`, - }, "") - return s -} -func (this *ExecutorConfig) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ExecutorConfig{`, - `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, - `}`, - }, "") - return s -} -func (this *GCSArtifact) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&GCSArtifact{`, - `GCSBucket:` + 
strings.Replace(strings.Replace(this.GCSBucket.String(), "GCSBucket", "GCSBucket", 1), `&`, ``, 1) + `,`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `}`, - }, "") - return s -} -func (this *GCSArtifactRepository) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&GCSArtifactRepository{`, - `GCSBucket:` + strings.Replace(strings.Replace(this.GCSBucket.String(), "GCSBucket", "GCSBucket", 1), `&`, ``, 1) + `,`, - `KeyFormat:` + fmt.Sprintf("%v", this.KeyFormat) + `,`, - `}`, - }, "") - return s -} -func (this *GCSBucket) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&GCSBucket{`, - `Bucket:` + fmt.Sprintf("%v", this.Bucket) + `,`, - `ServiceAccountKeySecret:` + strings.Replace(fmt.Sprintf("%v", this.ServiceAccountKeySecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Gauge) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Gauge{`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `Realtime:` + valueToStringGenerated(this.Realtime) + `,`, - `Operation:` + fmt.Sprintf("%v", this.Operation) + `,`, - `}`, - }, "") - return s -} -func (this *GitArtifact) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&GitArtifact{`, - `Repo:` + fmt.Sprintf("%v", this.Repo) + `,`, - `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, - `Depth:` + valueToStringGenerated(this.Depth) + `,`, - `Fetch:` + fmt.Sprintf("%v", this.Fetch) + `,`, - `UsernameSecret:` + strings.Replace(fmt.Sprintf("%v", this.UsernameSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `PasswordSecret:` + strings.Replace(fmt.Sprintf("%v", this.PasswordSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `SSHPrivateKeySecret:` + strings.Replace(fmt.Sprintf("%v", this.SSHPrivateKeySecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `InsecureIgnoreHostKey:` + 
fmt.Sprintf("%v", this.InsecureIgnoreHostKey) + `,`, - `DisableSubmodules:` + fmt.Sprintf("%v", this.DisableSubmodules) + `,`, - `SingleBranch:` + fmt.Sprintf("%v", this.SingleBranch) + `,`, - `Branch:` + fmt.Sprintf("%v", this.Branch) + `,`, - `}`, - }, "") - return s -} -func (this *HDFSArtifact) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HDFSArtifact{`, - `HDFSConfig:` + strings.Replace(strings.Replace(this.HDFSConfig.String(), "HDFSConfig", "HDFSConfig", 1), `&`, ``, 1) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Force:` + fmt.Sprintf("%v", this.Force) + `,`, - `}`, - }, "") - return s -} -func (this *HDFSArtifactRepository) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HDFSArtifactRepository{`, - `HDFSConfig:` + strings.Replace(strings.Replace(this.HDFSConfig.String(), "HDFSConfig", "HDFSConfig", 1), `&`, ``, 1) + `,`, - `PathFormat:` + fmt.Sprintf("%v", this.PathFormat) + `,`, - `Force:` + fmt.Sprintf("%v", this.Force) + `,`, - `}`, - }, "") - return s -} -func (this *HDFSConfig) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HDFSConfig{`, - `HDFSKrbConfig:` + strings.Replace(strings.Replace(this.HDFSKrbConfig.String(), "HDFSKrbConfig", "HDFSKrbConfig", 1), `&`, ``, 1) + `,`, - `Addresses:` + fmt.Sprintf("%v", this.Addresses) + `,`, - `HDFSUser:` + fmt.Sprintf("%v", this.HDFSUser) + `,`, - `}`, - }, "") - return s -} -func (this *HDFSKrbConfig) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HDFSKrbConfig{`, - `KrbCCacheSecret:` + strings.Replace(fmt.Sprintf("%v", this.KrbCCacheSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `KrbKeytabSecret:` + strings.Replace(fmt.Sprintf("%v", this.KrbKeytabSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `KrbUsername:` + fmt.Sprintf("%v", this.KrbUsername) + `,`, - `KrbRealm:` + fmt.Sprintf("%v", this.KrbRealm) + 
`,`, - `KrbConfigConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.KrbConfigConfigMap), "ConfigMapKeySelector", "v1.ConfigMapKeySelector", 1) + `,`, - `KrbServicePrincipalName:` + fmt.Sprintf("%v", this.KrbServicePrincipalName) + `,`, - `}`, - }, "") - return s -} -func (this *HTTP) String() string { - if this == nil { - return "nil" - } - repeatedStringForHeaders := "[]HTTPHeader{" - for _, f := range this.Headers { - repeatedStringForHeaders += strings.Replace(strings.Replace(f.String(), "HTTPHeader", "HTTPHeader", 1), `&`, ``, 1) + "," - } - repeatedStringForHeaders += "}" - s := strings.Join([]string{`&HTTP{`, - `Method:` + fmt.Sprintf("%v", this.Method) + `,`, - `URL:` + fmt.Sprintf("%v", this.URL) + `,`, - `Headers:` + repeatedStringForHeaders + `,`, - `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, - `Body:` + fmt.Sprintf("%v", this.Body) + `,`, - `SuccessCondition:` + fmt.Sprintf("%v", this.SuccessCondition) + `,`, - `InsecureSkipVerify:` + fmt.Sprintf("%v", this.InsecureSkipVerify) + `,`, - `BodyFrom:` + strings.Replace(this.BodyFrom.String(), "HTTPBodySource", "HTTPBodySource", 1) + `,`, - `}`, - }, "") - return s -} -func (this *HTTPArtifact) String() string { - if this == nil { - return "nil" - } - repeatedStringForHeaders := "[]Header{" - for _, f := range this.Headers { - repeatedStringForHeaders += strings.Replace(strings.Replace(f.String(), "Header", "Header", 1), `&`, ``, 1) + "," - } - repeatedStringForHeaders += "}" - s := strings.Join([]string{`&HTTPArtifact{`, - `URL:` + fmt.Sprintf("%v", this.URL) + `,`, - `Headers:` + repeatedStringForHeaders + `,`, - `Auth:` + strings.Replace(this.Auth.String(), "HTTPAuth", "HTTPAuth", 1) + `,`, - `}`, - }, "") - return s -} -func (this *HTTPAuth) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HTTPAuth{`, - `ClientCert:` + strings.Replace(strings.Replace(this.ClientCert.String(), "ClientCertAuth", "ClientCertAuth", 1), `&`, ``, 1) + `,`, - 
`OAuth2:` + strings.Replace(strings.Replace(this.OAuth2.String(), "OAuth2Auth", "OAuth2Auth", 1), `&`, ``, 1) + `,`, - `BasicAuth:` + strings.Replace(strings.Replace(this.BasicAuth.String(), "BasicAuth", "BasicAuth", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *HTTPBodySource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HTTPBodySource{`, - `Bytes:` + valueToStringGenerated(this.Bytes) + `,`, - `}`, - }, "") - return s -} -func (this *HTTPHeader) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HTTPHeader{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `ValueFrom:` + strings.Replace(this.ValueFrom.String(), "HTTPHeaderSource", "HTTPHeaderSource", 1) + `,`, - `}`, - }, "") - return s -} -func (this *HTTPHeaderSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HTTPHeaderSource{`, - `SecretKeyRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretKeyRef), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Header) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Header{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `}`, - }, "") - return s -} -func (this *Histogram) String() string { - if this == nil { - return "nil" - } - repeatedStringForBuckets := "[]Amount{" - for _, f := range this.Buckets { - repeatedStringForBuckets += strings.Replace(strings.Replace(f.String(), "Amount", "Amount", 1), `&`, ``, 1) + "," - } - repeatedStringForBuckets += "}" - s := strings.Join([]string{`&Histogram{`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `Buckets:` + repeatedStringForBuckets + `,`, - `}`, - }, "") - return s -} -func (this *Inputs) String() string { - if this == nil { - return "nil" - } - repeatedStringForParameters := 
"[]Parameter{" - for _, f := range this.Parameters { - repeatedStringForParameters += strings.Replace(strings.Replace(f.String(), "Parameter", "Parameter", 1), `&`, ``, 1) + "," - } - repeatedStringForParameters += "}" - repeatedStringForArtifacts := "[]Artifact{" - for _, f := range this.Artifacts { - repeatedStringForArtifacts += strings.Replace(strings.Replace(f.String(), "Artifact", "Artifact", 1), `&`, ``, 1) + "," - } - repeatedStringForArtifacts += "}" - s := strings.Join([]string{`&Inputs{`, - `Parameters:` + repeatedStringForParameters + `,`, - `Artifacts:` + repeatedStringForArtifacts + `,`, - `}`, - }, "") - return s -} -func (this *LabelKeys) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LabelKeys{`, - `Items:` + fmt.Sprintf("%v", this.Items) + `,`, - `}`, - }, "") - return s -} -func (this *LabelValueFrom) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LabelValueFrom{`, - `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, - `}`, - }, "") - return s -} -func (this *LabelValues) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LabelValues{`, - `Items:` + fmt.Sprintf("%v", this.Items) + `,`, - `}`, - }, "") - return s -} -func (this *LifecycleHook) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LifecycleHook{`, - `Template:` + fmt.Sprintf("%v", this.Template) + `,`, - `Arguments:` + strings.Replace(strings.Replace(this.Arguments.String(), "Arguments", "Arguments", 1), `&`, ``, 1) + `,`, - `TemplateRef:` + strings.Replace(this.TemplateRef.String(), "TemplateRef", "TemplateRef", 1) + `,`, - `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, - `}`, - }, "") - return s -} -func (this *Link) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Link{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Scope:` + fmt.Sprintf("%v", this.Scope) + `,`, - `URL:` 
+ fmt.Sprintf("%v", this.URL) + `,`, - `}`, - }, "") - return s -} -func (this *ManifestFrom) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ManifestFrom{`, - `Artifact:` + strings.Replace(this.Artifact.String(), "Artifact", "Artifact", 1) + `,`, - `}`, - }, "") - return s -} -func (this *MemoizationStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MemoizationStatus{`, - `Hit:` + fmt.Sprintf("%v", this.Hit) + `,`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `CacheName:` + fmt.Sprintf("%v", this.CacheName) + `,`, - `}`, - }, "") - return s -} -func (this *Memoize) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Memoize{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Cache:` + strings.Replace(this.Cache.String(), "Cache", "Cache", 1) + `,`, - `MaxAge:` + fmt.Sprintf("%v", this.MaxAge) + `,`, - `}`, - }, "") - return s -} -func (this *Metadata) String() string { - if this == nil { - return "nil" - } - keysForAnnotations := make([]string, 0, len(this.Annotations)) - for k := range this.Annotations { - keysForAnnotations = append(keysForAnnotations, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) - mapStringForAnnotations := "map[string]string{" - for _, k := range keysForAnnotations { - mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) - } - mapStringForAnnotations += "}" - keysForLabels := make([]string, 0, len(this.Labels)) - for k := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&Metadata{`, - `Annotations:` + mapStringForAnnotations + `,`, - `Labels:` + mapStringForLabels + `,`, - `}`, - }, "") - return s 
-} -func (this *MetricLabel) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MetricLabel{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `}`, - }, "") - return s -} -func (this *Metrics) String() string { - if this == nil { - return "nil" - } - repeatedStringForPrometheus := "[]*Prometheus{" - for _, f := range this.Prometheus { - repeatedStringForPrometheus += strings.Replace(f.String(), "Prometheus", "Prometheus", 1) + "," - } - repeatedStringForPrometheus += "}" - s := strings.Join([]string{`&Metrics{`, - `Prometheus:` + repeatedStringForPrometheus + `,`, - `}`, - }, "") - return s -} -func (this *Mutex) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Mutex{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `}`, - }, "") - return s -} -func (this *MutexHolding) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MutexHolding{`, - `Mutex:` + fmt.Sprintf("%v", this.Mutex) + `,`, - `Holder:` + fmt.Sprintf("%v", this.Holder) + `,`, - `}`, - }, "") - return s -} -func (this *MutexStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForHolding := "[]MutexHolding{" - for _, f := range this.Holding { - repeatedStringForHolding += strings.Replace(strings.Replace(f.String(), "MutexHolding", "MutexHolding", 1), `&`, ``, 1) + "," - } - repeatedStringForHolding += "}" - repeatedStringForWaiting := "[]MutexHolding{" - for _, f := range this.Waiting { - repeatedStringForWaiting += strings.Replace(strings.Replace(f.String(), "MutexHolding", "MutexHolding", 1), `&`, ``, 1) + "," - } - repeatedStringForWaiting += "}" - s := strings.Join([]string{`&MutexStatus{`, - `Holding:` + repeatedStringForHolding + `,`, - `Waiting:` + repeatedStringForWaiting + `,`, - `}`, - }, "") - return s -} -func (this *NodeFlag) String() string { - if 
this == nil { - return "nil" - } - s := strings.Join([]string{`&NodeFlag{`, - `Hooked:` + fmt.Sprintf("%v", this.Hooked) + `,`, - `Retried:` + fmt.Sprintf("%v", this.Retried) + `,`, - `}`, - }, "") - return s -} -func (this *NodeResult) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NodeResult{`, - `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Outputs:` + strings.Replace(this.Outputs.String(), "Outputs", "Outputs", 1) + `,`, - `Progress:` + fmt.Sprintf("%v", this.Progress) + `,`, - `}`, - }, "") - return s -} -func (this *NodeStatus) String() string { - if this == nil { - return "nil" - } - keysForResourcesDuration := make([]string, 0, len(this.ResourcesDuration)) - for k := range this.ResourcesDuration { - keysForResourcesDuration = append(keysForResourcesDuration, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForResourcesDuration) - mapStringForResourcesDuration := "ResourcesDuration{" - for _, k := range keysForResourcesDuration { - mapStringForResourcesDuration += fmt.Sprintf("%v: %v,", k, this.ResourcesDuration[k8s_io_api_core_v1.ResourceName(k)]) - } - mapStringForResourcesDuration += "}" - s := strings.Join([]string{`&NodeStatus{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `DisplayName:` + fmt.Sprintf("%v", this.DisplayName) + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `TemplateName:` + fmt.Sprintf("%v", this.TemplateName) + `,`, - `TemplateRef:` + strings.Replace(this.TemplateRef.String(), "TemplateRef", "TemplateRef", 1) + `,`, - `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, - `BoundaryID:` + fmt.Sprintf("%v", this.BoundaryID) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `StartedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, - `FinishedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", 
this.FinishedAt), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, - `PodIP:` + fmt.Sprintf("%v", this.PodIP) + `,`, - `Daemoned:` + valueToStringGenerated(this.Daemoned) + `,`, - `Inputs:` + strings.Replace(this.Inputs.String(), "Inputs", "Inputs", 1) + `,`, - `Outputs:` + strings.Replace(this.Outputs.String(), "Outputs", "Outputs", 1) + `,`, - `Children:` + fmt.Sprintf("%v", this.Children) + `,`, - `OutboundNodes:` + fmt.Sprintf("%v", this.OutboundNodes) + `,`, - `TemplateScope:` + fmt.Sprintf("%v", this.TemplateScope) + `,`, - `ResourcesDuration:` + mapStringForResourcesDuration + `,`, - `HostNodeName:` + fmt.Sprintf("%v", this.HostNodeName) + `,`, - `MemoizationStatus:` + strings.Replace(this.MemoizationStatus.String(), "MemoizationStatus", "MemoizationStatus", 1) + `,`, - `EstimatedDuration:` + fmt.Sprintf("%v", this.EstimatedDuration) + `,`, - `SynchronizationStatus:` + strings.Replace(this.SynchronizationStatus.String(), "NodeSynchronizationStatus", "NodeSynchronizationStatus", 1) + `,`, - `Progress:` + fmt.Sprintf("%v", this.Progress) + `,`, - `NodeFlag:` + strings.Replace(this.NodeFlag.String(), "NodeFlag", "NodeFlag", 1) + `,`, - `}`, - }, "") - return s -} -func (this *NodeSynchronizationStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NodeSynchronizationStatus{`, - `Waiting:` + fmt.Sprintf("%v", this.Waiting) + `,`, - `}`, - }, "") - return s -} -func (this *NoneStrategy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NoneStrategy{`, - `}`, - }, "") - return s -} -func (this *OAuth2Auth) String() string { - if this == nil { - return "nil" - } - repeatedStringForEndpointParams := "[]OAuth2EndpointParam{" - for _, f := range this.EndpointParams { - repeatedStringForEndpointParams += strings.Replace(strings.Replace(f.String(), "OAuth2EndpointParam", "OAuth2EndpointParam", 1), `&`, ``, 1) + "," - } - repeatedStringForEndpointParams += "}" - s := 
strings.Join([]string{`&OAuth2Auth{`, - `ClientIDSecret:` + strings.Replace(fmt.Sprintf("%v", this.ClientIDSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `ClientSecretSecret:` + strings.Replace(fmt.Sprintf("%v", this.ClientSecretSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `TokenURLSecret:` + strings.Replace(fmt.Sprintf("%v", this.TokenURLSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`, - `EndpointParams:` + repeatedStringForEndpointParams + `,`, - `}`, - }, "") - return s -} -func (this *OAuth2EndpointParam) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&OAuth2EndpointParam{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `}`, - }, "") - return s -} -func (this *OSSArtifact) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&OSSArtifact{`, - `OSSBucket:` + strings.Replace(strings.Replace(this.OSSBucket.String(), "OSSBucket", "OSSBucket", 1), `&`, ``, 1) + `,`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `}`, - }, "") - return s -} -func (this *OSSArtifactRepository) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&OSSArtifactRepository{`, - `OSSBucket:` + strings.Replace(strings.Replace(this.OSSBucket.String(), "OSSBucket", "OSSBucket", 1), `&`, ``, 1) + `,`, - `KeyFormat:` + fmt.Sprintf("%v", this.KeyFormat) + `,`, - `}`, - }, "") - return s -} -func (this *OSSBucket) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&OSSBucket{`, - `Endpoint:` + fmt.Sprintf("%v", this.Endpoint) + `,`, - `Bucket:` + fmt.Sprintf("%v", this.Bucket) + `,`, - `AccessKeySecret:` + strings.Replace(fmt.Sprintf("%v", this.AccessKeySecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `SecretKeySecret:` + strings.Replace(fmt.Sprintf("%v", this.SecretKeySecret), 
"SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `CreateBucketIfNotPresent:` + fmt.Sprintf("%v", this.CreateBucketIfNotPresent) + `,`, - `SecurityToken:` + fmt.Sprintf("%v", this.SecurityToken) + `,`, - `LifecycleRule:` + strings.Replace(this.LifecycleRule.String(), "OSSLifecycleRule", "OSSLifecycleRule", 1) + `,`, - `UseSDKCreds:` + fmt.Sprintf("%v", this.UseSDKCreds) + `,`, - `}`, - }, "") - return s -} -func (this *OSSLifecycleRule) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&OSSLifecycleRule{`, - `MarkInfrequentAccessAfterDays:` + fmt.Sprintf("%v", this.MarkInfrequentAccessAfterDays) + `,`, - `MarkDeletionAfterDays:` + fmt.Sprintf("%v", this.MarkDeletionAfterDays) + `,`, - `}`, - }, "") - return s -} -func (this *Object) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Object{`, - `Value:` + valueToStringGenerated(this.Value) + `,`, - `}`, - }, "") - return s -} -func (this *Outputs) String() string { - if this == nil { - return "nil" - } - repeatedStringForParameters := "[]Parameter{" - for _, f := range this.Parameters { - repeatedStringForParameters += strings.Replace(strings.Replace(f.String(), "Parameter", "Parameter", 1), `&`, ``, 1) + "," - } - repeatedStringForParameters += "}" - repeatedStringForArtifacts := "[]Artifact{" - for _, f := range this.Artifacts { - repeatedStringForArtifacts += strings.Replace(strings.Replace(f.String(), "Artifact", "Artifact", 1), `&`, ``, 1) + "," - } - repeatedStringForArtifacts += "}" - s := strings.Join([]string{`&Outputs{`, - `Parameters:` + repeatedStringForParameters + `,`, - `Artifacts:` + repeatedStringForArtifacts + `,`, - `Result:` + valueToStringGenerated(this.Result) + `,`, - `ExitCode:` + valueToStringGenerated(this.ExitCode) + `,`, - `}`, - }, "") - return s -} -func (this *ParallelSteps) String() string { - if this == nil { - return "nil" - } - repeatedStringForSteps := "[]WorkflowStep{" - for _, f := range this.Steps { 
- repeatedStringForSteps += strings.Replace(strings.Replace(f.String(), "WorkflowStep", "WorkflowStep", 1), `&`, ``, 1) + "," - } - repeatedStringForSteps += "}" - s := strings.Join([]string{`&ParallelSteps{`, - `Steps:` + repeatedStringForSteps + `,`, - `}`, - }, "") - return s -} -func (this *Parameter) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Parameter{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Default:` + valueToStringGenerated(this.Default) + `,`, - `Value:` + valueToStringGenerated(this.Value) + `,`, - `ValueFrom:` + strings.Replace(this.ValueFrom.String(), "ValueFrom", "ValueFrom", 1) + `,`, - `GlobalName:` + fmt.Sprintf("%v", this.GlobalName) + `,`, - `Enum:` + fmt.Sprintf("%v", this.Enum) + `,`, - `Description:` + valueToStringGenerated(this.Description) + `,`, - `}`, - }, "") - return s -} -func (this *Plugin) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Plugin{`, - `Object:` + strings.Replace(strings.Replace(this.Object.String(), "Object", "Object", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodGC) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodGC{`, - `Strategy:` + fmt.Sprintf("%v", this.Strategy) + `,`, - `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "v11.LabelSelector", 1) + `,`, - `DeleteDelayDuration:` + fmt.Sprintf("%v", this.DeleteDelayDuration) + `,`, - `}`, - }, "") - return s -} -func (this *Prometheus) String() string { - if this == nil { - return "nil" - } - repeatedStringForLabels := "[]*MetricLabel{" - for _, f := range this.Labels { - repeatedStringForLabels += strings.Replace(f.String(), "MetricLabel", "MetricLabel", 1) + "," - } - repeatedStringForLabels += "}" - s := strings.Join([]string{`&Prometheus{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Labels:` + repeatedStringForLabels + `,`, - `Help:` + fmt.Sprintf("%v", 
this.Help) + `,`, - `When:` + fmt.Sprintf("%v", this.When) + `,`, - `Gauge:` + strings.Replace(this.Gauge.String(), "Gauge", "Gauge", 1) + `,`, - `Histogram:` + strings.Replace(this.Histogram.String(), "Histogram", "Histogram", 1) + `,`, - `Counter:` + strings.Replace(this.Counter.String(), "Counter", "Counter", 1) + `,`, - `}`, - }, "") - return s -} -func (this *RawArtifact) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RawArtifact{`, - `Data:` + fmt.Sprintf("%v", this.Data) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceTemplate) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceTemplate{`, - `Action:` + fmt.Sprintf("%v", this.Action) + `,`, - `MergeStrategy:` + fmt.Sprintf("%v", this.MergeStrategy) + `,`, - `Manifest:` + fmt.Sprintf("%v", this.Manifest) + `,`, - `SetOwnerReference:` + fmt.Sprintf("%v", this.SetOwnerReference) + `,`, - `SuccessCondition:` + fmt.Sprintf("%v", this.SuccessCondition) + `,`, - `FailureCondition:` + fmt.Sprintf("%v", this.FailureCondition) + `,`, - `Flags:` + fmt.Sprintf("%v", this.Flags) + `,`, - `ManifestFrom:` + strings.Replace(this.ManifestFrom.String(), "ManifestFrom", "ManifestFrom", 1) + `,`, - `}`, - }, "") - return s -} -func (this *RetryAffinity) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RetryAffinity{`, - `NodeAntiAffinity:` + strings.Replace(this.NodeAntiAffinity.String(), "RetryNodeAntiAffinity", "RetryNodeAntiAffinity", 1) + `,`, - `}`, - }, "") - return s -} -func (this *RetryNodeAntiAffinity) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RetryNodeAntiAffinity{`, - `}`, - }, "") - return s -} -func (this *RetryStrategy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RetryStrategy{`, - `Limit:` + strings.Replace(fmt.Sprintf("%v", this.Limit), "IntOrString", "intstr.IntOrString", 1) + `,`, - 
`RetryPolicy:` + fmt.Sprintf("%v", this.RetryPolicy) + `,`, - `Backoff:` + strings.Replace(this.Backoff.String(), "Backoff", "Backoff", 1) + `,`, - `Affinity:` + strings.Replace(this.Affinity.String(), "RetryAffinity", "RetryAffinity", 1) + `,`, - `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, - `}`, - }, "") - return s -} -func (this *S3Artifact) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&S3Artifact{`, - `S3Bucket:` + strings.Replace(strings.Replace(this.S3Bucket.String(), "S3Bucket", "S3Bucket", 1), `&`, ``, 1) + `,`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `}`, - }, "") - return s -} -func (this *S3ArtifactRepository) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&S3ArtifactRepository{`, - `S3Bucket:` + strings.Replace(strings.Replace(this.S3Bucket.String(), "S3Bucket", "S3Bucket", 1), `&`, ``, 1) + `,`, - `KeyFormat:` + fmt.Sprintf("%v", this.KeyFormat) + `,`, - `KeyPrefix:` + fmt.Sprintf("%v", this.KeyPrefix) + `,`, - `}`, - }, "") - return s -} -func (this *S3Bucket) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&S3Bucket{`, - `Endpoint:` + fmt.Sprintf("%v", this.Endpoint) + `,`, - `Bucket:` + fmt.Sprintf("%v", this.Bucket) + `,`, - `Region:` + fmt.Sprintf("%v", this.Region) + `,`, - `Insecure:` + valueToStringGenerated(this.Insecure) + `,`, - `AccessKeySecret:` + strings.Replace(fmt.Sprintf("%v", this.AccessKeySecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `SecretKeySecret:` + strings.Replace(fmt.Sprintf("%v", this.SecretKeySecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `RoleARN:` + fmt.Sprintf("%v", this.RoleARN) + `,`, - `UseSDKCreds:` + fmt.Sprintf("%v", this.UseSDKCreds) + `,`, - `CreateBucketIfNotPresent:` + strings.Replace(this.CreateBucketIfNotPresent.String(), "CreateS3BucketOptions", "CreateS3BucketOptions", 1) + `,`, - `EncryptionOptions:` + 
strings.Replace(this.EncryptionOptions.String(), "S3EncryptionOptions", "S3EncryptionOptions", 1) + `,`, - `CASecret:` + strings.Replace(fmt.Sprintf("%v", this.CASecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *S3EncryptionOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&S3EncryptionOptions{`, - `KmsKeyId:` + fmt.Sprintf("%v", this.KmsKeyId) + `,`, - `KmsEncryptionContext:` + fmt.Sprintf("%v", this.KmsEncryptionContext) + `,`, - `EnableEncryption:` + fmt.Sprintf("%v", this.EnableEncryption) + `,`, - `ServerSideCustomerKeySecret:` + strings.Replace(fmt.Sprintf("%v", this.ServerSideCustomerKeySecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ScriptTemplate) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ScriptTemplate{`, - `Container:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Container), "Container", "v1.Container", 1), `&`, ``, 1) + `,`, - `Source:` + fmt.Sprintf("%v", this.Source) + `,`, - `}`, - }, "") - return s -} -func (this *SemaphoreHolding) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SemaphoreHolding{`, - `Semaphore:` + fmt.Sprintf("%v", this.Semaphore) + `,`, - `Holders:` + fmt.Sprintf("%v", this.Holders) + `,`, - `}`, - }, "") - return s -} -func (this *SemaphoreRef) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SemaphoreRef{`, - `ConfigMapKeyRef:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMapKeyRef), "ConfigMapKeySelector", "v1.ConfigMapKeySelector", 1) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `}`, - }, "") - return s -} -func (this *SemaphoreStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForHolding := "[]SemaphoreHolding{" - for _, f := range this.Holding { - repeatedStringForHolding += 
strings.Replace(strings.Replace(f.String(), "SemaphoreHolding", "SemaphoreHolding", 1), `&`, ``, 1) + "," - } - repeatedStringForHolding += "}" - repeatedStringForWaiting := "[]SemaphoreHolding{" - for _, f := range this.Waiting { - repeatedStringForWaiting += strings.Replace(strings.Replace(f.String(), "SemaphoreHolding", "SemaphoreHolding", 1), `&`, ``, 1) + "," - } - repeatedStringForWaiting += "}" - s := strings.Join([]string{`&SemaphoreStatus{`, - `Holding:` + repeatedStringForHolding + `,`, - `Waiting:` + repeatedStringForWaiting + `,`, - `}`, - }, "") - return s -} -func (this *Sequence) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Sequence{`, - `Count:` + strings.Replace(fmt.Sprintf("%v", this.Count), "IntOrString", "intstr.IntOrString", 1) + `,`, - `Start:` + strings.Replace(fmt.Sprintf("%v", this.Start), "IntOrString", "intstr.IntOrString", 1) + `,`, - `End:` + strings.Replace(fmt.Sprintf("%v", this.End), "IntOrString", "intstr.IntOrString", 1) + `,`, - `Format:` + fmt.Sprintf("%v", this.Format) + `,`, - `}`, - }, "") - return s -} -func (this *Submit) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Submit{`, - `WorkflowTemplateRef:` + strings.Replace(strings.Replace(this.WorkflowTemplateRef.String(), "WorkflowTemplateRef", "WorkflowTemplateRef", 1), `&`, ``, 1) + `,`, - `Arguments:` + strings.Replace(this.Arguments.String(), "Arguments", "Arguments", 1) + `,`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *SubmitOpts) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SubmitOpts{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `GenerateName:` + fmt.Sprintf("%v", this.GenerateName) + `,`, - `Entrypoint:` + fmt.Sprintf("%v", this.Entrypoint) + `,`, - `Parameters:` + fmt.Sprintf("%v", this.Parameters) + 
`,`, - `ServiceAccount:` + fmt.Sprintf("%v", this.ServiceAccount) + `,`, - `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, - `ServerDryRun:` + fmt.Sprintf("%v", this.ServerDryRun) + `,`, - `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, - `OwnerReference:` + strings.Replace(fmt.Sprintf("%v", this.OwnerReference), "OwnerReference", "v11.OwnerReference", 1) + `,`, - `Annotations:` + fmt.Sprintf("%v", this.Annotations) + `,`, - `PodPriorityClassName:` + fmt.Sprintf("%v", this.PodPriorityClassName) + `,`, - `Priority:` + valueToStringGenerated(this.Priority) + `,`, - `}`, - }, "") - return s -} -func (this *SuppliedValueFrom) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SuppliedValueFrom{`, - `}`, - }, "") - return s -} -func (this *SuspendTemplate) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SuspendTemplate{`, - `Duration:` + fmt.Sprintf("%v", this.Duration) + `,`, - `}`, - }, "") - return s -} -func (this *Synchronization) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Synchronization{`, - `Semaphore:` + strings.Replace(this.Semaphore.String(), "SemaphoreRef", "SemaphoreRef", 1) + `,`, - `Mutex:` + strings.Replace(this.Mutex.String(), "Mutex", "Mutex", 1) + `,`, - `}`, - }, "") - return s -} -func (this *SynchronizationStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SynchronizationStatus{`, - `Semaphore:` + strings.Replace(this.Semaphore.String(), "SemaphoreStatus", "SemaphoreStatus", 1) + `,`, - `Mutex:` + strings.Replace(this.Mutex.String(), "MutexStatus", "MutexStatus", 1) + `,`, - `}`, - }, "") - return s -} -func (this *TTLStrategy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TTLStrategy{`, - `SecondsAfterCompletion:` + valueToStringGenerated(this.SecondsAfterCompletion) + `,`, - `SecondsAfterSuccess:` + 
valueToStringGenerated(this.SecondsAfterSuccess) + `,`, - `SecondsAfterFailure:` + valueToStringGenerated(this.SecondsAfterFailure) + `,`, - `}`, - }, "") - return s -} -func (this *TarStrategy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TarStrategy{`, - `CompressionLevel:` + valueToStringGenerated(this.CompressionLevel) + `,`, - `}`, - }, "") - return s -} -func (this *Template) String() string { - if this == nil { - return "nil" - } - repeatedStringForSteps := "[]ParallelSteps{" - for _, f := range this.Steps { - repeatedStringForSteps += strings.Replace(strings.Replace(f.String(), "ParallelSteps", "ParallelSteps", 1), `&`, ``, 1) + "," - } - repeatedStringForSteps += "}" - repeatedStringForVolumes := "[]Volume{" - for _, f := range this.Volumes { - repeatedStringForVolumes += fmt.Sprintf("%v", f) + "," - } - repeatedStringForVolumes += "}" - repeatedStringForInitContainers := "[]UserContainer{" - for _, f := range this.InitContainers { - repeatedStringForInitContainers += strings.Replace(strings.Replace(f.String(), "UserContainer", "UserContainer", 1), `&`, ``, 1) + "," - } - repeatedStringForInitContainers += "}" - repeatedStringForSidecars := "[]UserContainer{" - for _, f := range this.Sidecars { - repeatedStringForSidecars += strings.Replace(strings.Replace(f.String(), "UserContainer", "UserContainer", 1), `&`, ``, 1) + "," - } - repeatedStringForSidecars += "}" - repeatedStringForTolerations := "[]Toleration{" - for _, f := range this.Tolerations { - repeatedStringForTolerations += fmt.Sprintf("%v", f) + "," - } - repeatedStringForTolerations += "}" - repeatedStringForHostAliases := "[]HostAlias{" - for _, f := range this.HostAliases { - repeatedStringForHostAliases += fmt.Sprintf("%v", f) + "," - } - repeatedStringForHostAliases += "}" - keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) - for k := range this.NodeSelector { - keysForNodeSelector = append(keysForNodeSelector, k) - } - 
github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) - mapStringForNodeSelector := "map[string]string{" - for _, k := range keysForNodeSelector { - mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) - } - mapStringForNodeSelector += "}" - s := strings.Join([]string{`&Template{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Inputs:` + strings.Replace(strings.Replace(this.Inputs.String(), "Inputs", "Inputs", 1), `&`, ``, 1) + `,`, - `Outputs:` + strings.Replace(strings.Replace(this.Outputs.String(), "Outputs", "Outputs", 1), `&`, ``, 1) + `,`, - `NodeSelector:` + mapStringForNodeSelector + `,`, - `Affinity:` + strings.Replace(fmt.Sprintf("%v", this.Affinity), "Affinity", "v1.Affinity", 1) + `,`, - `Metadata:` + strings.Replace(strings.Replace(this.Metadata.String(), "Metadata", "Metadata", 1), `&`, ``, 1) + `,`, - `Daemon:` + valueToStringGenerated(this.Daemon) + `,`, - `Steps:` + repeatedStringForSteps + `,`, - `Container:` + strings.Replace(fmt.Sprintf("%v", this.Container), "Container", "v1.Container", 1) + `,`, - `Script:` + strings.Replace(this.Script.String(), "ScriptTemplate", "ScriptTemplate", 1) + `,`, - `Resource:` + strings.Replace(this.Resource.String(), "ResourceTemplate", "ResourceTemplate", 1) + `,`, - `DAG:` + strings.Replace(this.DAG.String(), "DAGTemplate", "DAGTemplate", 1) + `,`, - `Suspend:` + strings.Replace(this.Suspend.String(), "SuspendTemplate", "SuspendTemplate", 1) + `,`, - `Volumes:` + repeatedStringForVolumes + `,`, - `InitContainers:` + repeatedStringForInitContainers + `,`, - `Sidecars:` + repeatedStringForSidecars + `,`, - `ArchiveLocation:` + strings.Replace(this.ArchiveLocation.String(), "ArtifactLocation", "ArtifactLocation", 1) + `,`, - `ActiveDeadlineSeconds:` + strings.Replace(fmt.Sprintf("%v", this.ActiveDeadlineSeconds), "IntOrString", "intstr.IntOrString", 1) + `,`, - `RetryStrategy:` + strings.Replace(this.RetryStrategy.String(), "RetryStrategy", "RetryStrategy", 1) + `,`, - 
`Parallelism:` + valueToStringGenerated(this.Parallelism) + `,`, - `Tolerations:` + repeatedStringForTolerations + `,`, - `SchedulerName:` + fmt.Sprintf("%v", this.SchedulerName) + `,`, - `PriorityClassName:` + fmt.Sprintf("%v", this.PriorityClassName) + `,`, - `Priority:` + valueToStringGenerated(this.Priority) + `,`, - `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, - `HostAliases:` + repeatedStringForHostAliases + `,`, - `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "PodSecurityContext", "v1.PodSecurityContext", 1) + `,`, - `PodSpecPatch:` + fmt.Sprintf("%v", this.PodSpecPatch) + `,`, - `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, - `Executor:` + strings.Replace(this.Executor.String(), "ExecutorConfig", "ExecutorConfig", 1) + `,`, - `Metrics:` + strings.Replace(this.Metrics.String(), "Metrics", "Metrics", 1) + `,`, - `Synchronization:` + strings.Replace(this.Synchronization.String(), "Synchronization", "Synchronization", 1) + `,`, - `Memoize:` + strings.Replace(this.Memoize.String(), "Memoize", "Memoize", 1) + `,`, - `Timeout:` + fmt.Sprintf("%v", this.Timeout) + `,`, - `Data:` + strings.Replace(this.Data.String(), "Data", "Data", 1) + `,`, - `ContainerSet:` + strings.Replace(this.ContainerSet.String(), "ContainerSetTemplate", "ContainerSetTemplate", 1) + `,`, - `FailFast:` + valueToStringGenerated(this.FailFast) + `,`, - `HTTP:` + strings.Replace(this.HTTP.String(), "HTTP", "HTTP", 1) + `,`, - `Plugin:` + strings.Replace(this.Plugin.String(), "Plugin", "Plugin", 1) + `,`, - `}`, - }, "") - return s -} -func (this *TemplateRef) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TemplateRef{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Template:` + fmt.Sprintf("%v", this.Template) + `,`, - `ClusterScope:` + fmt.Sprintf("%v", this.ClusterScope) + `,`, - `}`, - }, "") - return s -} -func (this 
*TransformationStep) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TransformationStep{`, - `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, - `}`, - }, "") - return s -} -func (this *UserContainer) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UserContainer{`, - `Container:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Container), "Container", "v1.Container", 1), `&`, ``, 1) + `,`, - `MirrorVolumeMounts:` + valueToStringGenerated(this.MirrorVolumeMounts) + `,`, - `}`, - }, "") - return s -} -func (this *ValueFrom) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ValueFrom{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `JSONPath:` + fmt.Sprintf("%v", this.JSONPath) + `,`, - `JQFilter:` + fmt.Sprintf("%v", this.JQFilter) + `,`, - `Parameter:` + fmt.Sprintf("%v", this.Parameter) + `,`, - `Default:` + valueToStringGenerated(this.Default) + `,`, - `Supplied:` + strings.Replace(this.Supplied.String(), "SuppliedValueFrom", "SuppliedValueFrom", 1) + `,`, - `Event:` + fmt.Sprintf("%v", this.Event) + `,`, - `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, - `ConfigMapKeyRef:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMapKeyRef), "ConfigMapKeySelector", "v1.ConfigMapKeySelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Version) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Version{`, - `Version:` + fmt.Sprintf("%v", this.Version) + `,`, - `BuildDate:` + fmt.Sprintf("%v", this.BuildDate) + `,`, - `GitCommit:` + fmt.Sprintf("%v", this.GitCommit) + `,`, - `GitTag:` + fmt.Sprintf("%v", this.GitTag) + `,`, - `GitTreeState:` + fmt.Sprintf("%v", this.GitTreeState) + `,`, - `GoVersion:` + fmt.Sprintf("%v", this.GoVersion) + `,`, - `Compiler:` + fmt.Sprintf("%v", this.Compiler) + `,`, - `Platform:` + fmt.Sprintf("%v", this.Platform) + `,`, - `}`, - }, "") - return s -} 
-func (this *VolumeClaimGC) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeClaimGC{`, - `Strategy:` + fmt.Sprintf("%v", this.Strategy) + `,`, - `}`, - }, "") - return s -} -func (this *Workflow) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Workflow{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "WorkflowSpec", "WorkflowSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "WorkflowStatus", "WorkflowStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *WorkflowArtifactGCTask) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WorkflowArtifactGCTask{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ArtifactGCSpec", "ArtifactGCSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ArtifactGCStatus", "ArtifactGCStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *WorkflowArtifactGCTaskList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]WorkflowArtifactGCTask{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "WorkflowArtifactGCTask", "WorkflowArtifactGCTask", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&WorkflowArtifactGCTaskList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this 
*WorkflowEventBinding) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WorkflowEventBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "WorkflowEventBindingSpec", "WorkflowEventBindingSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *WorkflowEventBindingList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]WorkflowEventBinding{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "WorkflowEventBinding", "WorkflowEventBinding", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&WorkflowEventBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *WorkflowEventBindingSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WorkflowEventBindingSpec{`, - `Event:` + strings.Replace(strings.Replace(this.Event.String(), "Event", "Event", 1), `&`, ``, 1) + `,`, - `Submit:` + strings.Replace(this.Submit.String(), "Submit", "Submit", 1) + `,`, - `}`, - }, "") - return s -} -func (this *WorkflowLevelArtifactGC) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WorkflowLevelArtifactGC{`, - `ArtifactGC:` + strings.Replace(strings.Replace(this.ArtifactGC.String(), "ArtifactGC", "ArtifactGC", 1), `&`, ``, 1) + `,`, - `ForceFinalizerRemoval:` + fmt.Sprintf("%v", this.ForceFinalizerRemoval) + `,`, - `PodSpecPatch:` + fmt.Sprintf("%v", this.PodSpecPatch) + `,`, - `}`, - }, "") - return s -} -func (this *WorkflowList) String() string { - if this == nil { - return "nil" - } - 
repeatedStringForItems := "[]Workflow{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Workflow", "Workflow", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&WorkflowList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *WorkflowMetadata) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - keysForAnnotations := make([]string, 0, len(this.Annotations)) - for k := range this.Annotations { - keysForAnnotations = append(keysForAnnotations, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) - mapStringForAnnotations := "map[string]string{" - for _, k := range keysForAnnotations { - mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) - } - mapStringForAnnotations += "}" - keysForLabelsFrom := make([]string, 0, len(this.LabelsFrom)) - for k := range this.LabelsFrom { - keysForLabelsFrom = append(keysForLabelsFrom, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabelsFrom) - mapStringForLabelsFrom := "map[string]LabelValueFrom{" - for _, k := range keysForLabelsFrom { - mapStringForLabelsFrom += fmt.Sprintf("%v: %v,", k, this.LabelsFrom[k]) - } - mapStringForLabelsFrom += "}" - s := strings.Join([]string{`&WorkflowMetadata{`, - `Labels:` + mapStringForLabels + `,`, - `Annotations:` + mapStringForAnnotations + `,`, - `LabelsFrom:` + mapStringForLabelsFrom + `,`, - `}`, - }, "") - 
return s -} -func (this *WorkflowSpec) String() string { - if this == nil { - return "nil" - } - repeatedStringForTemplates := "[]Template{" - for _, f := range this.Templates { - repeatedStringForTemplates += strings.Replace(strings.Replace(f.String(), "Template", "Template", 1), `&`, ``, 1) + "," - } - repeatedStringForTemplates += "}" - repeatedStringForVolumes := "[]Volume{" - for _, f := range this.Volumes { - repeatedStringForVolumes += fmt.Sprintf("%v", f) + "," - } - repeatedStringForVolumes += "}" - repeatedStringForVolumeClaimTemplates := "[]PersistentVolumeClaim{" - for _, f := range this.VolumeClaimTemplates { - repeatedStringForVolumeClaimTemplates += fmt.Sprintf("%v", f) + "," - } - repeatedStringForVolumeClaimTemplates += "}" - repeatedStringForTolerations := "[]Toleration{" - for _, f := range this.Tolerations { - repeatedStringForTolerations += fmt.Sprintf("%v", f) + "," - } - repeatedStringForTolerations += "}" - repeatedStringForImagePullSecrets := "[]LocalObjectReference{" - for _, f := range this.ImagePullSecrets { - repeatedStringForImagePullSecrets += fmt.Sprintf("%v", f) + "," - } - repeatedStringForImagePullSecrets += "}" - repeatedStringForHostAliases := "[]HostAlias{" - for _, f := range this.HostAliases { - repeatedStringForHostAliases += fmt.Sprintf("%v", f) + "," - } - repeatedStringForHostAliases += "}" - keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) - for k := range this.NodeSelector { - keysForNodeSelector = append(keysForNodeSelector, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) - mapStringForNodeSelector := "map[string]string{" - for _, k := range keysForNodeSelector { - mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) - } - mapStringForNodeSelector += "}" - keysForHooks := make([]string, 0, len(this.Hooks)) - for k := range this.Hooks { - keysForHooks = append(keysForHooks, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForHooks) - 
mapStringForHooks := "LifecycleHooks{" - for _, k := range keysForHooks { - mapStringForHooks += fmt.Sprintf("%v: %v,", k, this.Hooks[LifecycleEvent(k)]) - } - mapStringForHooks += "}" - s := strings.Join([]string{`&WorkflowSpec{`, - `Templates:` + repeatedStringForTemplates + `,`, - `Entrypoint:` + fmt.Sprintf("%v", this.Entrypoint) + `,`, - `Arguments:` + strings.Replace(strings.Replace(this.Arguments.String(), "Arguments", "Arguments", 1), `&`, ``, 1) + `,`, - `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, - `Volumes:` + repeatedStringForVolumes + `,`, - `VolumeClaimTemplates:` + repeatedStringForVolumeClaimTemplates + `,`, - `Parallelism:` + valueToStringGenerated(this.Parallelism) + `,`, - `ArtifactRepositoryRef:` + strings.Replace(fmt.Sprintf("%v", this.ArtifactRepositoryRef), "ArtifactRepositoryRef", "ArtifactRepositoryRef", 1) + `,`, - `Suspend:` + valueToStringGenerated(this.Suspend) + `,`, - `NodeSelector:` + mapStringForNodeSelector + `,`, - `Affinity:` + strings.Replace(fmt.Sprintf("%v", this.Affinity), "Affinity", "v1.Affinity", 1) + `,`, - `Tolerations:` + repeatedStringForTolerations + `,`, - `ImagePullSecrets:` + repeatedStringForImagePullSecrets + `,`, - `HostNetwork:` + valueToStringGenerated(this.HostNetwork) + `,`, - `DNSPolicy:` + valueToStringGenerated(this.DNSPolicy) + `,`, - `DNSConfig:` + strings.Replace(fmt.Sprintf("%v", this.DNSConfig), "PodDNSConfig", "v1.PodDNSConfig", 1) + `,`, - `OnExit:` + fmt.Sprintf("%v", this.OnExit) + `,`, - `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, - `Priority:` + valueToStringGenerated(this.Priority) + `,`, - `SchedulerName:` + fmt.Sprintf("%v", this.SchedulerName) + `,`, - `PodGC:` + strings.Replace(this.PodGC.String(), "PodGC", "PodGC", 1) + `,`, - `PodPriorityClassName:` + fmt.Sprintf("%v", this.PodPriorityClassName) + `,`, - `PodPriority:` + valueToStringGenerated(this.PodPriority) + `,`, - `HostAliases:` + repeatedStringForHostAliases + 
`,`, - `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "PodSecurityContext", "v1.PodSecurityContext", 1) + `,`, - `PodSpecPatch:` + fmt.Sprintf("%v", this.PodSpecPatch) + `,`, - `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, - `Executor:` + strings.Replace(this.Executor.String(), "ExecutorConfig", "ExecutorConfig", 1) + `,`, - `TTLStrategy:` + strings.Replace(this.TTLStrategy.String(), "TTLStrategy", "TTLStrategy", 1) + `,`, - `PodDisruptionBudget:` + strings.Replace(fmt.Sprintf("%v", this.PodDisruptionBudget), "PodDisruptionBudgetSpec", "v12.PodDisruptionBudgetSpec", 1) + `,`, - `Metrics:` + strings.Replace(this.Metrics.String(), "Metrics", "Metrics", 1) + `,`, - `Shutdown:` + fmt.Sprintf("%v", this.Shutdown) + `,`, - `WorkflowTemplateRef:` + strings.Replace(this.WorkflowTemplateRef.String(), "WorkflowTemplateRef", "WorkflowTemplateRef", 1) + `,`, - `Synchronization:` + strings.Replace(this.Synchronization.String(), "Synchronization", "Synchronization", 1) + `,`, - `VolumeClaimGC:` + strings.Replace(this.VolumeClaimGC.String(), "VolumeClaimGC", "VolumeClaimGC", 1) + `,`, - `RetryStrategy:` + strings.Replace(this.RetryStrategy.String(), "RetryStrategy", "RetryStrategy", 1) + `,`, - `PodMetadata:` + strings.Replace(this.PodMetadata.String(), "Metadata", "Metadata", 1) + `,`, - `TemplateDefaults:` + strings.Replace(this.TemplateDefaults.String(), "Template", "Template", 1) + `,`, - `ArchiveLogs:` + valueToStringGenerated(this.ArchiveLogs) + `,`, - `Hooks:` + mapStringForHooks + `,`, - `WorkflowMetadata:` + strings.Replace(this.WorkflowMetadata.String(), "WorkflowMetadata", "WorkflowMetadata", 1) + `,`, - `ArtifactGC:` + strings.Replace(this.ArtifactGC.String(), "WorkflowLevelArtifactGC", "WorkflowLevelArtifactGC", 1) + `,`, - `}`, - }, "") - return s -} -func (this *WorkflowStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForPersistentVolumeClaims := 
"[]Volume{" - for _, f := range this.PersistentVolumeClaims { - repeatedStringForPersistentVolumeClaims += fmt.Sprintf("%v", f) + "," - } - repeatedStringForPersistentVolumeClaims += "}" - repeatedStringForConditions := "[]Condition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "Condition", "Condition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" - keysForNodes := make([]string, 0, len(this.Nodes)) - for k := range this.Nodes { - keysForNodes = append(keysForNodes, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForNodes) - mapStringForNodes := "Nodes{" - for _, k := range keysForNodes { - mapStringForNodes += fmt.Sprintf("%v: %v,", k, this.Nodes[k]) - } - mapStringForNodes += "}" - keysForStoredTemplates := make([]string, 0, len(this.StoredTemplates)) - for k := range this.StoredTemplates { - keysForStoredTemplates = append(keysForStoredTemplates, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForStoredTemplates) - mapStringForStoredTemplates := "map[string]Template{" - for _, k := range keysForStoredTemplates { - mapStringForStoredTemplates += fmt.Sprintf("%v: %v,", k, this.StoredTemplates[k]) - } - mapStringForStoredTemplates += "}" - keysForResourcesDuration := make([]string, 0, len(this.ResourcesDuration)) - for k := range this.ResourcesDuration { - keysForResourcesDuration = append(keysForResourcesDuration, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForResourcesDuration) - mapStringForResourcesDuration := "ResourcesDuration{" - for _, k := range keysForResourcesDuration { - mapStringForResourcesDuration += fmt.Sprintf("%v: %v,", k, this.ResourcesDuration[k8s_io_api_core_v1.ResourceName(k)]) - } - mapStringForResourcesDuration += "}" - keysForTaskResultsCompletionStatus := make([]string, 0, len(this.TaskResultsCompletionStatus)) - for k := range this.TaskResultsCompletionStatus { - keysForTaskResultsCompletionStatus = 
append(keysForTaskResultsCompletionStatus, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForTaskResultsCompletionStatus) - mapStringForTaskResultsCompletionStatus := "map[string]bool{" - for _, k := range keysForTaskResultsCompletionStatus { - mapStringForTaskResultsCompletionStatus += fmt.Sprintf("%v: %v,", k, this.TaskResultsCompletionStatus[k]) - } - mapStringForTaskResultsCompletionStatus += "}" - s := strings.Join([]string{`&WorkflowStatus{`, - `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, - `StartedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, - `FinishedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.FinishedAt), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `CompressedNodes:` + fmt.Sprintf("%v", this.CompressedNodes) + `,`, - `Nodes:` + mapStringForNodes + `,`, - `PersistentVolumeClaims:` + repeatedStringForPersistentVolumeClaims + `,`, - `Outputs:` + strings.Replace(this.Outputs.String(), "Outputs", "Outputs", 1) + `,`, - `StoredTemplates:` + mapStringForStoredTemplates + `,`, - `OffloadNodeStatusVersion:` + fmt.Sprintf("%v", this.OffloadNodeStatusVersion) + `,`, - `ResourcesDuration:` + mapStringForResourcesDuration + `,`, - `Conditions:` + repeatedStringForConditions + `,`, - `StoredWorkflowSpec:` + strings.Replace(this.StoredWorkflowSpec.String(), "WorkflowSpec", "WorkflowSpec", 1) + `,`, - `Synchronization:` + strings.Replace(this.Synchronization.String(), "SynchronizationStatus", "SynchronizationStatus", 1) + `,`, - `EstimatedDuration:` + fmt.Sprintf("%v", this.EstimatedDuration) + `,`, - `Progress:` + fmt.Sprintf("%v", this.Progress) + `,`, - `ArtifactRepositoryRef:` + strings.Replace(fmt.Sprintf("%v", this.ArtifactRepositoryRef), "ArtifactRepositoryRefStatus", "ArtifactRepositoryRefStatus", 1) + `,`, - `ArtifactGCStatus:` + strings.Replace(this.ArtifactGCStatus.String(), "ArtGCStatus", "ArtGCStatus", 1) 
+ `,`, - `TaskResultsCompletionStatus:` + mapStringForTaskResultsCompletionStatus + `,`, - `}`, - }, "") - return s -} -func (this *WorkflowStep) String() string { - if this == nil { - return "nil" - } - repeatedStringForWithItems := "[]Item{" - for _, f := range this.WithItems { - repeatedStringForWithItems += fmt.Sprintf("%v", f) + "," - } - repeatedStringForWithItems += "}" - keysForHooks := make([]string, 0, len(this.Hooks)) - for k := range this.Hooks { - keysForHooks = append(keysForHooks, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForHooks) - mapStringForHooks := "LifecycleHooks{" - for _, k := range keysForHooks { - mapStringForHooks += fmt.Sprintf("%v: %v,", k, this.Hooks[LifecycleEvent(k)]) - } - mapStringForHooks += "}" - s := strings.Join([]string{`&WorkflowStep{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Template:` + fmt.Sprintf("%v", this.Template) + `,`, - `Arguments:` + strings.Replace(strings.Replace(this.Arguments.String(), "Arguments", "Arguments", 1), `&`, ``, 1) + `,`, - `TemplateRef:` + strings.Replace(this.TemplateRef.String(), "TemplateRef", "TemplateRef", 1) + `,`, - `WithItems:` + repeatedStringForWithItems + `,`, - `WithParam:` + fmt.Sprintf("%v", this.WithParam) + `,`, - `WithSequence:` + strings.Replace(this.WithSequence.String(), "Sequence", "Sequence", 1) + `,`, - `When:` + fmt.Sprintf("%v", this.When) + `,`, - `ContinueOn:` + strings.Replace(this.ContinueOn.String(), "ContinueOn", "ContinueOn", 1) + `,`, - `OnExit:` + fmt.Sprintf("%v", this.OnExit) + `,`, - `Hooks:` + mapStringForHooks + `,`, - `Inline:` + strings.Replace(this.Inline.String(), "Template", "Template", 1) + `,`, - `}`, - }, "") - return s -} -func (this *WorkflowTaskResult) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WorkflowTaskResult{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `NodeResult:` + 
strings.Replace(strings.Replace(this.NodeResult.String(), "NodeResult", "NodeResult", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *WorkflowTaskResultList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]WorkflowTaskResult{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "WorkflowTaskResult", "WorkflowTaskResult", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&WorkflowTaskResultList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *WorkflowTaskSet) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WorkflowTaskSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "WorkflowTaskSetSpec", "WorkflowTaskSetSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "WorkflowTaskSetStatus", "WorkflowTaskSetStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *WorkflowTaskSetList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]WorkflowTaskSet{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "WorkflowTaskSet", "WorkflowTaskSet", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&WorkflowTaskSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *WorkflowTaskSetSpec) String() string { - if this == 
nil { - return "nil" - } - keysForTasks := make([]string, 0, len(this.Tasks)) - for k := range this.Tasks { - keysForTasks = append(keysForTasks, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForTasks) - mapStringForTasks := "map[string]Template{" - for _, k := range keysForTasks { - mapStringForTasks += fmt.Sprintf("%v: %v,", k, this.Tasks[k]) - } - mapStringForTasks += "}" - s := strings.Join([]string{`&WorkflowTaskSetSpec{`, - `Tasks:` + mapStringForTasks + `,`, - `}`, - }, "") - return s -} -func (this *WorkflowTaskSetStatus) String() string { - if this == nil { - return "nil" - } - keysForNodes := make([]string, 0, len(this.Nodes)) - for k := range this.Nodes { - keysForNodes = append(keysForNodes, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForNodes) - mapStringForNodes := "map[string]NodeResult{" - for _, k := range keysForNodes { - mapStringForNodes += fmt.Sprintf("%v: %v,", k, this.Nodes[k]) - } - mapStringForNodes += "}" - s := strings.Join([]string{`&WorkflowTaskSetStatus{`, - `Nodes:` + mapStringForNodes + `,`, - `}`, - }, "") - return s -} -func (this *WorkflowTemplate) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WorkflowTemplate{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "WorkflowSpec", "WorkflowSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *WorkflowTemplateList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]WorkflowTemplate{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "WorkflowTemplate", "WorkflowTemplate", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&WorkflowTemplateList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), 
"ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *WorkflowTemplateRef) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WorkflowTemplateRef{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ClusterScope:` + fmt.Sprintf("%v", this.ClusterScope) + `,`, - `}`, - }, "") - return s -} -func (this *ZipStrategy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ZipStrategy{`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Amount) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Amount: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Amount: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = encoding_json.Number(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ArchiveStrategy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ArchiveStrategy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ArchiveStrategy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tar", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Tar == nil { - m.Tar = &TarStrategy{} - } - if err := m.Tar.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field None", wireType) - } - var 
msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.None == nil { - m.None = &NoneStrategy{} - } - if err := m.None.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Zip", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Zip == nil { - m.Zip = &ZipStrategy{} - } - if err := m.Zip.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Arguments) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Arguments: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Arguments: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Parameters = append(m.Parameters, Parameter{}) - if err := m.Parameters[len(m.Parameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Artifacts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Artifacts = append(m.Artifacts, Artifact{}) - if err := m.Artifacts[len(m.Artifacts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 
0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ArtGCStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ArtGCStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ArtGCStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StrategiesProcessed", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.StrategiesProcessed == nil { - m.StrategiesProcessed = make(map[ArtifactGCStrategy]bool) - } - var mapkey ArtifactGCStrategy - var mapvalue bool - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey 
uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ArtifactGCStrategy(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapvaluetemp int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapvaluetemp |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - mapvalue = bool(mapvaluetemp != 0) - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.StrategiesProcessed[ArtifactGCStrategy(mapkey)] = mapvalue - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodsRecouped", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PodsRecouped == nil { - m.PodsRecouped = 
make(map[string]bool) - } - var mapkey string - var mapvalue bool - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapvaluetemp int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapvaluetemp |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - mapvalue = bool(mapvaluetemp != 0) - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.PodsRecouped[mapkey] = mapvalue - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NotSpecified", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.NotSpecified = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Artifact) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Artifact: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Artifact: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", 
wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Mode = &v - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.From = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ArtifactLocation", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - 
if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ArtifactLocation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GlobalName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.GlobalName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Archive", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Archive == nil { - m.Archive = &ArchiveStrategy{} - } - if err := m.Archive.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated 
- } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Optional = bool(v != 0) - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SubPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SubPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RecurseMode", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.RecurseMode = bool(v != 0) - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FromExpression", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FromExpression = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 12: - if wireType != 2 { 
- return fmt.Errorf("proto: wrong wireType = %d for field ArtifactGC", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ArtifactGC == nil { - m.ArtifactGC = &ArtifactGC{} - } - if err := m.ArtifactGC.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Deleted = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ArtifactGC) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ArtifactGC: wiretype end group for non-group") - } - if 
fieldNum <= 0 { - return fmt.Errorf("proto: ArtifactGC: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Strategy = ArtifactGCStrategy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodMetadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PodMetadata == nil { - m.PodMetadata = &Metadata{} - } - if err := m.PodMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := 
int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServiceAccountName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ArtifactGCSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ArtifactGCSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ArtifactGCSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ArtifactsByNode", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ArtifactsByNode == nil { - m.ArtifactsByNode = 
make(map[string]ArtifactNodeSpec) - } - var mapkey string - mapvalue := &ArtifactNodeSpec{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &ArtifactNodeSpec{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > 
postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.ArtifactsByNode[mapkey] = *mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ArtifactGCStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ArtifactGCStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ArtifactGCStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ArtifactResultsByNode", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ArtifactResultsByNode == nil { - m.ArtifactResultsByNode = make(map[string]ArtifactResultNodeStatus) - } - var mapkey string - mapvalue := &ArtifactResultNodeStatus{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - 
for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &ArtifactResultNodeStatus{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.ArtifactResultsByNode[mapkey] = *mapvalue - iNdEx = postIndex - default: - iNdEx = 
preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ArtifactLocation) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ArtifactLocation: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ArtifactLocation: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ArchiveLogs", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.ArchiveLogs = &b - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field S3", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.S3 
== nil { - m.S3 = &S3Artifact{} - } - if err := m.S3.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Git", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Git == nil { - m.Git = &GitArtifact{} - } - if err := m.Git.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.HTTP == nil { - m.HTTP = &HTTPArtifact{} - } - if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Artifactory", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Artifactory == nil { - m.Artifactory = &ArtifactoryArtifact{} - } - if err := m.Artifactory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HDFS", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.HDFS == nil { - m.HDFS = &HDFSArtifact{} - } - if err := m.HDFS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Raw == nil { - m.Raw = &RawArtifact{} - } - if err := m.Raw.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OSS", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.OSS == nil { - m.OSS = &OSSArtifact{} - } - if err := m.OSS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GCS", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.GCS == nil { - m.GCS = &GCSArtifact{} - } - if err := m.GCS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Azure", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Azure == nil { - m.Azure = &AzureArtifact{} - } - if err := m.Azure.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = 
preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ArtifactNodeSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ArtifactNodeSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ArtifactNodeSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ArchiveLocation", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ArchiveLocation == nil { - m.ArchiveLocation = &ArtifactLocation{} - } - if err := m.ArchiveLocation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Artifacts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Artifacts == nil { - m.Artifacts = make(map[string]Artifact) - } - var mapkey string - mapvalue := &Artifact{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { 
- return io.ErrUnexpectedEOF - } - mapvalue = &Artifact{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Artifacts[mapkey] = *mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ArtifactPaths) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ArtifactPaths: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ArtifactPaths: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Artifact", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + 
msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Artifact.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ArtifactRepository) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ArtifactRepository: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ArtifactRepository: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ArchiveLogs", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.ArchiveLogs = &b - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field S3", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= 
int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.S3 == nil { - m.S3 = &S3ArtifactRepository{} - } - if err := m.S3.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Artifactory", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Artifactory == nil { - m.Artifactory = &ArtifactoryArtifactRepository{} - } - if err := m.Artifactory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HDFS", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.HDFS == nil { - m.HDFS = &HDFSArtifactRepository{} - } - if err := m.HDFS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for 
field OSS", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.OSS == nil { - m.OSS = &OSSArtifactRepository{} - } - if err := m.OSS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GCS", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.GCS == nil { - m.GCS = &GCSArtifactRepository{} - } - if err := m.GCS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Azure", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Azure == nil { - m.Azure = 
&AzureArtifactRepository{} - } - if err := m.Azure.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ArtifactRepositoryRef) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ArtifactRepositoryRef: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ArtifactRepositoryRef: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ConfigMap = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var 
stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ArtifactRepositoryRefStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ArtifactRepositoryRefStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ArtifactRepositoryRefStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ArtifactRepositoryRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b 
< 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ArtifactRepositoryRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Default = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ArtifactRepository", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - if m.ArtifactRepository == nil { - m.ArtifactRepository = &ArtifactRepository{} - } - if err := m.ArtifactRepository.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ArtifactResult) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ArtifactResult: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ArtifactResult: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Success", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Success = bool(v != 0) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Error = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ArtifactResultNodeStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ArtifactResultNodeStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return 
fmt.Errorf("proto: ArtifactResultNodeStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ArtifactResults", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ArtifactResults == nil { - m.ArtifactResults = make(map[string]ArtifactResult) - } - var mapkey string - mapvalue := &ArtifactResult{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &ArtifactResult{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.ArtifactResults[mapkey] = *mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ArtifactSearchQuery) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ArtifactSearchQuery: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ArtifactSearchQuery: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field ArtifactGCStrategies", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ArtifactGCStrategies == nil { - m.ArtifactGCStrategies = make(map[ArtifactGCStrategy]bool) - } - var mapkey ArtifactGCStrategy - var mapvalue bool - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ArtifactGCStrategy(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapvaluetemp int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapvaluetemp |= int(b&0x7F) << shift - if b < 0x80 { 
- break - } - } - mapvalue = bool(mapvaluetemp != 0) - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.ArtifactGCStrategies[ArtifactGCStrategy(mapkey)] = mapvalue - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ArtifactName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ArtifactName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TemplateName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TemplateName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift 
+= 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NodeId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Deleted = &b - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeTypes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NodeTypes == nil { - m.NodeTypes = make(map[NodeType]bool) - } - var mapkey NodeType - var mapvalue bool - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - 
for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = NodeType(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapvaluetemp int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapvaluetemp |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - mapvalue = bool(mapvaluetemp != 0) - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.NodeTypes[NodeType(mapkey)] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ArtifactSearchResult) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ArtifactSearchResult: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ArtifactSearchResult: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Artifact", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Artifact.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NodeID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ArtifactoryArtifact) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ArtifactoryArtifact: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ArtifactoryArtifact: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.URL = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ArtifactoryAuth", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return 
ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ArtifactoryAuth.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ArtifactoryArtifactRepository) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ArtifactoryArtifactRepository: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ArtifactoryArtifactRepository: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ArtifactoryAuth", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ArtifactoryAuth.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { 
- return fmt.Errorf("proto: wrong wireType = %d for field RepoURL", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RepoURL = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KeyFormat", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.KeyFormat = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ArtifactoryAuth) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ArtifactoryAuth: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ArtifactoryAuth: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UsernameSecret", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.UsernameSecret == nil { - m.UsernameSecret = &v1.SecretKeySelector{} - } - if err := m.UsernameSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PasswordSecret", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PasswordSecret == nil { - m.PasswordSecret = &v1.SecretKeySelector{} - } - if err := m.PasswordSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := 
skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AzureArtifact) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AzureArtifact: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AzureArtifact: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AzureBlobContainer", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.AzureBlobContainer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Blob", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Blob = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AzureArtifactRepository) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AzureArtifactRepository: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AzureArtifactRepository: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AzureBlobContainer", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - if err := m.AzureBlobContainer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BlobNameFormat", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.BlobNameFormat = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AzureBlobContainer) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AzureBlobContainer: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AzureBlobContainer: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Endpoint", 
wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Endpoint = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Container = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AccountKeySecret", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AccountKeySecret == nil { - m.AccountKeySecret = &v1.SecretKeySelector{} - } - if err := 
m.AccountKeySecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UseSDKCreds", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.UseSDKCreds = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Backoff) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Backoff: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Backoff: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex 
:= iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Duration = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Factor", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Factor == nil { - m.Factor = &intstr.IntOrString{} - } - if err := m.Factor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxDuration", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MaxDuration = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m 
*BasicAuth) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BasicAuth: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BasicAuth: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UsernameSecret", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.UsernameSecret == nil { - m.UsernameSecret = &v1.SecretKeySelector{} - } - if err := m.UsernameSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PasswordSecret", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - if m.PasswordSecret == nil { - m.PasswordSecret = &v1.SecretKeySelector{} - } - if err := m.PasswordSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Cache) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Cache: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Cache: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ConfigMap == nil { - m.ConfigMap = &v1.ConfigMapKeySelector{} - } - if err := m.ConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = 
preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClientCertAuth) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClientCertAuth: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClientCertAuth: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientCertSecret", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ClientCertSecret == nil { - m.ClientCertSecret = &v1.SecretKeySelector{} - } - if err := m.ClientCertSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientKeySecret", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ClientKeySecret == nil { - m.ClientKeySecret = &v1.SecretKeySelector{} - } - if err := m.ClientKeySecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterWorkflowTemplate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterWorkflowTemplate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterWorkflowTemplate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - 
} - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterWorkflowTemplateList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterWorkflowTemplateList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterWorkflowTemplateList: illegal tag %d 
(wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ClusterWorkflowTemplate{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Column) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { 
- if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Column: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Column: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for 
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Condition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Condition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Condition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { 
- return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = ConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_apimachinery_pkg_apis_meta_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return 
io.ErrUnexpectedEOF - } - return nil -} -func (m *ContainerNode) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ContainerNode: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerNode: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Dependencies", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > 
l { - return io.ErrUnexpectedEOF - } - m.Dependencies = append(m.Dependencies, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ContainerSetRetryStrategy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ContainerSetRetryStrategy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerSetRetryStrategy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Duration = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Retries", wireType) - 
} - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Retries == nil { - m.Retries = &intstr.IntOrString{} - } - if err := m.Retries.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ContainerSetTemplate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ContainerSetTemplate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerSetTemplate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen 
|= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeMounts = append(m.VolumeMounts, v1.VolumeMount{}) - if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Containers = append(m.Containers, ContainerNode{}) - if err := m.Containers[len(m.Containers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RetryStrategy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RetryStrategy == nil { - m.RetryStrategy = &ContainerSetRetryStrategy{} - } - if err := m.RetryStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - 
iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ContinueOn) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ContinueOn: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ContinueOn: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Error = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Failed = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - 
return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Counter) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Counter: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Counter: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CreateS3BucketOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CreateS3BucketOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CreateS3BucketOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectLocking", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ObjectLocking = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CronWorkflow) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CronWorkflow: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CronWorkflow: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: 
- iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CronWorkflowList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CronWorkflowList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CronWorkflowList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] 
- iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, CronWorkflow{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CronWorkflowSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CronWorkflowSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CronWorkflowSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WorkflowSpec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } 
- if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.WorkflowSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schedule", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Schedule = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConcurrencyPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ConcurrencyPolicy = ConcurrencyPolicy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Suspend", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Suspend = bool(v != 0) - case 5: - if wireType != 0 { 
- return fmt.Errorf("proto: wrong wireType = %d for field StartingDeadlineSeconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.StartingDeadlineSeconds = &v - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SuccessfulJobsHistoryLimit", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.SuccessfulJobsHistoryLimit = &v - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FailedJobsHistoryLimit", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.FailedJobsHistoryLimit = &v - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timezone", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Timezone = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
WorkflowMetadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.WorkflowMetadata == nil { - m.WorkflowMetadata = &v11.ObjectMeta{} - } - if err := m.WorkflowMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CronWorkflowStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CronWorkflowStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CronWorkflowStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF 
- } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Active = append(m.Active, v1.ObjectReference{}) - if err := m.Active[len(m.Active)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastScheduledTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LastScheduledTime == nil { - m.LastScheduledTime = &v11.Time{} - } - if err := m.LastScheduledTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, Condition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex 
- default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DAGTask) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DAGTask: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DAGTask: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] 
- iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Template = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Arguments", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Arguments.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TemplateRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TemplateRef == nil { - m.TemplateRef = &TemplateRef{} - } - if err := m.TemplateRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Dependencies", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 
7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Dependencies = append(m.Dependencies, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WithItems", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.WithItems = append(m.WithItems, Item{}) - if err := m.WithItems[len(m.WithItems)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WithParam", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.WithParam = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 
8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WithSequence", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.WithSequence == nil { - m.WithSequence = &Sequence{} - } - if err := m.WithSequence.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field When", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.When = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContinueOn", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - if m.ContinueOn == nil { - m.ContinueOn = &ContinueOn{} - } - if err := m.ContinueOn.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OnExit", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.OnExit = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Depends", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Depends = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hooks", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Hooks == nil { - m.Hooks = make(LifecycleHooks) - } - var mapkey LifecycleEvent - mapvalue := &LifecycleHook{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = LifecycleEvent(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &LifecycleHook{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - 
iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Hooks[LifecycleEvent(mapkey)] = *mapvalue - iNdEx = postIndex - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Inline", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Inline == nil { - m.Inline = &Template{} - } - if err := m.Inline.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DAGTemplate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DAGTemplate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return 
fmt.Errorf("proto: DAGTemplate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Target = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tasks", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Tasks = append(m.Tasks, DAGTask{}) - if err := m.Tasks[len(m.Tasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FailFast", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.FailFast = &b - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - 
if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Data) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Data: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Data: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Transformation", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - 
return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Transformation = append(m.Transformation, TransformationStep{}) - if err := m.Transformation[len(m.Transformation)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DataSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DataSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DataSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ArtifactPaths", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if 
m.ArtifactPaths == nil { - m.ArtifactPaths = &ArtifactPaths{} - } - if err := m.ArtifactPaths.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Event) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Event: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Selector = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy 
< 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExecutorConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExecutorConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExecutorConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServiceAccountName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GCSArtifact) 
Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GCSArtifact: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GCSArtifact: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GCSBucket", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.GCSBucket.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) 
- iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GCSArtifactRepository) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GCSArtifactRepository: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GCSArtifactRepository: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GCSBucket", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.GCSBucket.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KeyFormat", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx 
>= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.KeyFormat = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GCSBucket) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GCSBucket: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GCSBucket: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Bucket", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 
0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Bucket = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountKeySecret", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ServiceAccountKeySecret == nil { - m.ServiceAccountKeySecret = &v1.SecretKeySelector{} - } - if err := m.ServiceAccountKeySecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Gauge) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Gauge: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Gauge: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 
2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Realtime", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Realtime = &b - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operation", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Operation = GaugeOperation(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GitArtifact) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GitArtifact: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GitArtifact: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Repo", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Repo = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - 
if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Revision = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Depth", wireType) - } - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Depth = &v - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Fetch", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Fetch = append(m.Fetch, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UsernameSecret", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.UsernameSecret == nil { - m.UsernameSecret = &v1.SecretKeySelector{} - } - if err := m.UsernameSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - 
return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PasswordSecret", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PasswordSecret == nil { - m.PasswordSecret = &v1.SecretKeySelector{} - } - if err := m.PasswordSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SSHPrivateKeySecret", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SSHPrivateKeySecret == nil { - m.SSHPrivateKeySecret = &v1.SecretKeySelector{} - } - if err := m.SSHPrivateKeySecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field InsecureIgnoreHostKey", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.InsecureIgnoreHostKey = 
bool(v != 0) - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DisableSubmodules", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.DisableSubmodules = bool(v != 0) - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SingleBranch", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.SingleBranch = bool(v != 0) - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Branch", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Branch = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HDFSArtifact) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - 
for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HDFSArtifact: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HDFSArtifact: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HDFSConfig", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.HDFSConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field 
Force", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Force = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HDFSArtifactRepository) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HDFSArtifactRepository: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HDFSArtifactRepository: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HDFSConfig", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.HDFSConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - 
return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PathFormat", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PathFormat = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Force = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HDFSConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HDFSConfig: wiretype end group for non-group") 
- } - if fieldNum <= 0 { - return fmt.Errorf("proto: HDFSConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HDFSKrbConfig", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.HDFSKrbConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Addresses = append(m.Addresses, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HDFSUser", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HDFSUser = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HDFSKrbConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HDFSKrbConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HDFSKrbConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KrbCCacheSecret", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.KrbCCacheSecret == nil { - m.KrbCCacheSecret = &v1.SecretKeySelector{} - } - if err := m.KrbCCacheSecret.Unmarshal(dAtA[iNdEx:postIndex]); 
err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KrbKeytabSecret", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.KrbKeytabSecret == nil { - m.KrbKeytabSecret = &v1.SecretKeySelector{} - } - if err := m.KrbKeytabSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KrbUsername", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.KrbUsername = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KrbRealm", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated 
- } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.KrbRealm = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KrbConfigConfigMap", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.KrbConfigConfigMap == nil { - m.KrbConfigConfigMap = &v1.ConfigMapKeySelector{} - } - if err := m.KrbConfigConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KrbServicePrincipalName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.KrbServicePrincipalName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += 
skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HTTP) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTP: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTP: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Method", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Method = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } 
- if postIndex > l { - return io.ErrUnexpectedEOF - } - m.URL = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Headers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Headers = append(m.Headers, HTTPHeader{}) - if err := m.Headers[len(m.Headers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.TimeoutSeconds = &v - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Body", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Body = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: 
wrong wireType = %d for field SuccessCondition", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SuccessCondition = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field InsecureSkipVerify", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.InsecureSkipVerify = bool(v != 0) - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BodyFrom", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.BodyFrom == nil { - m.BodyFrom = &HTTPBodySource{} - } - if err := m.BodyFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if 
(iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HTTPArtifact) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTPArtifact: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPArtifact: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.URL = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Headers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - 
return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Headers = append(m.Headers, Header{}) - if err := m.Headers[len(m.Headers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Auth", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Auth == nil { - m.Auth = &HTTPAuth{} - } - if err := m.Auth.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HTTPAuth) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTPAuth: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPAuth: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: 
- if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientCert", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ClientCert.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OAuth2", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.OAuth2.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BasicAuth", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.BasicAuth.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - 
return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HTTPBodySource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTPBodySource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPBodySource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Bytes", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Bytes = append(m.Bytes[:0], dAtA[iNdEx:postIndex]...) 
- if m.Bytes == nil { - m.Bytes = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HTTPHeader) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTPHeader: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPHeader: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - 
} - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ValueFrom == nil { - m.ValueFrom = &HTTPHeaderSource{} - } - if err := m.ValueFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HTTPHeaderSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 
0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTPHeaderSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPHeaderSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretKeyRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretKeyRef == nil { - m.SecretKeyRef = &v1.SecretKeySelector{} - } - if err := m.SecretKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Header) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Header: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Header: illegal tag %d (wire type %d)", fieldNum, wire) - 
} - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Histogram) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Histogram: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Histogram: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Buckets", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Buckets = append(m.Buckets, Amount{}) - if err := m.Buckets[len(m.Buckets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - 
return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Inputs) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Inputs: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Inputs: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Parameters = append(m.Parameters, Parameter{}) - if err := m.Parameters[len(m.Parameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Artifacts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { 
- return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Artifacts = append(m.Artifacts, Artifact{}) - if err := m.Artifacts[len(m.Artifacts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Item) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Item: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Item: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
- if m.Value == nil { - m.Value = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelKeys) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelKeys: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelKeys: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + 
skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelValueFrom) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelValueFrom: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelValueFrom: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Expression = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelValues) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var 
wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelValues: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelValues: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LifecycleHook) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := 
int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LifecycleHook: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LifecycleHook: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Template = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Arguments", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Arguments.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TemplateRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << 
shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TemplateRef == nil { - m.TemplateRef = &TemplateRef{} - } - if err := m.TemplateRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Expression = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Link) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Link: wiretype end group for non-group") - } - if 
fieldNum <= 0 { - return fmt.Errorf("proto: Link: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Scope = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + 
intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.URL = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ManifestFrom) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ManifestFrom: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ManifestFrom: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Artifact", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Artifact == nil { - m.Artifact = &Artifact{} - } - if err := m.Artifact.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := 
skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemoizationStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemoizationStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemoizationStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Hit", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Hit = bool(v != 0) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - 
m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CacheName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CacheName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Memoize) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Memoize: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Memoize: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cache", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Cache == nil { - m.Cache = &Cache{} - } - if err := m.Cache.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxAge", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MaxAge = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || 
(iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Metadata) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Metadata: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Annotations == nil { - m.Annotations = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 
7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Annotations[mapkey] = mapvalue - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= 
int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - 
return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MetricLabel) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricLabel: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricLabel: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - 
if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Metrics) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Metrics: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Metrics: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Prometheus", wireType) - } - var msglen 
int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Prometheus = append(m.Prometheus, &Prometheus{}) - if err := m.Prometheus[len(m.Prometheus)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Mutex) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Mutex: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Mutex: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - 
break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MutexHolding) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MutexHolding: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MutexHolding: illegal tag %d (wire type 
%d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Mutex", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Mutex = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Holder", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Holder = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MutexStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MutexStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MutexStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Holding", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Holding = append(m.Holding, MutexHolding{}) - if err := m.Holding[len(m.Holding)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Waiting", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Waiting = append(m.Waiting, MutexHolding{}) - if err := m.Waiting[len(m.Waiting)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err 
:= skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NodeFlag) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NodeFlag: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NodeFlag: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Hooked", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Hooked = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Retried", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Retried = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - 
iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NodeResult) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NodeResult: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NodeResult: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Phase = NodePhase(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - 
return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Outputs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Outputs == nil { - m.Outputs = &Outputs{} - } - if err := m.Outputs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Progress", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Progress = Progress(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NodeStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - 
iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NodeStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NodeStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 
{ - return fmt.Errorf("proto: wrong wireType = %d for field DisplayName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DisplayName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = NodeType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TemplateName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - 
m.TemplateName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TemplateRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TemplateRef == nil { - m.TemplateRef = &TemplateRef{} - } - if err := m.TemplateRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Phase = NodePhase(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BoundaryID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - 
postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.BoundaryID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FinishedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.FinishedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodIP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PodIP = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Daemoned", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Daemoned = &b - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Inputs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Inputs == nil { - m.Inputs = &Inputs{} - } - if 
err := m.Inputs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Outputs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Outputs == nil { - m.Outputs = &Outputs{} - } - if err := m.Outputs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 16: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Children", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Children = append(m.Children, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 17: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OutboundNodes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - 
return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.OutboundNodes = append(m.OutboundNodes, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 20: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TemplateScope", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TemplateScope = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 21: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourcesDuration", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ResourcesDuration == nil { - m.ResourcesDuration = make(ResourcesDuration) - } - var mapkey k8s_io_api_core_v1.ResourceName - var mapvalue int64 - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift 
- if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapvalue |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.ResourcesDuration[k8s_io_api_core_v1.ResourceName(mapkey)] = ((ResourceDuration)(mapvalue)) - iNdEx = postIndex - case 22: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostNodeName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 
{ - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HostNodeName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 23: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MemoizationStatus", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MemoizationStatus == nil { - m.MemoizationStatus = &MemoizationStatus{} - } - if err := m.MemoizationStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 24: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EstimatedDuration", wireType) - } - m.EstimatedDuration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EstimatedDuration |= EstimatedDuration(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 25: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SynchronizationStatus", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SynchronizationStatus == nil { - 
m.SynchronizationStatus = &NodeSynchronizationStatus{} - } - if err := m.SynchronizationStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 26: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Progress", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Progress = Progress(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 27: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeFlag", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NodeFlag == nil { - m.NodeFlag = &NodeFlag{} - } - if err := m.NodeFlag.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NodeSynchronizationStatus) 
Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NodeSynchronizationStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NodeSynchronizationStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Waiting", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Waiting = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NoneStrategy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NoneStrategy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NoneStrategy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *OAuth2Auth) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: OAuth2Auth: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: OAuth2Auth: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientIDSecret", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - if m.ClientIDSecret == nil { - m.ClientIDSecret = &v1.SecretKeySelector{} - } - if err := m.ClientIDSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientSecretSecret", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ClientSecretSecret == nil { - m.ClientSecretSecret = &v1.SecretKeySelector{} - } - if err := m.ClientSecretSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TokenURLSecret", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TokenURLSecret == nil { - m.TokenURLSecret = &v1.SecretKeySelector{} - } - if err := m.TokenURLSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Scopes = append(m.Scopes, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EndpointParams", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EndpointParams = append(m.EndpointParams, OAuth2EndpointParam{}) - if err := m.EndpointParams[len(m.EndpointParams)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *OAuth2EndpointParam) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if 
b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: OAuth2EndpointParam: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: OAuth2EndpointParam: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if 
iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *OSSArtifact) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: OSSArtifact: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: OSSArtifact: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OSSBucket", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.OSSBucket.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *OSSArtifactRepository) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: OSSArtifactRepository: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: OSSArtifactRepository: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OSSBucket", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.OSSBucket.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KeyFormat", wireType) - } - var stringLen uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.KeyFormat = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *OSSBucket) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: OSSBucket: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: OSSBucket: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Endpoint", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { 
- return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Endpoint = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Bucket", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Bucket = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AccessKeySecret", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AccessKeySecret == nil { - m.AccessKeySecret = &v1.SecretKeySelector{} - } - if err := m.AccessKeySecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretKeySecret", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF 
- } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretKeySecret == nil { - m.SecretKeySecret = &v1.SecretKeySelector{} - } - if err := m.SecretKeySecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreateBucketIfNotPresent", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.CreateBucketIfNotPresent = bool(v != 0) - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecurityToken", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SecurityToken = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LifecycleRule", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - 
if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LifecycleRule == nil { - m.LifecycleRule = &OSSLifecycleRule{} - } - if err := m.LifecycleRule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UseSDKCreds", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.UseSDKCreds = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *OSSLifecycleRule) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: OSSLifecycleRule: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: OSSLifecycleRule: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MarkInfrequentAccessAfterDays", wireType) - } - m.MarkInfrequentAccessAfterDays = 0 - for 
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MarkInfrequentAccessAfterDays |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MarkDeletionAfterDays", wireType) - } - m.MarkDeletionAfterDays = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MarkDeletionAfterDays |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Object) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Object: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Object: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen 
|= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) - if m.Value == nil { - m.Value = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Outputs) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Outputs: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Outputs: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Parameters = 
append(m.Parameters, Parameter{}) - if err := m.Parameters[len(m.Parameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Artifacts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Artifacts = append(m.Artifacts, Artifact{}) - if err := m.Artifacts[len(m.Artifacts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Result = &s - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitCode", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - 
intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.ExitCode = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ParallelSteps) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ParallelSteps: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ParallelSteps: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Steps", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Steps = append(m.Steps, WorkflowStep{}) - if err := 
m.Steps[len(m.Steps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Parameter) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Parameter: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Parameter: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 
64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := AnyString(dAtA[iNdEx:postIndex]) - m.Default = &s - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := AnyString(dAtA[iNdEx:postIndex]) - m.Value = &s - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ValueFrom == nil { - m.ValueFrom = &ValueFrom{} - } - if err := m.ValueFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field GlobalName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.GlobalName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Enum", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Enum = append(m.Enum, AnyString(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s 
:= AnyString(dAtA[iNdEx:postIndex]) - m.Description = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Plugin) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Plugin: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Plugin: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodGC) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodGC: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodGC: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Strategy = PodGCStrategy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - 
} - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LabelSelector == nil { - m.LabelSelector = &v11.LabelSelector{} - } - if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeleteDelayDuration", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DeleteDelayDuration = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Prometheus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Prometheus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Prometheus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Labels = append(m.Labels, &MetricLabel{}) - if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Help", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l 
{ - return io.ErrUnexpectedEOF - } - m.Help = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field When", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.When = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Gauge", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Gauge == nil { - m.Gauge = &Gauge{} - } - if err := m.Gauge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return 
ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Histogram == nil { - m.Histogram = &Histogram{} - } - if err := m.Histogram.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Counter", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Counter == nil { - m.Counter = &Counter{} - } - if err := m.Counter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RawArtifact) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RawArtifact: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RawArtifact: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch 
fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceTemplate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceTemplate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceTemplate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Action = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MergeStrategy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MergeStrategy = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Manifest", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Manifest = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SetOwnerReference", wireType) - } - var v int - for shift := uint(0); ; shift 
+= 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.SetOwnerReference = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SuccessCondition", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SuccessCondition = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FailureCondition", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FailureCondition = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) 
<< shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Flags = append(m.Flags, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ManifestFrom", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ManifestFrom == nil { - m.ManifestFrom = &ManifestFrom{} - } - if err := m.ManifestFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RetryAffinity) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RetryAffinity: wiretype end group for 
non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RetryAffinity: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeAntiAffinity", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NodeAntiAffinity == nil { - m.NodeAntiAffinity = &RetryNodeAntiAffinity{} - } - if err := m.NodeAntiAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RetryNodeAntiAffinity) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RetryNodeAntiAffinity: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RetryNodeAntiAffinity: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - 
iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RetryStrategy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RetryStrategy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RetryStrategy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Limit == nil { - m.Limit = &intstr.IntOrString{} - } - if err := m.Limit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RetryPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l 
{ - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RetryPolicy = RetryPolicy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backoff", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Backoff == nil { - m.Backoff = &Backoff{} - } - if err := m.Backoff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Affinity", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Affinity == nil { - m.Affinity = &RetryAffinity{} - } - if err := m.Affinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = 
%d for field Expression", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Expression = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *S3Artifact) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: S3Artifact: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: S3Artifact: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field S3Bucket", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - 
break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.S3Bucket.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *S3ArtifactRepository) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: S3ArtifactRepository: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: S3ArtifactRepository: illegal tag %d 
(wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field S3Bucket", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.S3Bucket.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KeyFormat", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.KeyFormat = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KeyPrefix", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return 
ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.KeyPrefix = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *S3Bucket) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: S3Bucket: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: S3Bucket: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Endpoint", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Endpoint = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Bucket", wireType) - } - var stringLen uint64 - for 
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Bucket = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Region = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Insecure", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Insecure = &b - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AccessKeySecret", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { 
- break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AccessKeySecret == nil { - m.AccessKeySecret = &v1.SecretKeySelector{} - } - if err := m.AccessKeySecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretKeySecret", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretKeySecret == nil { - m.SecretKeySecret = &v1.SecretKeySelector{} - } - if err := m.SecretKeySecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RoleARN", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RoleARN = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UseSDKCreds", wireType) - } - 
var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.UseSDKCreds = bool(v != 0) - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CreateBucketIfNotPresent", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CreateBucketIfNotPresent == nil { - m.CreateBucketIfNotPresent = &CreateS3BucketOptions{} - } - if err := m.CreateBucketIfNotPresent.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EncryptionOptions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.EncryptionOptions == nil { - m.EncryptionOptions = &S3EncryptionOptions{} - } - if err := m.EncryptionOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CASecret", wireType) - } - var msglen 
int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CASecret == nil { - m.CASecret = &v1.SecretKeySelector{} - } - if err := m.CASecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *S3EncryptionOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: S3EncryptionOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: S3EncryptionOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KmsKeyId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.KmsKeyId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KmsEncryptionContext", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.KmsEncryptionContext = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EnableEncryption", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.EnableEncryption = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServerSideCustomerKeySecret", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return 
ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ServerSideCustomerKeySecret == nil { - m.ServerSideCustomerKeySecret = &v1.SecretKeySelector{} - } - if err := m.ServerSideCustomerKeySecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ScriptTemplate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ScriptTemplate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ScriptTemplate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - 
return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Source = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SemaphoreHolding) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SemaphoreHolding: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SemaphoreHolding: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Semaphore", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - 
if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Semaphore = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Holders", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Holders = append(m.Holders, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SemaphoreRef) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return 
fmt.Errorf("proto: SemaphoreRef: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SemaphoreRef: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapKeyRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ConfigMapKeyRef == nil { - m.ConfigMapKeyRef = &v1.ConfigMapKeySelector{} - } - if err := m.ConfigMapKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return 
io.ErrUnexpectedEOF - } - return nil -} -func (m *SemaphoreStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SemaphoreStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SemaphoreStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Holding", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Holding = append(m.Holding, SemaphoreHolding{}) - if err := m.Holding[len(m.Holding)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Waiting", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - m.Waiting = append(m.Waiting, SemaphoreHolding{}) - if err := m.Waiting[len(m.Waiting)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Sequence) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Sequence: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Sequence: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Count == nil { - m.Count = &intstr.IntOrString{} - } - if err := m.Count.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 
2 { - return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Start == nil { - m.Start = &intstr.IntOrString{} - } - if err := m.Start.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.End == nil { - m.End = &intstr.IntOrString{} - } - if err := m.End.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated 
- } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Format = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Submit) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Submit: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Submit: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WorkflowTemplateRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.WorkflowTemplateRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Arguments", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 
{ - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Arguments == nil { - m.Arguments = &Arguments{} - } - if err := m.Arguments.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SubmitOpts) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := 
int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SubmitOpts: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SubmitOpts: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GenerateName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.GenerateName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Entrypoint", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Entrypoint = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Parameters = append(m.Parameters, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccount", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServiceAccount = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.DryRun = bool(v != 0) - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ServerDryRun", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ServerDryRun = bool(v != 0) - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Labels = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OwnerReference", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.OwnerReference == nil { - m.OwnerReference = &v11.OwnerReference{} - } - if err := 
m.OwnerReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Annotations = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodPriorityClassName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PodPriorityClassName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Priority = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - 
// NOTE(review): everything below is protoc-gen-gogo generated protobuf
// decoding code ("DO NOT EDIT" by convention — hand edits are lost on
// regeneration). Code kept token-identical; comments only added.
//
// Tail of SubmitOpts.Unmarshal (the function header is above this chunk):
// finishes skipping an unknown field, then validates the final offset.
return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}

// Unmarshal decodes a SuppliedValueFrom from its protobuf wire form.
// The message carries no fields, so every tag encountered is skipped
// via skipGenerated.
func (m *SuppliedValueFrom) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (a varint: fieldNum<<3 | wireType).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: SuppliedValueFrom: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: SuppliedValueFrom: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		default:
			// Unknown field: skip its payload and continue.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}

// Unmarshal decodes a SuspendTemplate from its protobuf wire form.
// Field 1 is the string Duration; anything else is skipped.
func (m *SuspendTemplate) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: SuspendTemplate: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: SuspendTemplate: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Duration: length-delimited string.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Duration = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip its payload.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}

// Unmarshal decodes a Synchronization from its protobuf wire form.
// Field 1 is the embedded Semaphore (SemaphoreRef), field 2 the
// embedded Mutex; both are lazily allocated before sub-decoding.
func (m *Synchronization) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Synchronization: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Synchronization: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Semaphore: length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Semaphore", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Semaphore == nil {
				m.Semaphore = &SemaphoreRef{}
			}
			if err := m.Semaphore.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Mutex: length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Mutex", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Mutex == nil {
				m.Mutex = &Mutex{}
			}
			if err := m.Mutex.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip its payload.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}

// Unmarshal decodes a SynchronizationStatus from its protobuf wire
// form. Field 1 is the embedded Semaphore (SemaphoreStatus), field 2
// the embedded Mutex (MutexStatus); both are lazily allocated.
func (m *SynchronizationStatus) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: SynchronizationStatus: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: SynchronizationStatus: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Semaphore: length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Semaphore", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Semaphore == nil {
				m.Semaphore = &SemaphoreStatus{}
			}
			if err := m.Semaphore.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Mutex: length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Mutex", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Mutex == nil {
				m.Mutex = &MutexStatus{}
			}
			if err := m.Mutex.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip its payload.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}

// Unmarshal decodes a TTLStrategy from its protobuf wire form.
// Fields 1-3 are optional int32 second counts, each decoded as a
// varint and stored through a freshly allocated pointer.
func (m *TTLStrategy) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: TTLStrategy: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: TTLStrategy: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// SecondsAfterCompletion: optional varint int32.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field SecondsAfterCompletion", wireType)
			}
			var v int32
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= int32(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.SecondsAfterCompletion = &v
		case 2:
			// SecondsAfterSuccess: optional varint int32.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field SecondsAfterSuccess", wireType)
			}
			var v int32
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= int32(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.SecondsAfterSuccess = &v
		case 3:
			// SecondsAfterFailure: optional varint int32.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field SecondsAfterFailure", wireType)
			}
			var v int32
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= int32(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.SecondsAfterFailure = &v
		default:
			// Unknown field: skip its payload.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}

// Unmarshal decodes a TarStrategy from its protobuf wire form.
// Field 1 is the optional int32 CompressionLevel.
func (m *TarStrategy) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: TarStrategy: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: TarStrategy: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// CompressionLevel: optional varint int32.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field CompressionLevel", wireType)
			}
			var v int32
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= int32(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.CompressionLevel = &v
		default:
			// Unknown field: skip its payload.
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}

// Unmarshal decodes a Template from its protobuf wire form.
// Head fragment only — the function body continues below this chunk.
func (m *Template) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
- } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Template: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Template: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Inputs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Inputs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Outputs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Outputs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NodeSelector == nil { - m.NodeSelector = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return 
ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.NodeSelector[mapkey] = mapvalue - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Affinity", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Affinity == nil { - m.Affinity = &v1.Affinity{} - } - if err := m.Affinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: 
- if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Daemon", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Daemon = &b - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Steps", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Steps = append(m.Steps, ParallelSteps{}) - if err := m.Steps[len(m.Steps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Container == nil { - m.Container = &v1.Container{} - } - if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Script", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Script == nil { - m.Script = &ScriptTemplate{} - } - if err := m.Script.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Resource == nil { - m.Resource = &ResourceTemplate{} - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - 
} - iNdEx = postIndex - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DAG", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.DAG == nil { - m.DAG = &DAGTemplate{} - } - if err := m.DAG.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 16: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Suspend", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Suspend == nil { - m.Suspend = &SuspendTemplate{} - } - if err := m.Suspend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 17: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - 
if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Volumes = append(m.Volumes, v1.Volume{}) - if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 18: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InitContainers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.InitContainers = append(m.InitContainers, UserContainer{}) - if err := m.InitContainers[len(m.InitContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 19: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Sidecars", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Sidecars = append(m.Sidecars, UserContainer{}) - if err := m.Sidecars[len(m.Sidecars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 20: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ArchiveLocation", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= 
l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ArchiveLocation == nil { - m.ArchiveLocation = &ArtifactLocation{} - } - if err := m.ArchiveLocation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 21: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ActiveDeadlineSeconds == nil { - m.ActiveDeadlineSeconds = &intstr.IntOrString{} - } - if err := m.ActiveDeadlineSeconds.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 22: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RetryStrategy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RetryStrategy == nil { - m.RetryStrategy = &RetryStrategy{} - } - if err := 
m.RetryStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 23: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Parallelism", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Parallelism = &v - case 24: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Tolerations = append(m.Tolerations, v1.Toleration{}) - if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 25: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SchedulerName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SchedulerName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 26: - if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field PriorityClassName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PriorityClassName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 27: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Priority = &v - case 28: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServiceAccountName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 29: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostAliases", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HostAliases = append(m.HostAliases, v1.HostAlias{}) - if err := m.HostAliases[len(m.HostAliases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 30: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecurityContext == nil { - m.SecurityContext = &v1.PodSecurityContext{} - } - if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 31: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodSpecPatch", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PodSpecPatch = 
string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 32: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AutomountServiceAccountToken", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.AutomountServiceAccountToken = &b - case 33: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Executor", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Executor == nil { - m.Executor = &ExecutorConfig{} - } - if err := m.Executor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 35: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Metrics == nil { - m.Metrics = &Metrics{} - } - if err := m.Metrics.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 36: - if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field Synchronization", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Synchronization == nil { - m.Synchronization = &Synchronization{} - } - if err := m.Synchronization.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 37: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Memoize", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Memoize == nil { - m.Memoize = &Memoize{} - } - if err := m.Memoize.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 38: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timeout", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 
0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Timeout = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 39: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Data == nil { - m.Data = &Data{} - } - if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 40: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerSet", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ContainerSet == nil { - m.ContainerSet = &ContainerSetTemplate{} - } - if err := m.ContainerSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 41: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FailFast", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } 
- } - b := bool(v != 0) - m.FailFast = &b - case 42: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.HTTP == nil { - m.HTTP = &HTTP{} - } - if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 43: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Plugin", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Plugin == nil { - m.Plugin = &Plugin{} - } - if err := m.Plugin.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TemplateRef) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TemplateRef: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TemplateRef: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Template = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterScope", 
wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ClusterScope = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TransformationStep) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TransformationStep: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TransformationStep: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Expression = 
string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UserContainer) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UserContainer: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UserContainer: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MirrorVolumeMounts", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.MirrorVolumeMounts = &b - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ValueFrom) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ValueFrom: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ValueFrom: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field JSONPath", wireType) - } 
- var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.JSONPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field JQFilter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.JQFilter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Parameter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 
{ - return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := AnyString(dAtA[iNdEx:postIndex]) - m.Default = &s - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Supplied", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Supplied == nil { - m.Supplied = &SuppliedValueFrom{} - } - if err := m.Supplied.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - m.Event = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Expression = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapKeyRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ConfigMapKeyRef == nil { - m.ConfigMapKeyRef = &v1.ConfigMapKeySelector{} - } - if err := m.ConfigMapKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Version) Unmarshal(dAtA []byte) error { - l := len(dAtA) 
- iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Version: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Version: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BuildDate", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.BuildDate = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - 
if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GitCommit", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.GitCommit = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GitTag", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.GitTag = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GitTreeState", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.GitTreeState = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GoVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.GoVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Compiler", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Compiler = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Platform", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - 
if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Platform = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VolumeClaimGC) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VolumeClaimGC: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeClaimGC: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Strategy = VolumeClaimGCStrategy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err 
!= nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Workflow) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Workflow: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Workflow: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - 
return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WorkflowArtifactGCTask) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WorkflowArtifactGCTask: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WorkflowArtifactGCTask: illegal tag %d (wire type %d)", fieldNum, wire) - } - 
switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); 
err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WorkflowArtifactGCTaskList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WorkflowArtifactGCTaskList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WorkflowArtifactGCTaskList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, WorkflowArtifactGCTask{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WorkflowEventBinding) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WorkflowEventBinding: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WorkflowEventBinding: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WorkflowEventBindingList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WorkflowEventBindingList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WorkflowEventBindingList: illegal tag %d (wire type %d)", fieldNum, wire) - } - 
switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, WorkflowEventBinding{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WorkflowEventBindingSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WorkflowEventBindingSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WorkflowEventBindingSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Event.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Submit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Submit == nil { - m.Submit = &Submit{} - } - if err := m.Submit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - 
return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WorkflowLevelArtifactGC) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WorkflowLevelArtifactGC: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WorkflowLevelArtifactGC: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ArtifactGC", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ArtifactGC.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ForceFinalizerRemoval", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 
0x80 { - break - } - } - m.ForceFinalizerRemoval = bool(v != 0) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodSpecPatch", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PodSpecPatch = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WorkflowList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WorkflowList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WorkflowList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Workflow{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WorkflowMetadata) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 
0x7) - if wireType == 4 { - return fmt.Errorf("proto: WorkflowMetadata: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WorkflowMetadata: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var 
stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Annotations == nil { - m.Annotations = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift 
- if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Annotations[mapkey] = mapvalue - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelsFrom", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - 
if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LabelsFrom == nil { - m.LabelsFrom = make(map[string]LabelValueFrom) - } - var mapkey string - mapvalue := &LabelValueFrom{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return 
ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &LabelValueFrom{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.LabelsFrom[mapkey] = *mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WorkflowSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WorkflowSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WorkflowSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Templates", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Templates = append(m.Templates, Template{}) - if err := m.Templates[len(m.Templates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Entrypoint", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Entrypoint = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Arguments", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Arguments.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServiceAccountName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Volumes = append(m.Volumes, v1.Volume{}) - if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeClaimTemplates", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, v1.PersistentVolumeClaim{}) - if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = 
postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Parallelism", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Parallelism = &v - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ArtifactRepositoryRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ArtifactRepositoryRef == nil { - m.ArtifactRepositoryRef = &ArtifactRepositoryRef{} - } - if err := m.ArtifactRepositoryRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Suspend", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Suspend = &b - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NodeSelector == nil { - m.NodeSelector = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = 
string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.NodeSelector[mapkey] = mapvalue - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Affinity", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Affinity == nil { - m.Affinity = &v1.Affinity{} - } - if err := m.Affinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Tolerations = append(m.Tolerations, v1.Toleration{}) - if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field ImagePullSecrets", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ImagePullSecrets = append(m.ImagePullSecrets, v1.LocalObjectReference{}) - if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.HostNetwork = &b - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DNSPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := k8s_io_api_core_v1.DNSPolicy(dAtA[iNdEx:postIndex]) - m.DNSPolicy = &s - iNdEx = postIndex - case 16: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DNSConfig", wireType) - } - var msglen 
int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.DNSConfig == nil { - m.DNSConfig = &v1.PodDNSConfig{} - } - if err := m.DNSConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 17: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OnExit", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.OnExit = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 19: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ActiveDeadlineSeconds = &v - case 20: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b 
:= dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Priority = &v - case 21: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SchedulerName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SchedulerName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 22: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodGC", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PodGC == nil { - m.PodGC = &PodGC{} - } - if err := m.PodGC.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 23: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodPriorityClassName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PodPriorityClassName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 24: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PodPriority", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.PodPriority = &v - case 25: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostAliases", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HostAliases = append(m.HostAliases, v1.HostAlias{}) - if err := m.HostAliases[len(m.HostAliases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 26: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if 
m.SecurityContext == nil { - m.SecurityContext = &v1.PodSecurityContext{} - } - if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 27: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodSpecPatch", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PodSpecPatch = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 28: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AutomountServiceAccountToken", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.AutomountServiceAccountToken = &b - case 29: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Executor", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Executor == nil { - m.Executor = &ExecutorConfig{} - } - if err := 
m.Executor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 30: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TTLStrategy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TTLStrategy == nil { - m.TTLStrategy = &TTLStrategy{} - } - if err := m.TTLStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 31: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodDisruptionBudget", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PodDisruptionBudget == nil { - m.PodDisruptionBudget = &v12.PodDisruptionBudgetSpec{} - } - if err := m.PodDisruptionBudget.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 32: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { 
- break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Metrics == nil { - m.Metrics = &Metrics{} - } - if err := m.Metrics.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 33: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shutdown", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Shutdown = ShutdownStrategy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 34: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WorkflowTemplateRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.WorkflowTemplateRef == nil { - m.WorkflowTemplateRef = &WorkflowTemplateRef{} - } - if err := m.WorkflowTemplateRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 35: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Synchronization", wireType) - } - var 
msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Synchronization == nil { - m.Synchronization = &Synchronization{} - } - if err := m.Synchronization.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 36: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeClaimGC", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.VolumeClaimGC == nil { - m.VolumeClaimGC = &VolumeClaimGC{} - } - if err := m.VolumeClaimGC.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 37: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RetryStrategy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if 
m.RetryStrategy == nil { - m.RetryStrategy = &RetryStrategy{} - } - if err := m.RetryStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 38: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodMetadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PodMetadata == nil { - m.PodMetadata = &Metadata{} - } - if err := m.PodMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 39: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TemplateDefaults", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TemplateDefaults == nil { - m.TemplateDefaults = &Template{} - } - if err := m.TemplateDefaults.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 40: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ArchiveLogs", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ 
- v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.ArchiveLogs = &b - case 41: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hooks", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Hooks == nil { - m.Hooks = make(LifecycleHooks) - } - var mapkey LifecycleEvent - mapvalue := &LifecycleHook{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = LifecycleEvent(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &LifecycleHook{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Hooks[LifecycleEvent(mapkey)] = *mapvalue - iNdEx = postIndex - case 42: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WorkflowMetadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.WorkflowMetadata == nil { - m.WorkflowMetadata = &WorkflowMetadata{} - } - if err := m.WorkflowMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 43: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ArtifactGC", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if 
msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ArtifactGC == nil { - m.ArtifactGC = &WorkflowLevelArtifactGC{} - } - if err := m.ArtifactGC.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WorkflowStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WorkflowStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WorkflowStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex 
> l { - return io.ErrUnexpectedEOF - } - m.Phase = WorkflowPhase(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FinishedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.FinishedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if 
postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CompressedNodes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CompressedNodes = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Nodes == nil { - m.Nodes = make(Nodes) - } - var mapkey string - mapvalue := &NodeStatus{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &NodeStatus{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Nodes[mapkey] = *mapvalue - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeClaims", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PersistentVolumeClaims = append(m.PersistentVolumeClaims, v1.Volume{}) - if err := m.PersistentVolumeClaims[len(m.PersistentVolumeClaims)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Outputs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Outputs == nil { - m.Outputs = &Outputs{} - } - if err := m.Outputs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StoredTemplates", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.StoredTemplates == nil { - m.StoredTemplates = make(map[string]Template) - } - var mapkey string - mapvalue := &Template{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &Template{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.StoredTemplates[mapkey] = *mapvalue - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
OffloadNodeStatusVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.OffloadNodeStatusVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourcesDuration", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ResourcesDuration == nil { - m.ResourcesDuration = make(ResourcesDuration) - } - var mapkey k8s_io_api_core_v1.ResourceName - var mapvalue int64 - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - 
break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapvalue |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.ResourcesDuration[k8s_io_api_core_v1.ResourceName(mapkey)] = ((ResourceDuration)(mapvalue)) - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, Condition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StoredWorkflowSpec", wireType) - } - var 
msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.StoredWorkflowSpec == nil { - m.StoredWorkflowSpec = &WorkflowSpec{} - } - if err := m.StoredWorkflowSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Synchronization", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Synchronization == nil { - m.Synchronization = &SynchronizationStatus{} - } - if err := m.Synchronization.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 16: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EstimatedDuration", wireType) - } - m.EstimatedDuration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EstimatedDuration |= EstimatedDuration(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 17: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Progress", wireType) - } - var stringLen uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Progress = Progress(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 18: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ArtifactRepositoryRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ArtifactRepositoryRef == nil { - m.ArtifactRepositoryRef = &ArtifactRepositoryRefStatus{} - } - if err := m.ArtifactRepositoryRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 19: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ArtifactGCStatus", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ArtifactGCStatus == nil { - m.ArtifactGCStatus = 
&ArtGCStatus{} - } - if err := m.ArtifactGCStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 20: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TaskResultsCompletionStatus", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TaskResultsCompletionStatus == nil { - m.TaskResultsCompletionStatus = make(map[string]bool) - } - var mapkey string - var mapvalue bool - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapvaluetemp int - for shift := uint(0); ; shift += 7 { - if 
shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapvaluetemp |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - mapvalue = bool(mapvaluetemp != 0) - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.TaskResultsCompletionStatus[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WorkflowStep) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WorkflowStep: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WorkflowStep: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - 
} - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Template = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Arguments", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Arguments.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TemplateRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TemplateRef == nil { - m.TemplateRef = &TemplateRef{} - } - if err := m.TemplateRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WithItems", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.WithItems = append(m.WithItems, Item{}) - if err := m.WithItems[len(m.WithItems)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WithParam", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.WithParam = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WithSequence", wireType) - } - 
var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.WithSequence == nil { - m.WithSequence = &Sequence{} - } - if err := m.WithSequence.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field When", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.When = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContinueOn", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ContinueOn == nil { - m.ContinueOn = &ContinueOn{} - } - if err := 
m.ContinueOn.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OnExit", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.OnExit = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hooks", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Hooks == nil { - m.Hooks = make(LifecycleHooks) - } - var mapkey LifecycleEvent - mapvalue := &LifecycleHook{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = LifecycleEvent(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &LifecycleHook{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Hooks[LifecycleEvent(mapkey)] = *mapvalue - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Inline", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if 
postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Inline == nil { - m.Inline = &Template{} - } - if err := m.Inline.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WorkflowTaskResult) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WorkflowTaskResult: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WorkflowTaskResult: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 
2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeResult", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.NodeResult.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WorkflowTaskResultList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WorkflowTaskResultList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WorkflowTaskResultList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, WorkflowTaskResult{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WorkflowTaskSet) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return 
fmt.Errorf("proto: WorkflowTaskSet: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WorkflowTaskSet: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - 
postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WorkflowTaskSetList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WorkflowTaskSetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WorkflowTaskSetList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 
2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, WorkflowTaskSet{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WorkflowTaskSetSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WorkflowTaskSetSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WorkflowTaskSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tasks", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Tasks == nil { - m.Tasks = make(map[string]Template) - } - var mapkey string - mapvalue := &Template{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = 
&Template{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Tasks[mapkey] = *mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WorkflowTaskSetStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WorkflowTaskSetStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WorkflowTaskSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - 
return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Nodes == nil { - m.Nodes = make(map[string]NodeResult) - } - var mapkey string - mapvalue := &NodeResult{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &NodeResult{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } 
- if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Nodes[mapkey] = *mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WorkflowTemplate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WorkflowTemplate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WorkflowTemplate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WorkflowTemplateList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WorkflowTemplateList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WorkflowTemplateList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= 
int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, WorkflowTemplate{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WorkflowTemplateRef) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WorkflowTemplateRef: wiretype end group for non-group") 
- } - if fieldNum <= 0 { - return fmt.Errorf("proto: WorkflowTemplateRef: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterScope", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ClusterScope = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ZipStrategy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType 
:= int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ZipStrategy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ZipStrategy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - 
ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.proto b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.proto deleted file mode 100644 index 0609fdf6d..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.proto +++ /dev/null @@ -1,2251 +0,0 @@ - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = "proto2"; - -package github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1; - -import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/api/policy/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; -import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1alpha1"; - -// Amount represent a numeric amount. 
-// +kubebuilder:validation:Type=number -message Amount { - optional string value = 1; -} - -// ArchiveStrategy describes how to archive files/directory when saving artifacts -message ArchiveStrategy { - optional TarStrategy tar = 1; - - optional NoneStrategy none = 2; - - optional ZipStrategy zip = 3; -} - -// Arguments to a template -message Arguments { - // Parameters is the list of parameters to pass to the template or workflow - // +patchStrategy=merge - // +patchMergeKey=name - repeated Parameter parameters = 1; - - // Artifacts is the list of artifacts to pass to the template or workflow - // +patchStrategy=merge - // +patchMergeKey=name - repeated Artifact artifacts = 2; -} - -// ArtGCStatus maintains state related to ArtifactGC -message ArtGCStatus { - // have Pods been started to perform this strategy? (enables us not to re-process what we've already done) - map<string, bool> strategiesProcessed = 1; - - // have completed Pods been processed? (mapped by Pod name) - // used to prevent re-processing the Status of a Pod more than once - map<string, bool> podsRecouped = 2; - - // if this is true, we already checked to see if we need to do it and we don't - optional bool notSpecified = 3; -} - -// Artifact indicates an artifact to place at a specified path -message Artifact { - // name of the artifact. must be unique within a template's inputs/outputs. - optional string name = 1; - - // Path is the container path to the artifact - optional string path = 2; - - // mode bits to use on this file, must be a value between 0 and 0777 - // set when loading input artifacts. 
- optional int32 mode = 3; - - // From allows an artifact to reference an artifact from a previous step - optional string from = 4; - - // ArtifactLocation contains the location of the artifact - optional ArtifactLocation artifactLocation = 5; - - // GlobalName exports an output artifact to the global scope, making it available as - // '{{workflow.outputs.artifacts.XXXX}} and in workflow.status.outputs.artifacts - optional string globalName = 6; - - // Archive controls how the artifact will be saved to the artifact repository. - optional ArchiveStrategy archive = 7; - - // Make Artifacts optional, if Artifacts doesn't generate or exist - optional bool optional = 8; - - // SubPath allows an artifact to be sourced from a subpath within the specified source - optional string subPath = 9; - - // If mode is set, apply the permission recursively into the artifact if it is a folder - optional bool recurseMode = 10; - - // FromExpression, if defined, is evaluated to specify the value for the artifact - optional string fromExpression = 11; - - // ArtifactGC describes the strategy to use when to deleting an artifact from completed or deleted workflows - optional ArtifactGC artifactGC = 12; - - // Has this been deleted? - optional bool deleted = 13; -} - -// ArtifactGC describes how to delete artifacts from completed Workflows - this is embedded into the WorkflowLevelArtifactGC, and also used for individual Artifacts to override that as needed -message ArtifactGC { - // Strategy is the strategy to use. 
- // +kubebuilder:validation:Enum="";OnWorkflowCompletion;OnWorkflowDeletion;Never - optional string strategy = 1; - - // PodMetadata is an optional field for specifying the Labels and Annotations that should be assigned to the Pod doing the deletion - optional Metadata podMetadata = 2; - - // ServiceAccountName is an optional field for specifying the Service Account that should be assigned to the Pod doing the deletion - optional string serviceAccountName = 3; -} - -// ArtifactGCSpec specifies the Artifacts that need to be deleted -message ArtifactGCSpec { - // ArtifactsByNode maps Node name to information pertaining to Artifacts on that Node - map<string, ArtifactNodeSpec> artifactsByNode = 1; -} - -// ArtifactGCStatus describes the result of the deletion -message ArtifactGCStatus { - // ArtifactResultsByNode maps Node name to result - map<string, ArtifactResultNodeStatus> artifactResultsByNode = 1; -} - -// ArtifactLocation describes a location for a single or multiple artifacts. -// It is used as single artifact in the context of inputs/outputs (e.g. outputs.artifacts.artname). -// It is also used to describe the location of multiple artifacts such as the archive location -// of a single workflow step, which the executor will use as a default location to store its files. 
-message ArtifactLocation { - // ArchiveLogs indicates if the container logs should be archived - optional bool archiveLogs = 1; - - // S3 contains S3 artifact location details - optional S3Artifact s3 = 2; - - // Git contains git artifact location details - optional GitArtifact git = 3; - - // HTTP contains HTTP artifact location details - optional HTTPArtifact http = 4; - - // Artifactory contains artifactory artifact location details - optional ArtifactoryArtifact artifactory = 5; - - // HDFS contains HDFS artifact location details - optional HDFSArtifact hdfs = 6; - - // Raw contains raw artifact location details - optional RawArtifact raw = 7; - - // OSS contains OSS artifact location details - optional OSSArtifact oss = 8; - - // GCS contains GCS artifact location details - optional GCSArtifact gcs = 9; - - // Azure contains Azure Storage artifact location details - optional AzureArtifact azure = 10; -} - -// ArtifactNodeSpec specifies the Artifacts that need to be deleted for a given Node -message ArtifactNodeSpec { - // ArchiveLocation is the template-level Artifact location specification - optional ArtifactLocation archiveLocation = 1; - - // Artifacts maps artifact name to Artifact description - map<string, Artifact> artifacts = 2; -} - -// ArtifactPaths expands a step from a collection of artifacts -message ArtifactPaths { - // Artifact is the artifact location from which to source the artifacts, it can be a directory - optional Artifact artifact = 1; -} - -// ArtifactRepository represents an artifact repository in which a controller will store its artifacts -message ArtifactRepository { - // ArchiveLogs enables log archiving - optional bool archiveLogs = 1; - - // S3 stores artifact in a S3-compliant object store - optional S3ArtifactRepository s3 = 2; - - // Artifactory stores artifacts to JFrog Artifactory - optional ArtifactoryArtifactRepository artifactory = 3; - - // HDFS stores artifacts in HDFS - optional HDFSArtifactRepository hdfs = 4; - - // 
OSS stores artifact in a OSS-compliant object store - optional OSSArtifactRepository oss = 5; - - // GCS stores artifact in a GCS object store - optional GCSArtifactRepository gcs = 6; - - // Azure stores artifact in an Azure Storage account - optional AzureArtifactRepository azure = 7; -} - -// +protobuf.options.(gogoproto.goproto_stringer)=false -message ArtifactRepositoryRef { - // The name of the config map. Defaults to "artifact-repositories". - optional string configMap = 1; - - // The config map key. Defaults to the value of the "workflows.argoproj.io/default-artifact-repository" annotation. - optional string key = 2; -} - -// +protobuf.options.(gogoproto.goproto_stringer)=false -message ArtifactRepositoryRefStatus { - optional ArtifactRepositoryRef artifactRepositoryRef = 1; - - // The namespace of the config map. Defaults to the workflow's namespace, or the controller's namespace (if found). - optional string namespace = 2; - - // If this ref represents the default artifact repository, rather than a config map. - optional bool default = 3; - - // The repository the workflow will use. This maybe empty before v3.1. 
- optional ArtifactRepository artifactRepository = 4; -} - -// ArtifactResult describes the result of attempting to delete a given Artifact -message ArtifactResult { - // Name is the name of the Artifact - optional string name = 1; - - // Success describes whether the deletion succeeded - optional bool success = 2; - - // Error is an optional error message which should be set if Success==false - optional string error = 3; -} - -// ArtifactResultNodeStatus describes the result of the deletion on a given node -message ArtifactResultNodeStatus { - // ArtifactResults maps Artifact name to result of the deletion - map<string, ArtifactResult> artifactResults = 1; -} - -message ArtifactSearchQuery { - map<string, bool> artifactGCStrategies = 1; - - optional string artifactName = 2; - - optional string templateName = 3; - - optional string nodeId = 4; - - optional bool deleted = 5; - - map<string, bool> nodeTypes = 6; -} - -message ArtifactSearchResult { - optional Artifact artifact = 1; - - optional string nodeID = 2; -} - -// ArtifactoryArtifact is the location of an artifactory artifact -message ArtifactoryArtifact { - // URL of the artifact - optional string url = 1; - - optional ArtifactoryAuth artifactoryAuth = 2; -} - -// ArtifactoryArtifactRepository defines the controller configuration for an artifactory artifact repository -message ArtifactoryArtifactRepository { - optional ArtifactoryAuth artifactoryAuth = 1; - - // RepoURL is the url for artifactory repo. - optional string repoURL = 2; - - // KeyFormat defines the format of how to store keys and can reference workflow variables. 
- optional string keyFormat = 3; -} - -// ArtifactoryAuth describes the secret selectors required for authenticating to artifactory -message ArtifactoryAuth { - // UsernameSecret is the secret selector to the repository username - optional k8s.io.api.core.v1.SecretKeySelector usernameSecret = 1; - - // PasswordSecret is the secret selector to the repository password - optional k8s.io.api.core.v1.SecretKeySelector passwordSecret = 2; -} - -// AzureArtifact is the location of a an Azure Storage artifact -message AzureArtifact { - optional AzureBlobContainer azureBlobContainer = 1; - - // Blob is the blob name (i.e., path) in the container where the artifact resides - optional string blob = 2; -} - -// AzureArtifactRepository defines the controller configuration for an Azure Blob Storage artifact repository -message AzureArtifactRepository { - optional AzureBlobContainer blobContainer = 1; - - // BlobNameFormat is defines the format of how to store blob names. Can reference workflow variables - optional string blobNameFormat = 2; -} - -// AzureBlobContainer contains the access information for interfacing with an Azure Blob Storage container -message AzureBlobContainer { - // Endpoint is the service url associated with an account. It is most likely "https://<ACCOUNT_NAME>.blob.core.windows.net" - optional string endpoint = 1; - - // Container is the container where resources will be stored - optional string container = 2; - - // AccountKeySecret is the secret selector to the Azure Blob Storage account access key - optional k8s.io.api.core.v1.SecretKeySelector accountKeySecret = 3; - - // UseSDKCreds tells the driver to figure out credentials based on sdk defaults. - optional bool useSDKCreds = 4; -} - -// Backoff is a backoff strategy to use within retryStrategy -message Backoff { - // Duration is the amount to back off. Default unit is seconds, but could also be a duration (e.g. 
"2m", "1h") - optional string duration = 1; - - // Factor is a factor to multiply the base duration after each failed retry - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString factor = 2; - - // MaxDuration is the maximum amount of time allowed for a workflow in the backoff strategy. - // It is important to note that if the workflow template includes activeDeadlineSeconds, the pod's deadline is initially set with activeDeadlineSeconds. - // However, when the workflow fails, the pod's deadline is then overridden by maxDuration. - // This ensures that the workflow does not exceed the specified maximum duration when retries are involved. - optional string maxDuration = 3; -} - -// BasicAuth describes the secret selectors required for basic authentication -message BasicAuth { - // UsernameSecret is the secret selector to the repository username - optional k8s.io.api.core.v1.SecretKeySelector usernameSecret = 1; - - // PasswordSecret is the secret selector to the repository password - optional k8s.io.api.core.v1.SecretKeySelector passwordSecret = 2; -} - -// Cache is the configuration for the type of cache to be used -message Cache { - // ConfigMap sets a ConfigMap-based cache - optional k8s.io.api.core.v1.ConfigMapKeySelector configMap = 1; -} - -// ClientCertAuth holds necessary information for client authentication via certificates -message ClientCertAuth { - optional k8s.io.api.core.v1.SecretKeySelector clientCertSecret = 1; - - optional k8s.io.api.core.v1.SecretKeySelector clientKeySecret = 2; -} - -// ClusterWorkflowTemplate is the definition of a workflow template resource in cluster scope -// +genclient -// +genclient:noStatus -// +genclient:nonNamespaced -// +kubebuilder:resource:scope=Cluster,shortName=clusterwftmpl;cwft -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -message ClusterWorkflowTemplate { - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - optional WorkflowSpec spec = 2; -} - -// 
ClusterWorkflowTemplateList is list of ClusterWorkflowTemplate resources -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -message ClusterWorkflowTemplateList { - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - repeated ClusterWorkflowTemplate items = 2; -} - -// Column is a custom column that will be exposed in the Workflow List View. -// +patchStrategy=merge -// +patchMergeKey=name -message Column { - // The name of this column, e.g., "Workflow Completed". - optional string name = 1; - - // The type of this column, "label" or "annotation". - optional string type = 2; - - // The key of the label or annotation, e.g., "workflows.argoproj.io/completed". - optional string key = 3; -} - -message Condition { - // Type is the type of condition - optional string type = 1; - - // Status is the status of the condition - optional string status = 2; - - // Message is the condition message - optional string message = 3; -} - -message ContainerNode { - optional k8s.io.api.core.v1.Container container = 1; - - repeated string dependencies = 2; -} - -// ContainerSetRetryStrategy provides controls on how to retry a container set -message ContainerSetRetryStrategy { - // Duration is the time between each retry, examples values are "300ms", "1s" or "5m". - // Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". - optional string duration = 1; - - // Retries is the maximum number of retry attempts for each container. It does not include the - // first, original attempt; the maximum number of total attempts will be `retries + 1`. - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString retries = 2; -} - -message ContainerSetTemplate { - repeated ContainerNode containers = 4; - - repeated k8s.io.api.core.v1.VolumeMount volumeMounts = 3; - - // RetryStrategy describes how to retry container nodes if the container set fails. 
- // Note that this works differently from the template-level `retryStrategy` as it is a process-level retry that does not create new Pods or containers. - optional ContainerSetRetryStrategy retryStrategy = 5; -} - -// ContinueOn defines if a workflow should continue even if a task or step fails/errors. -// It can be specified if the workflow should continue when the pod errors, fails or both. -message ContinueOn { - // +optional - optional bool error = 1; - - // +optional - optional bool failed = 2; -} - -// Counter is a Counter prometheus metric -message Counter { - // Value is the value of the metric - optional string value = 1; -} - -// CreateS3BucketOptions options used to determine automatic automatic bucket-creation process -message CreateS3BucketOptions { - // ObjectLocking Enable object locking - optional bool objectLocking = 3; -} - -// CronWorkflow is the definition of a scheduled workflow resource -// +genclient -// +genclient:noStatus -// +kubebuilder:resource:shortName=cwf;cronwf -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -message CronWorkflow { - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - optional CronWorkflowSpec spec = 2; - - optional CronWorkflowStatus status = 3; -} - -// CronWorkflowList is list of CronWorkflow resources -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -message CronWorkflowList { - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - repeated CronWorkflow items = 2; -} - -// CronWorkflowSpec is the specification of a CronWorkflow -message CronWorkflowSpec { - // WorkflowSpec is the spec of the workflow to be run - optional WorkflowSpec workflowSpec = 1; - - // Schedule is a schedule to run the Workflow in Cron format - optional string schedule = 2; - - // ConcurrencyPolicy is the K8s-style concurrency policy that will be used - optional string concurrencyPolicy = 3; - - // Suspend is a flag that will stop new CronWorkflows 
from running if set to true - optional bool suspend = 4; - - // StartingDeadlineSeconds is the K8s-style deadline that will limit the time a CronWorkflow will be run after its - // original scheduled time if it is missed. - optional int64 startingDeadlineSeconds = 5; - - // SuccessfulJobsHistoryLimit is the number of successful jobs to be kept at a time - optional int32 successfulJobsHistoryLimit = 6; - - // FailedJobsHistoryLimit is the number of failed jobs to be kept at a time - optional int32 failedJobsHistoryLimit = 7; - - // Timezone is the timezone against which the cron schedule will be calculated, e.g. "Asia/Tokyo". Default is machine's local time. - optional string timezone = 8; - - // WorkflowMetadata contains some metadata of the workflow to be run - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta workflowMeta = 9; -} - -// CronWorkflowStatus is the status of a CronWorkflow -message CronWorkflowStatus { - // Active is a list of active workflows stemming from this CronWorkflow - repeated k8s.io.api.core.v1.ObjectReference active = 1; - - // LastScheduleTime is the last time the CronWorkflow was scheduled - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduledTime = 2; - - // Conditions is a list of conditions the CronWorkflow may have - repeated Condition conditions = 3; -} - -// DAGTask represents a node in the graph during DAG execution -message DAGTask { - // Name is the name of the target - optional string name = 1; - - // Name of template to execute - optional string template = 2; - - // Inline is the template. Template must be empty if this is declared (and vice-versa). - optional Template inline = 14; - - // Arguments are the parameter and artifact arguments to the template - optional Arguments arguments = 3; - - // TemplateRef is the reference to the template resource to execute. 
- optional TemplateRef templateRef = 4; - - // Dependencies are name of other targets which this depends on - repeated string dependencies = 5; - - // WithItems expands a task into multiple parallel tasks from the items in the list - repeated Item withItems = 6; - - // WithParam expands a task into multiple parallel tasks from the value in the parameter, - // which is expected to be a JSON list. - optional string withParam = 7; - - // WithSequence expands a task into a numeric sequence - optional Sequence withSequence = 8; - - // When is an expression in which the task should conditionally execute - optional string when = 9; - - // ContinueOn makes argo to proceed with the following step even if this step fails. - // Errors and Failed states can be specified - optional ContinueOn continueOn = 10; - - // OnExit is a template reference which is invoked at the end of the - // template, irrespective of the success, failure, or error of the - // primary template. - // DEPRECATED: Use Hooks[exit].Template instead. - optional string onExit = 11; - - // Depends are name of other targets which this depends on - optional string depends = 12; - - // Hooks hold the lifecycle hook which is invoked at lifecycle of - // task, irrespective of the success, failure, or error status of the primary task - map<string, LifecycleHook> hooks = 13; -} - -// DAGTemplate is a template subtype for directed acyclic graph templates -message DAGTemplate { - // Target are one or more names of targets to execute in a DAG - optional string target = 1; - - // Tasks are a list of DAG tasks - // +patchStrategy=merge - // +patchMergeKey=name - repeated DAGTask tasks = 2; - - // This flag is for DAG logic. The DAG logic has a built-in "fail fast" feature to stop scheduling new steps, - // as soon as it detects that one of the DAG nodes is failed. Then it waits until all DAG nodes are completed - // before failing the DAG itself. 
- // The FailFast flag default is true, if set to false, it will allow a DAG to run all branches of the DAG to - // completion (either success or failure), regardless of the failed outcomes of branches in the DAG. - // More info and example about this feature at https://github.com/argoproj/argo-workflows/issues/1442 - optional bool failFast = 3; -} - -// Data is a data template -message Data { - // Source sources external data into a data template - optional DataSource source = 1; - - // Transformation applies a set of transformations - repeated TransformationStep transformation = 2; -} - -// DataSource sources external data into a data template -message DataSource { - // ArtifactPaths is a data transformation that collects a list of artifact paths - optional ArtifactPaths artifactPaths = 1; -} - -message Event { - // Selector (https://github.com/expr-lang/expr) that we must must match the event. E.g. `payload.message == "test"` - optional string selector = 1; -} - -// ExecutorConfig holds configurations of an executor container. -message ExecutorConfig { - // ServiceAccountName specifies the service account name of the executor container. - optional string serviceAccountName = 1; -} - -// GCSArtifact is the location of a GCS artifact -message GCSArtifact { - optional GCSBucket gCSBucket = 1; - - // Key is the path in the bucket where the artifact resides - optional string key = 2; -} - -// GCSArtifactRepository defines the controller configuration for a GCS artifact repository -message GCSArtifactRepository { - optional GCSBucket gCSBucket = 1; - - // KeyFormat defines the format of how to store keys and can reference workflow variables. 
- optional string keyFormat = 2; -} - -// GCSBucket contains the access information for interfacring with a GCS bucket -message GCSBucket { - // Bucket is the name of the bucket - optional string bucket = 1; - - // ServiceAccountKeySecret is the secret selector to the bucket's service account key - optional k8s.io.api.core.v1.SecretKeySelector serviceAccountKeySecret = 2; -} - -// Gauge is a Gauge prometheus metric -message Gauge { - // Value is the value to be used in the operation with the metric's current value. If no operation is set, - // value is the value of the metric - optional string value = 1; - - // Realtime emits this metric in real time if applicable - optional bool realtime = 2; - - // Operation defines the operation to apply with value and the metrics' current value - // +optional - optional string operation = 3; -} - -// GitArtifact is the location of an git artifact -message GitArtifact { - // Repo is the git repository - optional string repo = 1; - - // Revision is the git commit, tag, branch to checkout - optional string revision = 2; - - // Depth specifies clones/fetches should be shallow and include the given - // number of commits from the branch tip - optional uint64 depth = 3; - - // Fetch specifies a number of refs that should be fetched before checkout - repeated string fetch = 4; - - // UsernameSecret is the secret selector to the repository username - optional k8s.io.api.core.v1.SecretKeySelector usernameSecret = 5; - - // PasswordSecret is the secret selector to the repository password - optional k8s.io.api.core.v1.SecretKeySelector passwordSecret = 6; - - // SSHPrivateKeySecret is the secret selector to the repository ssh private key - optional k8s.io.api.core.v1.SecretKeySelector sshPrivateKeySecret = 7; - - // InsecureIgnoreHostKey disables SSH strict host key checking during git clone - optional bool insecureIgnoreHostKey = 8; - - // DisableSubmodules disables submodules during git clone - optional bool disableSubmodules = 9; - - 
// SingleBranch enables single branch clone, using the `branch` parameter - optional bool singleBranch = 10; - - // Branch is the branch to fetch when `SingleBranch` is enabled - optional string branch = 11; -} - -// HDFSArtifact is the location of an HDFS artifact -message HDFSArtifact { - optional HDFSConfig hDFSConfig = 1; - - // Path is a file path in HDFS - optional string path = 2; - - // Force copies a file forcibly even if it exists - optional bool force = 3; -} - -// HDFSArtifactRepository defines the controller configuration for an HDFS artifact repository -message HDFSArtifactRepository { - optional HDFSConfig hDFSConfig = 1; - - // PathFormat is defines the format of path to store a file. Can reference workflow variables - optional string pathFormat = 2; - - // Force copies a file forcibly even if it exists - optional bool force = 3; -} - -// HDFSConfig is configurations for HDFS -message HDFSConfig { - optional HDFSKrbConfig hDFSKrbConfig = 1; - - // Addresses is accessible addresses of HDFS name nodes - repeated string addresses = 2; - - // HDFSUser is the user to access HDFS file system. - // It is ignored if either ccache or keytab is used. - optional string hdfsUser = 3; -} - -// HDFSKrbConfig is auth configurations for Kerberos -message HDFSKrbConfig { - // KrbCCacheSecret is the secret selector for Kerberos ccache - // Either ccache or keytab can be set to use Kerberos. - optional k8s.io.api.core.v1.SecretKeySelector krbCCacheSecret = 1; - - // KrbKeytabSecret is the secret selector for Kerberos keytab - // Either ccache or keytab can be set to use Kerberos. - optional k8s.io.api.core.v1.SecretKeySelector krbKeytabSecret = 2; - - // KrbUsername is the Kerberos username used with Kerberos keytab - // It must be set if keytab is used. - optional string krbUsername = 3; - - // KrbRealm is the Kerberos realm used with Kerberos keytab - // It must be set if keytab is used. 
- optional string krbRealm = 4; - - // KrbConfig is the configmap selector for Kerberos config as string - // It must be set if either ccache or keytab is used. - optional k8s.io.api.core.v1.ConfigMapKeySelector krbConfigConfigMap = 5; - - // KrbServicePrincipalName is the principal name of Kerberos service - // It must be set if either ccache or keytab is used. - optional string krbServicePrincipalName = 6; -} - -message HTTP { - // Method is HTTP methods for HTTP Request - optional string method = 1; - - // URL of the HTTP Request - optional string url = 2; - - // Headers are an optional list of headers to send with HTTP requests - repeated HTTPHeader headers = 3; - - // TimeoutSeconds is request timeout for HTTP Request. Default is 30 seconds - optional int64 timeoutSeconds = 4; - - // SuccessCondition is an expression if evaluated to true is considered successful - optional string successCondition = 6; - - // Body is content of the HTTP Request - optional string body = 5; - - // BodyFrom is content of the HTTP Request as Bytes - optional HTTPBodySource bodyFrom = 8; - - // InsecureSkipVerify is a bool when if set to true will skip TLS verification for the HTTP client - optional bool insecureSkipVerify = 7; -} - -// HTTPArtifact allows a file served on HTTP to be placed as an input artifact in a container -message HTTPArtifact { - // URL of the artifact - optional string url = 1; - - // Headers are an optional list of headers to send with HTTP requests for artifacts - repeated Header headers = 2; - - // Auth contains information for client authentication - optional HTTPAuth auth = 3; -} - -message HTTPAuth { - optional ClientCertAuth clientCert = 1; - - optional OAuth2Auth oauth2 = 2; - - optional BasicAuth basicAuth = 3; -} - -// HTTPBodySource contains the source of the HTTP body. 
-message HTTPBodySource { - optional bytes bytes = 1; -} - -message HTTPHeader { - optional string name = 1; - - optional string value = 2; - - optional HTTPHeaderSource valueFrom = 3; -} - -message HTTPHeaderSource { - optional k8s.io.api.core.v1.SecretKeySelector secretKeyRef = 1; -} - -// Header indicate a key-value request header to be used when fetching artifacts over HTTP -message Header { - // Name is the header name - optional string name = 1; - - // Value is the literal value to use for the header - optional string value = 2; -} - -// Histogram is a Histogram prometheus metric -message Histogram { - // Value is the value of the metric - optional string value = 3; - - // Buckets is a list of bucket divisors for the histogram - repeated Amount buckets = 4; -} - -// Inputs are the mechanism for passing parameters, artifacts, volumes from one template to another -message Inputs { - // Parameters are a list of parameters passed as inputs - // +patchStrategy=merge - // +patchMergeKey=name - repeated Parameter parameters = 1; - - // Artifact are a list of artifacts passed as inputs - // +patchStrategy=merge - // +patchMergeKey=name - repeated Artifact artifacts = 2; -} - -// Item expands a single workflow step into multiple parallel steps -// The value of Item can be a map, string, bool, or number -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -// +kubebuilder:validation:Type=object -message Item { - optional bytes value = 1; -} - -// LabelKeys is list of keys -message LabelKeys { - repeated string items = 1; -} - -message LabelValueFrom { - optional string expression = 1; -} - -// Labels is list of workflow labels -message LabelValues { - repeated string items = 1; -} - -message LifecycleHook { - // Template is the name of the template to execute by the hook - optional string template = 1; - - // Arguments hold arguments to the template - optional Arguments arguments = 2; - - // TemplateRef is the reference to the template resource to execute by 
the hook - optional TemplateRef templateRef = 3; - - // Expression is a condition expression for when a node will be retried. If it evaluates to false, the node will not - // be retried and the retry strategy will be ignored - optional string expression = 4; -} - -// A link to another app. -// +patchStrategy=merge -// +patchMergeKey=name -message Link { - // The name of the link, E.g. "Workflow Logs" or "Pod Logs" - optional string name = 1; - - // "workflow", "pod", "pod-logs", "event-source-logs", "sensor-logs", "workflow-list" or "chat" - optional string scope = 2; - - // The URL. Can contain "${metadata.namespace}", "${metadata.name}", "${status.startedAt}", "${status.finishedAt}" or any other element in workflow yaml, e.g. "${workflow.metadata.annotations.userDefinedKey}" - optional string url = 3; -} - -message ManifestFrom { - // Artifact contains the artifact to use - optional Artifact artifact = 1; -} - -// MemoizationStatus is the status of this memoized node -message MemoizationStatus { - // Hit indicates whether this node was created from a cache entry - optional bool hit = 1; - - // Key is the name of the key used for this node's cache - optional string key = 2; - - // Cache is the name of the cache that was used - optional string cacheName = 3; -} - -// Memoization enables caching for the Outputs of the template -message Memoize { - // Key is the key to use as the caching key - optional string key = 1; - - // Cache sets and configures the kind of cache - optional Cache cache = 2; - - // MaxAge is the maximum age (e.g. "180s", "24h") of an entry that is still considered valid. If an entry is older - // than the MaxAge, it will be ignored. 
- optional string maxAge = 3; -} - -// Pod metdata -message Metadata { - map<string, string> annotations = 1; - - map<string, string> labels = 2; -} - -// MetricLabel is a single label for a prometheus metric -message MetricLabel { - optional string key = 1; - - optional string value = 2; -} - -// Metrics are a list of metrics emitted from a Workflow/Template -message Metrics { - // Prometheus is a list of prometheus metrics to be emitted - repeated Prometheus prometheus = 1; -} - -// Mutex holds Mutex configuration -message Mutex { - // name of the mutex - optional string name = 1; - - // Namespace is the namespace of the mutex, default: [namespace of workflow] - optional string namespace = 2; -} - -// MutexHolding describes the mutex and the object which is holding it. -message MutexHolding { - // Reference for the mutex - // e.g: ${namespace}/mutex/${mutexName} - optional string mutex = 1; - - // Holder is a reference to the object which holds the Mutex. - // Holding Scenario: - // 1. Current workflow's NodeID which is holding the lock. - // e.g: ${NodeID} - // Waiting Scenario: - // 1. Current workflow or other workflow NodeID which is holding the lock. - // e.g: ${WorkflowName}/${NodeID} - optional string holder = 2; -} - -// MutexStatus contains which objects hold mutex locks, and which objects this workflow is waiting on to release locks. -message MutexStatus { - // Holding is a list of mutexes and their respective objects that are held by mutex lock for this workflow. - // +listType=atomic - repeated MutexHolding holding = 1; - - // Waiting is a list of mutexes and their respective objects this workflow is waiting for. 
- // +listType=atomic - repeated MutexHolding waiting = 2; -} - -message NodeFlag { - // Hooked tracks whether or not this node was triggered by hook or onExit - optional bool hooked = 1; - - // Retried tracks whether or not this node was retried by retryStrategy - optional bool retried = 2; -} - -message NodeResult { - optional string phase = 1; - - optional string message = 2; - - optional Outputs outputs = 3; - - optional string progress = 4; -} - -// NodeStatus contains status information about an individual node in the workflow -message NodeStatus { - // ID is a unique identifier of a node within the worklow - // It is implemented as a hash of the node name, which makes the ID deterministic - optional string id = 1; - - // Name is unique name in the node tree used to generate the node ID - optional string name = 2; - - // DisplayName is a human readable representation of the node. Unique within a template boundary - optional string displayName = 3; - - // Type indicates type of node - optional string type = 4; - - // TemplateName is the template name which this node corresponds to. - // Not applicable to virtual nodes (e.g. Retry, StepGroup) - optional string templateName = 5; - - // TemplateRef is the reference to the template resource which this node corresponds to. - // Not applicable to virtual nodes (e.g. Retry, StepGroup) - optional TemplateRef templateRef = 6; - - // TemplateScope is the template scope in which the template of this node was retrieved. - optional string templateScope = 20; - - // Phase a simple, high-level summary of where the node is in its lifecycle. - // Can be used as a state machine. - // Will be one of these values "Pending", "Running" before the node is completed, or "Succeeded", - // "Skipped", "Failed", "Error", or "Omitted" as a final state. 
- optional string phase = 7; - - // BoundaryID indicates the node ID of the associated template root node in which this node belongs to - optional string boundaryID = 8; - - // A human readable message indicating details about why the node is in this condition. - optional string message = 9; - - // Time at which this node started - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 10; - - // Time at which this node completed - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time finishedAt = 11; - - // EstimatedDuration in seconds. - optional int64 estimatedDuration = 24; - - // Progress to completion - optional string progress = 26; - - // ResourcesDuration is indicative, but not accurate, resource duration. This is populated when the nodes completes. - map<string, int64> resourcesDuration = 21; - - // PodIP captures the IP of the pod for daemoned steps - optional string podIP = 12; - - // Daemoned tracks whether or not this node was daemoned and need to be terminated - optional bool daemoned = 13; - - // NodeFlag tracks some history of node. e.g.) hooked, retried, etc. - optional NodeFlag nodeFlag = 27; - - // Inputs captures input parameter values and artifact locations supplied to this template invocation - optional Inputs inputs = 14; - - // Outputs captures output parameter values and artifact locations produced by this template invocation - optional Outputs outputs = 15; - - // Children is a list of child node IDs - repeated string children = 16; - - // OutboundNodes tracks the node IDs which are considered "outbound" nodes to a template invocation. - // For every invocation of a template, there are nodes which we considered as "outbound". Essentially, - // these are last nodes in the execution sequence to run, before the template is considered completed. - // These nodes are then connected as parents to a following step. - // - // In the case of single pod steps (i.e. 
container, script, resource templates), this list will be nil - // since the pod itself is already considered the "outbound" node. - // In the case of DAGs, outbound nodes are the "target" tasks (tasks with no children). - // In the case of steps, outbound nodes are all the containers involved in the last step group. - // NOTE: since templates are composable, the list of outbound nodes are carried upwards when - // a DAG/steps template invokes another DAG/steps template. In other words, the outbound nodes of - // a template, will be a superset of the outbound nodes of its last children. - repeated string outboundNodes = 17; - - // HostNodeName name of the Kubernetes node on which the Pod is running, if applicable - optional string hostNodeName = 22; - - // MemoizationStatus holds information about cached nodes - optional MemoizationStatus memoizationStatus = 23; - - // SynchronizationStatus is the synchronization status of the node - optional NodeSynchronizationStatus synchronizationStatus = 25; -} - -// NodeSynchronizationStatus stores the status of a node -message NodeSynchronizationStatus { - // Waiting is the name of the lock that this node is waiting for - optional string waiting = 1; -} - -// NoneStrategy indicates to skip tar process and upload the files or directory tree as independent -// files. Note that if the artifact is a directory, the artifact driver must support the ability to -// save/load the directory appropriately. 
-message NoneStrategy { -} - -// OAuth2Auth holds all information for client authentication via OAuth2 tokens -message OAuth2Auth { - optional k8s.io.api.core.v1.SecretKeySelector clientIDSecret = 1; - - optional k8s.io.api.core.v1.SecretKeySelector clientSecretSecret = 2; - - optional k8s.io.api.core.v1.SecretKeySelector tokenURLSecret = 3; - - repeated string scopes = 5; - - repeated OAuth2EndpointParam endpointParams = 6; -} - -// EndpointParam is for requesting optional fields that should be sent in the oauth request -message OAuth2EndpointParam { - // Name is the header name - optional string key = 1; - - // Value is the literal value to use for the header - optional string value = 2; -} - -// OSSArtifact is the location of an Alibaba Cloud OSS artifact -message OSSArtifact { - optional OSSBucket oSSBucket = 1; - - // Key is the path in the bucket where the artifact resides - optional string key = 2; -} - -// OSSArtifactRepository defines the controller configuration for an OSS artifact repository -message OSSArtifactRepository { - optional OSSBucket oSSBucket = 1; - - // KeyFormat defines the format of how to store keys and can reference workflow variables. 
- optional string keyFormat = 2; -} - -// OSSBucket contains the access information required for interfacing with an Alibaba Cloud OSS bucket -message OSSBucket { - // Endpoint is the hostname of the bucket endpoint - optional string endpoint = 1; - - // Bucket is the name of the bucket - optional string bucket = 2; - - // AccessKeySecret is the secret selector to the bucket's access key - optional k8s.io.api.core.v1.SecretKeySelector accessKeySecret = 3; - - // SecretKeySecret is the secret selector to the bucket's secret key - optional k8s.io.api.core.v1.SecretKeySelector secretKeySecret = 4; - - // CreateBucketIfNotPresent tells the driver to attempt to create the OSS bucket for output artifacts, if it doesn't exist - optional bool createBucketIfNotPresent = 5; - - // SecurityToken is the user's temporary security token. For more details, check out: https://www.alibabacloud.com/help/doc-detail/100624.htm - optional string securityToken = 6; - - // LifecycleRule specifies how to manage bucket's lifecycle - optional OSSLifecycleRule lifecycleRule = 7; - - // UseSDKCreds tells the driver to figure out credentials based on sdk defaults. 
- optional bool useSDKCreds = 8; -} - -// OSSLifecycleRule specifies how to manage bucket's lifecycle -message OSSLifecycleRule { - // MarkInfrequentAccessAfterDays is the number of days before we convert the objects in the bucket to Infrequent Access (IA) storage type - optional int32 markInfrequentAccessAfterDays = 1; - - // MarkDeletionAfterDays is the number of days before we delete objects in the bucket - optional int32 markDeletionAfterDays = 2; -} - -// +kubebuilder:validation:Type=object -message Object { - optional bytes value = 1; -} - -// Outputs hold parameters, artifacts, and results from a step -message Outputs { - // Parameters holds the list of output parameters produced by a step - // +patchStrategy=merge - // +patchMergeKey=name - repeated Parameter parameters = 1; - - // Artifacts holds the list of output artifacts produced by a step - // +patchStrategy=merge - // +patchMergeKey=name - repeated Artifact artifacts = 2; - - // Result holds the result (stdout) of a script template - optional string result = 3; - - // ExitCode holds the exit code of a script template - optional string exitCode = 4; -} - -// +kubebuilder:validation:Type=array -message ParallelSteps { - repeated WorkflowStep steps = 1; -} - -// Parameter indicate a passed string parameter to a service template with an optional default value -message Parameter { - // Name is the parameter name - optional string name = 1; - - // Default is the default value to use for an input parameter if a value was not supplied - optional string default = 2; - - // Value is the literal value to use for the parameter. 
- // If specified in the context of an input parameter, the value takes precedence over any passed values - optional string value = 3; - - // ValueFrom is the source for the output parameter's value - optional ValueFrom valueFrom = 4; - - // GlobalName exports an output parameter to the global scope, making it available as - // '{{workflow.outputs.parameters.XXXX}} and in workflow.status.outputs.parameters - optional string globalName = 5; - - // Enum holds a list of string values to choose from, for the actual value of the parameter - repeated string enum = 6; - - // Description is the parameter description - optional string description = 7; -} - -// Plugin is an Object with exactly one key -message Plugin { - optional Object object = 1; -} - -// PodGC describes how to delete completed pods as they complete -message PodGC { - // Strategy is the strategy to use. One of "OnPodCompletion", "OnPodSuccess", "OnWorkflowCompletion", "OnWorkflowSuccess". If unset, does not delete Pods - optional string strategy = 1; - - // LabelSelector is the label selector to check if the pods match the labels before being added to the pod GC queue. - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 2; - - // DeleteDelayDuration specifies the duration before pods in the GC queue get deleted. 
- optional string deleteDelayDuration = 3; -} - -// Prometheus is a prometheus metric to be emitted -message Prometheus { - // Name is the name of the metric - optional string name = 1; - - // Labels is a list of metric labels - repeated MetricLabel labels = 2; - - // Help is a string that describes the metric - optional string help = 3; - - // When is a conditional statement that decides when to emit the metric - optional string when = 4; - - // Gauge is a gauge metric - optional Gauge gauge = 5; - - // Histogram is a histogram metric - optional Histogram histogram = 6; - - // Counter is a counter metric - optional Counter counter = 7; -} - -// RawArtifact allows raw string content to be placed as an artifact in a container -message RawArtifact { - // Data is the string contents of the artifact - optional string data = 1; -} - -// ResourceTemplate is a template subtype to manipulate kubernetes resources -message ResourceTemplate { - // Action is the action to perform to the resource. - // Must be one of: get, create, apply, delete, replace, patch - optional string action = 1; - - // MergeStrategy is the strategy used to merge a patch. It defaults to "strategic" - // Must be one of: strategic, merge, json - optional string mergeStrategy = 2; - - // Manifest contains the kubernetes manifest - optional string manifest = 3; - - // ManifestFrom is the source for a single kubernetes manifest - optional ManifestFrom manifestFrom = 8; - - // SetOwnerReference sets the reference to the workflow on the OwnerReference of generated resource. 
- optional bool setOwnerReference = 4; - - // SuccessCondition is a label selector expression which describes the conditions - // of the k8s resource in which it is acceptable to proceed to the following step - optional string successCondition = 5; - - // FailureCondition is a label selector expression which describes the conditions - // of the k8s resource in which the step was considered failed - optional string failureCondition = 6; - - // Flags is a set of additional options passed to kubectl before submitting a resource - // I.e. to disable resource validation: - // flags: [ - // "--validate=false" # disable resource validation - // ] - repeated string flags = 7; -} - -// RetryAffinity prevents running steps on the same host. -message RetryAffinity { - optional RetryNodeAntiAffinity nodeAntiAffinity = 1; -} - -// RetryNodeAntiAffinity is a placeholder for future expansion, only empty nodeAntiAffinity is allowed. -// In order to prevent running steps on the same host, it uses "kubernetes.io/hostname". -message RetryNodeAntiAffinity { -} - -// RetryStrategy provides controls on how to retry a workflow step -message RetryStrategy { - // Limit is the maximum number of retry attempts when retrying a container. It does not include the original - // container; the maximum number of total attempts will be `limit + 1`. - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString limit = 1; - - // RetryPolicy is a policy of NodePhase statuses that will be retried - optional string retryPolicy = 2; - - // Backoff is a backoff strategy - optional Backoff backoff = 3; - - // Affinity prevents running workflow's step on the same host - optional RetryAffinity affinity = 4; - - // Expression is a condition expression for when a node will be retried. 
If it evaluates to false, the node will not - // be retried and the retry strategy will be ignored - optional string expression = 5; -} - -// S3Artifact is the location of an S3 artifact -message S3Artifact { - optional S3Bucket s3Bucket = 1; - - // Key is the key in the bucket where the artifact resides - optional string key = 2; -} - -// S3ArtifactRepository defines the controller configuration for an S3 artifact repository -message S3ArtifactRepository { - optional S3Bucket s3Bucket = 1; - - // KeyFormat defines the format of how to store keys and can reference workflow variables. - optional string keyFormat = 2; - - // KeyPrefix is prefix used as part of the bucket key in which the controller will store artifacts. - // DEPRECATED. Use KeyFormat instead - optional string keyPrefix = 3; -} - -// S3Bucket contains the access information required for interfacing with an S3 bucket -message S3Bucket { - // Endpoint is the hostname of the bucket endpoint - optional string endpoint = 1; - - // Bucket is the name of the bucket - optional string bucket = 2; - - // Region contains the optional bucket region - optional string region = 3; - - // Insecure will connect to the service with TLS - optional bool insecure = 4; - - // AccessKeySecret is the secret selector to the bucket's access key - optional k8s.io.api.core.v1.SecretKeySelector accessKeySecret = 5; - - // SecretKeySecret is the secret selector to the bucket's secret key - optional k8s.io.api.core.v1.SecretKeySelector secretKeySecret = 6; - - // RoleARN is the Amazon Resource Name (ARN) of the role to assume. - optional string roleARN = 7; - - // UseSDKCreds tells the driver to figure out credentials based on sdk defaults. - optional bool useSDKCreds = 8; - - // CreateBucketIfNotPresent tells the driver to attempt to create the S3 bucket for output artifacts, if it doesn't exist. Setting Enabled Encryption will apply either SSE-S3 to the bucket if KmsKeyId is not set or SSE-KMS if it is. 
- optional CreateS3BucketOptions createBucketIfNotPresent = 9; - - optional S3EncryptionOptions encryptionOptions = 10; - - // CASecret specifies the secret that contains the CA, used to verify the TLS connection - optional k8s.io.api.core.v1.SecretKeySelector caSecret = 11; -} - -// S3EncryptionOptions used to determine encryption options during s3 operations -message S3EncryptionOptions { - // KMSKeyId tells the driver to encrypt the object using the specified KMS Key. - optional string kmsKeyId = 1; - - // KmsEncryptionContext is a json blob that contains an encryption context. See https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context for more information - optional string kmsEncryptionContext = 2; - - // EnableEncryption tells the driver to encrypt objects if set to true. If kmsKeyId and serverSideCustomerKeySecret are not set, SSE-S3 will be used - optional bool enableEncryption = 3; - - // ServerSideCustomerKeySecret tells the driver to encrypt the output artifacts using SSE-C with the specified secret. - optional k8s.io.api.core.v1.SecretKeySelector serverSideCustomerKeySecret = 4; -} - -// ScriptTemplate is a template subtype to enable scripting through code steps -message ScriptTemplate { - optional k8s.io.api.core.v1.Container container = 1; - - // Source contains the source code of the script to execute - optional string source = 2; -} - -message SemaphoreHolding { - // Semaphore stores the semaphore name. - optional string semaphore = 1; - - // Holders stores the list of current holder names in the workflow. 
- // +listType=atomic - repeated string holders = 2; -} - -// SemaphoreRef is a reference of Semaphore -message SemaphoreRef { - // ConfigMapKeyRef is configmap selector for Semaphore configuration - optional k8s.io.api.core.v1.ConfigMapKeySelector configMapKeyRef = 1; - - // Namespace is the namespace of the configmap, default: [namespace of workflow] - optional string namespace = 2; -} - -message SemaphoreStatus { - // Holding stores the list of resource acquired synchronization lock for workflows. - repeated SemaphoreHolding holding = 1; - - // Waiting indicates the list of current synchronization lock holders. - repeated SemaphoreHolding waiting = 2; -} - -// Sequence expands a workflow step into numeric range -message Sequence { - // Count is number of elements in the sequence (default: 0). Not to be used with end - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString count = 1; - - // Number at which to start the sequence (default: 0) - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString start = 2; - - // Number at which to end the sequence (default: 0). Not to be used with Count - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString end = 3; - - // Format is a printf format string to format the value in the sequence - optional string format = 4; -} - -message Submit { - // WorkflowTemplateRef the workflow template to submit - optional WorkflowTemplateRef workflowTemplateRef = 1; - - // Metadata optional means to customize select fields of the workflow metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 3; - - // Arguments extracted from the event and then set as arguments to the workflow created. 
- optional Arguments arguments = 2; -} - -// SubmitOpts are workflow submission options -message SubmitOpts { - // Name overrides metadata.name - optional string name = 1; - - // GenerateName overrides metadata.generateName - optional string generateName = 2; - - // Entrypoint overrides spec.entrypoint - optional string entrypoint = 4; - - // Parameters passes input parameters to workflow - repeated string parameters = 5; - - // ServiceAccount runs all pods in the workflow using specified ServiceAccount. - optional string serviceAccount = 7; - - // DryRun validates the workflow on the client-side without creating it. This option is not supported in API - optional bool dryRun = 8; - - // ServerDryRun validates the workflow on the server-side without creating it - optional bool serverDryRun = 9; - - // Labels adds to metadata.labels - optional string labels = 10; - - // OwnerReference creates a metadata.ownerReference - optional k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference ownerReference = 11; - - // Annotations adds to metadata.labels - optional string annotations = 12; - - // Set the podPriorityClassName of the workflow - optional string podPriorityClassName = 13; - - // Priority is used if controller is configured to process limited number of workflows in parallel, higher priority workflows - // are processed first. - optional int32 priority = 14; -} - -// SuppliedValueFrom is a placeholder for a value to be filled in directly, either through the CLI, API, etc. -message SuppliedValueFrom { -} - -// SuspendTemplate is a template subtype to suspend a workflow at a predetermined point in time -message SuspendTemplate { - // Duration is the seconds to wait before automatically resuming a template. Must be a string. Default unit is seconds. 
- // Could also be a Duration, e.g.: "2m", "6h" - optional string duration = 1; -} - -// Synchronization holds synchronization lock configuration -message Synchronization { - // Semaphore holds the Semaphore configuration - optional SemaphoreRef semaphore = 1; - - // Mutex holds the Mutex lock details - optional Mutex mutex = 2; -} - -// SynchronizationStatus stores the status of semaphore and mutex. -message SynchronizationStatus { - // Semaphore stores this workflow's Semaphore holder details - optional SemaphoreStatus semaphore = 1; - - // Mutex stores this workflow's mutex holder details - optional MutexStatus mutex = 2; -} - -// TTLStrategy is the strategy for the time to live depending on if the workflow succeeded or failed -message TTLStrategy { - // SecondsAfterCompletion is the number of seconds to live after completion - optional int32 secondsAfterCompletion = 1; - - // SecondsAfterSuccess is the number of seconds to live after success - optional int32 secondsAfterSuccess = 2; - - // SecondsAfterFailure is the number of seconds to live after failure - optional int32 secondsAfterFailure = 3; -} - -// TarStrategy will tar and gzip the file or directory when saving -message TarStrategy { - // CompressionLevel specifies the gzip compression level to use for the artifact. - // Defaults to gzip.DefaultCompression. - optional int32 compressionLevel = 1; -} - -// Template is a reusable and composable unit of execution in a workflow -message Template { - // Name is the name of the template - optional string name = 1; - - // Inputs describe what inputs parameters and artifacts are supplied to this template - optional Inputs inputs = 5; - - // Outputs describe the parameters and artifacts that this template produces - optional Outputs outputs = 6; - - // NodeSelector is a selector to schedule this step of the workflow to be - // run on the selected node(s). Overrides the selector set at the workflow level. 
- map<string, string> nodeSelector = 7; - - // Affinity sets the pod's scheduling constraints - // Overrides the affinity set at the workflow level (if any) - optional k8s.io.api.core.v1.Affinity affinity = 8; - - // Metdata sets the pods's metadata, i.e. annotations and labels - optional Metadata metadata = 9; - - // Daemon will allow a workflow to proceed to the next step so long as the container reaches readiness - optional bool daemon = 10; - - // Steps define a series of sequential/parallel workflow steps - repeated ParallelSteps steps = 11; - - // Container is the main container image to run in the pod - optional k8s.io.api.core.v1.Container container = 12; - - // ContainerSet groups multiple containers within a single pod. - optional ContainerSetTemplate containerSet = 40; - - // Script runs a portion of code against an interpreter - optional ScriptTemplate script = 13; - - // Resource template subtype which can run k8s resources - optional ResourceTemplate resource = 14; - - // DAG template subtype which runs a DAG - optional DAGTemplate dag = 15; - - // Suspend template subtype which can suspend a workflow when reaching the step - optional SuspendTemplate suspend = 16; - - // Data is a data template - optional Data data = 39; - - // HTTP makes a HTTP request - optional HTTP http = 42; - - // Plugin is a plugin template - optional Plugin plugin = 43; - - // Volumes is a list of volumes that can be mounted by containers in a template. - // +patchStrategy=merge - // +patchMergeKey=name - repeated k8s.io.api.core.v1.Volume volumes = 17; - - // InitContainers is a list of containers which run before the main container. 
- // +patchStrategy=merge - // +patchMergeKey=name - repeated UserContainer initContainers = 18; - - // Sidecars is a list of containers which run alongside the main container - // Sidecars are automatically killed when the main container completes - // +patchStrategy=merge - // +patchMergeKey=name - repeated UserContainer sidecars = 19; - - // Location in which all files related to the step will be stored (logs, artifacts, etc...). - // Can be overridden by individual items in Outputs. If omitted, will use the default - // artifact repository location configured in the controller, appended with the - // <workflowname>/<nodename> in the key. - optional ArtifactLocation archiveLocation = 20; - - // Optional duration in seconds relative to the StartTime that the pod may be active on a node - // before the system actively tries to terminate the pod; value must be positive integer - // This field is only applicable to container and script templates. - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString activeDeadlineSeconds = 21; - - // RetryStrategy describes how to retry a template when it fails - optional RetryStrategy retryStrategy = 22; - - // Parallelism limits the max total parallel pods that can execute at the same time within the - // boundaries of this template invocation. If additional steps/dag templates are invoked, the - // pods created by those templates will not be counted towards this total. - optional int64 parallelism = 23; - - // FailFast, if specified, will fail this template if any of its child pods has failed. This is useful for when this - // template is expanded with `withItems`, etc. - optional bool failFast = 41; - - // Tolerations to apply to workflow pods. - // +patchStrategy=merge - // +patchMergeKey=key - repeated k8s.io.api.core.v1.Toleration tolerations = 24; - - // If specified, the pod will be dispatched by specified scheduler. - // Or it will be dispatched by workflow scope scheduler if specified. 
- // If neither specified, the pod will be dispatched by default scheduler. - // +optional - optional string schedulerName = 25; - - // PriorityClassName to apply to workflow pods. - optional string priorityClassName = 26; - - // Priority to apply to workflow pods. - optional int32 priority = 27; - - // ServiceAccountName to apply to workflow pods - optional string serviceAccountName = 28; - - // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in pods. - // ServiceAccountName of ExecutorConfig must be specified if this value is false. - optional bool automountServiceAccountToken = 32; - - // Executor holds configurations of the executor container. - optional ExecutorConfig executor = 33; - - // HostAliases is an optional list of hosts and IPs that will be injected into the pod spec - // +patchStrategy=merge - // +patchMergeKey=ip - repeated k8s.io.api.core.v1.HostAlias hostAliases = 29; - - // SecurityContext holds pod-level security attributes and common container settings. - // Optional: Defaults to empty. See type description for default values of each field. - // +optional - optional k8s.io.api.core.v1.PodSecurityContext securityContext = 30; - - // PodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of - // container fields which are not strings (e.g. resource limits). - optional string podSpecPatch = 31; - - // Metrics are a list of metrics emitted from this template - optional Metrics metrics = 35; - - // Synchronization holds synchronization lock configuration for this template - optional Synchronization synchronization = 36; - - // Memoize allows templates to use outputs generated from already executed templates - optional Memoize memoize = 37; - - // Timeout allows to set the total node execution timeout duration counting from the node's start time. - // This duration also includes time in which the node spends in Pending state. 
This duration may not be applied to Step or DAG templates. - optional string timeout = 38; -} - -// TemplateRef is a reference of template resource. -message TemplateRef { - // Name is the resource name of the template. - optional string name = 1; - - // Template is the name of referred template in the resource. - optional string template = 2; - - // ClusterScope indicates the referred template is cluster scoped (i.e. a ClusterWorkflowTemplate). - optional bool clusterScope = 4; -} - -message TransformationStep { - // Expression defines an expr expression to apply - optional string expression = 1; -} - -// UserContainer is a container specified by a user. -message UserContainer { - optional k8s.io.api.core.v1.Container container = 1; - - // MirrorVolumeMounts will mount the same volumes specified in the main container - // to the container (including artifacts), at the same mountPaths. This enables - // dind daemon to partially see the same filesystem as the main container in - // order to use features such as docker volume binding - optional bool mirrorVolumeMounts = 2; -} - -// ValueFrom describes a location in which to obtain the value to a parameter -message ValueFrom { - // Path in the container to retrieve an output parameter value from in container templates - optional string path = 1; - - // JSONPath of a resource to retrieve an output parameter value from in resource templates - optional string jsonPath = 2; - - // JQFilter expression against the resource object in resource templates - optional string jqFilter = 3; - - // Selector (https://github.com/expr-lang/expr) that is evaluated against the event to get the value of the parameter. E.g. `payload.message` - optional string event = 7; - - // Parameter reference to a step or dag task in which to retrieve an output parameter value from - // (e.g. '{{steps.mystep.outputs.myparam}}') - optional string parameter = 4; - - // Supplied value to be filled in directly, either through the CLI, API, etc. 
- optional SuppliedValueFrom supplied = 6; - - // ConfigMapKeyRef is configmap selector for input parameter configuration - optional k8s.io.api.core.v1.ConfigMapKeySelector configMapKeyRef = 9; - - // Default specifies a value to be used if retrieving the value from the specified source fails - optional string default = 5; - - // Expression, if defined, is evaluated to specify the value for the parameter - optional string expression = 8; -} - -message Version { - optional string version = 1; - - optional string buildDate = 2; - - optional string gitCommit = 3; - - optional string gitTag = 4; - - optional string gitTreeState = 5; - - optional string goVersion = 6; - - optional string compiler = 7; - - optional string platform = 8; -} - -// VolumeClaimGC describes how to delete volumes from completed Workflows -message VolumeClaimGC { - // Strategy is the strategy to use. One of "OnWorkflowCompletion", "OnWorkflowSuccess". Defaults to "OnWorkflowSuccess" - optional string strategy = 1; -} - -// Workflow is the definition of a workflow resource -// +genclient -// +genclient:noStatus -// +kubebuilder:resource:shortName=wf -// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="Status of the workflow" -// +kubebuilder:printcolumn:name="Age",type="date",format="date-time",JSONPath=".status.startedAt",description="When the workflow was started" -// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Human readable message indicating details about why the workflow is in this condition." 
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -message Workflow { - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - optional WorkflowSpec spec = 2; - - optional WorkflowStatus status = 3; -} - -// WorkflowArtifactGCTask specifies the Artifacts that need to be deleted as well as the status of deletion -// +genclient -// +kubebuilder:resource:shortName=wfat -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:subresource:status -message WorkflowArtifactGCTask { - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - optional ArtifactGCSpec spec = 2; - - optional ArtifactGCStatus status = 3; -} - -// WorkflowArtifactGCTaskList is list of WorkflowArtifactGCTask resources -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -message WorkflowArtifactGCTaskList { - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - repeated WorkflowArtifactGCTask items = 2; -} - -// WorkflowEventBinding is the definition of an event resource -// +genclient -// +genclient:noStatus -// +kubebuilder:resource:shortName=wfeb -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -message WorkflowEventBinding { - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - optional WorkflowEventBindingSpec spec = 2; -} - -// WorkflowEventBindingList is list of event resources -// +kubebuilder:resource:shortName=wfebs -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -message WorkflowEventBindingList { - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - repeated WorkflowEventBinding items = 2; -} - -message WorkflowEventBindingSpec { - // Event is the event to bind to - optional Event event = 1; - - // Submit is the workflow template to submit - optional Submit submit = 2; -} - -// WorkflowLevelArtifactGC describes how to delete artifacts from completed Workflows - this 
spec is used on the Workflow level -message WorkflowLevelArtifactGC { - // ArtifactGC is an embedded struct - optional ArtifactGC artifactGC = 1; - - // ForceFinalizerRemoval: if set to true, the finalizer will be removed in the case that Artifact GC fails - optional bool forceFinalizerRemoval = 2; - - // PodSpecPatch holds strategic merge patch to apply against the artgc pod spec. - optional string podSpecPatch = 3; -} - -// WorkflowList is list of Workflow resources -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -message WorkflowList { - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - repeated Workflow items = 2; -} - -message WorkflowMetadata { - map<string, string> labels = 1; - - map<string, string> annotations = 2; - - map<string, LabelValueFrom> labelsFrom = 3; -} - -// WorkflowSpec is the specification of a Workflow. -message WorkflowSpec { - // Templates is a list of workflow templates used in a workflow - // +patchStrategy=merge - // +patchMergeKey=name - repeated Template templates = 1; - - // Entrypoint is a template reference to the starting point of the workflow. - optional string entrypoint = 2; - - // Arguments contain the parameters and artifacts sent to the workflow entrypoint - // Parameters are referencable globally using the 'workflow' variable prefix. - // e.g. {{workflow.parameters.myparam}} - optional Arguments arguments = 3; - - // ServiceAccountName is the name of the ServiceAccount to run all pods of the workflow as. - optional string serviceAccountName = 4; - - // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in pods. - // ServiceAccountName of ExecutorConfig must be specified if this value is false. - optional bool automountServiceAccountToken = 28; - - // Executor holds configurations of executor containers of the workflow. 
- optional ExecutorConfig executor = 29; - - // Volumes is a list of volumes that can be mounted by containers in a workflow. - // +patchStrategy=merge - // +patchMergeKey=name - repeated k8s.io.api.core.v1.Volume volumes = 5; - - // VolumeClaimTemplates is a list of claims that containers are allowed to reference. - // The Workflow controller will create the claims at the beginning of the workflow - // and delete the claims upon completion of the workflow - repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 6; - - // Parallelism limits the max total parallel pods that can execute at the same time in a workflow - optional int64 parallelism = 7; - - // ArtifactRepositoryRef specifies the configMap name and key containing the artifact repository config. - optional ArtifactRepositoryRef artifactRepositoryRef = 8; - - // Suspend will suspend the workflow and prevent execution of any future steps in the workflow - optional bool suspend = 9; - - // NodeSelector is a selector which will result in all pods of the workflow - // to be scheduled on the selected node(s). This is able to be overridden by - // a nodeSelector specified in the template. - map<string, string> nodeSelector = 10; - - // Affinity sets the scheduling constraints for all pods in the workflow. - // Can be overridden by an affinity specified in the template - optional k8s.io.api.core.v1.Affinity affinity = 11; - - // Tolerations to apply to workflow pods. - // +patchStrategy=merge - // +patchMergeKey=key - repeated k8s.io.api.core.v1.Toleration tolerations = 12; - - // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images - // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets - // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. 
- // More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod - // +patchStrategy=merge - // +patchMergeKey=name - repeated k8s.io.api.core.v1.LocalObjectReference imagePullSecrets = 13; - - // Host networking requested for this workflow pod. Default to false. - optional bool hostNetwork = 14; - - // Set DNS policy for workflow pods. - // Defaults to "ClusterFirst". - // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. - // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. - // To have DNS options set along with hostNetwork, you have to specify DNS policy - // explicitly to 'ClusterFirstWithHostNet'. - optional string dnsPolicy = 15; - - // PodDNSConfig defines the DNS parameters of a pod in addition to - // those generated from DNSPolicy. - optional k8s.io.api.core.v1.PodDNSConfig dnsConfig = 16; - - // OnExit is a template reference which is invoked at the end of the - // workflow, irrespective of the success, failure, or error of the - // primary workflow. - optional string onExit = 17; - - // TTLStrategy limits the lifetime of a Workflow that has finished execution depending on if it - // Succeeded or Failed. If this struct is set, once the Workflow finishes, it will be - // deleted after the time to live expires. If this field is unset, - // the controller config map will hold the default values. - optional TTLStrategy ttlStrategy = 30; - - // Optional duration in seconds relative to the workflow start time which the workflow is - // allowed to run before the controller terminates the workflow. A value of zero is used to - // terminate a Running workflow - optional int64 activeDeadlineSeconds = 19; - - // Priority is used if controller is configured to process limited number of workflows in parallel. Workflows with higher priority are processed first. - optional int32 priority = 20; - - // Set scheduler name for all pods. 
- // Will be overridden if container/script template's scheduler name is set. - // Default scheduler will be used if neither specified. - // +optional - optional string schedulerName = 21; - - // PodGC describes the strategy to use when deleting completed pods - optional PodGC podGC = 22; - - // PriorityClassName to apply to workflow pods. - optional string podPriorityClassName = 23; - - // Priority to apply to workflow pods. - // DEPRECATED: Use PodPriorityClassName instead. - optional int32 podPriority = 24; - - // +patchStrategy=merge - // +patchMergeKey=ip - repeated k8s.io.api.core.v1.HostAlias hostAliases = 25; - - // SecurityContext holds pod-level security attributes and common container settings. - // Optional: Defaults to empty. See type description for default values of each field. - // +optional - optional k8s.io.api.core.v1.PodSecurityContext securityContext = 26; - - // PodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of - // container fields which are not strings (e.g. resource limits). - optional string podSpecPatch = 27; - - // PodDisruptionBudget holds the number of concurrent disruptions that you allow for Workflow's Pods. - // Controller will automatically add the selector with workflow name, if selector is empty. - // Optional: Defaults to empty. 
- // +optional - optional k8s.io.api.policy.v1.PodDisruptionBudgetSpec podDisruptionBudget = 31; - - // Metrics are a list of metrics emitted from this Workflow - optional Metrics metrics = 32; - - // Shutdown will shutdown the workflow according to its ShutdownStrategy - optional string shutdown = 33; - - // WorkflowTemplateRef holds a reference to a WorkflowTemplate for execution - optional WorkflowTemplateRef workflowTemplateRef = 34; - - // Synchronization holds synchronization lock configuration for this Workflow - optional Synchronization synchronization = 35; - - // VolumeClaimGC describes the strategy to use when deleting volumes from completed workflows - optional VolumeClaimGC volumeClaimGC = 36; - - // RetryStrategy for all templates in the workflow. - optional RetryStrategy retryStrategy = 37; - - // PodMetadata defines additional metadata that should be applied to workflow pods - optional Metadata podMetadata = 38; - - // TemplateDefaults holds default template values that will apply to all templates in the Workflow, unless overridden on the template-level - optional Template templateDefaults = 39; - - // ArchiveLogs indicates if the container logs should be archived - optional bool archiveLogs = 40; - - // Hooks holds the lifecycle hook which is invoked at lifecycle of - // step, irrespective of the success, failure, or error status of the primary step - map<string, LifecycleHook> hooks = 41; - - // WorkflowMetadata contains some metadata of the workflow to refer to - optional WorkflowMetadata workflowMetadata = 42; - - // ArtifactGC describes the strategy to use when deleting artifacts from completed or deleted workflows (applies to all output Artifacts - // unless Artifact.ArtifactGC is specified, which overrides this) - optional WorkflowLevelArtifactGC artifactGC = 43; -} - -// WorkflowStatus contains overall status information about a workflow -message WorkflowStatus { - // Phase a simple, high-level summary of where the workflow is in its 
lifecycle. - // Will be "" (Unknown), "Pending", or "Running" before the workflow is completed, and "Succeeded", - // "Failed" or "Error" once the workflow has completed. - optional string phase = 1; - - // Time at which this workflow started - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 2; - - // Time at which this workflow completed - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time finishedAt = 3; - - // EstimatedDuration in seconds. - optional int64 estimatedDuration = 16; - - // Progress to completion - optional string progress = 17; - - // A human readable message indicating details about why the workflow is in this condition. - optional string message = 4; - - // Compressed and base64 decoded Nodes map - optional string compressedNodes = 5; - - // Nodes is a mapping between a node ID and the node's status. - map<string, NodeStatus> nodes = 6; - - // Whether on not node status has been offloaded to a database. If exists, then Nodes and CompressedNodes will be empty. - // This will actually be populated with a hash of the offloaded data. - optional string offloadNodeStatusVersion = 10; - - // StoredTemplates is a mapping between a template ref and the node's status. - map<string, Template> storedTemplates = 9; - - // PersistentVolumeClaims tracks all PVCs that were created as part of the workflow. - // The contents of this list are drained at the end of the workflow. - repeated k8s.io.api.core.v1.Volume persistentVolumeClaims = 7; - - // Outputs captures output values and artifact locations produced by the workflow via global outputs - optional Outputs outputs = 8; - - // Conditions is a list of conditions the Workflow may have - repeated Condition conditions = 13; - - // ResourcesDuration is the total for the workflow - map<string, int64> resourcesDuration = 12; - - // StoredWorkflowSpec stores the WorkflowTemplate spec for future execution. 
- optional WorkflowSpec storedWorkflowTemplateSpec = 14; - - // Synchronization stores the status of synchronization locks - optional SynchronizationStatus synchronization = 15; - - // ArtifactRepositoryRef is used to cache the repository to use so we do not need to determine it everytime we reconcile. - optional ArtifactRepositoryRefStatus artifactRepositoryRef = 18; - - // ArtifactGCStatus maintains the status of Artifact Garbage Collection - optional ArtGCStatus artifactGCStatus = 19; - - // TaskResultsCompletionStatus tracks task result completion status (mapped by node ID). Used to prevent premature archiving and garbage collection. - map<string, bool> taskResultsCompletionStatus = 20; -} - -// WorkflowStep is a reference to a template to execute in a series of step -message WorkflowStep { - // Name of the step - optional string name = 1; - - // Template is the name of the template to execute as the step - optional string template = 2; - - // Inline is the template. Template must be empty if this is declared (and vice-versa). - optional Template inline = 13; - - // Arguments hold arguments to the template - optional Arguments arguments = 3; - - // TemplateRef is the reference to the template resource to execute as the step. - optional TemplateRef templateRef = 4; - - // WithItems expands a step into multiple parallel steps from the items in the list - repeated Item withItems = 5; - - // WithParam expands a step into multiple parallel steps from the value in the parameter, - // which is expected to be a JSON list. - optional string withParam = 6; - - // WithSequence expands a step into a numeric sequence - optional Sequence withSequence = 7; - - // When is an expression in which the step should conditionally execute - optional string when = 8; - - // ContinueOn makes argo to proceed with the following step even if this step fails. 
- // Errors and Failed states can be specified - optional ContinueOn continueOn = 9; - - // OnExit is a template reference which is invoked at the end of the - // template, irrespective of the success, failure, or error of the - // primary template. - // DEPRECATED: Use Hooks[exit].Template instead. - optional string onExit = 11; - - // Hooks holds the lifecycle hook which is invoked at lifecycle of - // step, irrespective of the success, failure, or error status of the primary step - map<string, LifecycleHook> hooks = 12; -} - -// WorkflowTaskResult is a used to communicate a result back to the controller. Unlike WorkflowTaskSet, it has -// more capacity. This is an internal type. Users should never create this resource directly, much like you would -// never create a ReplicaSet directly. -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -message WorkflowTaskResult { - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - optional NodeResult nodeResult = 2; -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -message WorkflowTaskResultList { - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - repeated WorkflowTaskResult items = 2; -} - -// +genclient -// +kubebuilder:resource:shortName=wfts -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:subresource:status -message WorkflowTaskSet { - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - optional WorkflowTaskSetSpec spec = 2; - - optional WorkflowTaskSetStatus status = 3; -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -message WorkflowTaskSetList { - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - repeated WorkflowTaskSet items = 2; -} - -message WorkflowTaskSetSpec { - map<string, Template> tasks = 1; -} - -message WorkflowTaskSetStatus { - map<string, NodeResult> nodes = 1; -} - -// 
WorkflowTemplate is the definition of a workflow template resource -// +genclient -// +genclient:noStatus -// +kubebuilder:resource:shortName=wftmpl -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -message WorkflowTemplate { - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - optional WorkflowSpec spec = 2; -} - -// WorkflowTemplateList is list of WorkflowTemplate resources -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -message WorkflowTemplateList { - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - repeated WorkflowTemplate items = 2; -} - -// WorkflowTemplateRef is a reference to a WorkflowTemplate resource. -message WorkflowTemplateRef { - // Name is the resource name of the workflow template. - optional string name = 1; - - // ClusterScope indicates the referred template is cluster scoped (i.e. a ClusterWorkflowTemplate). - optional bool clusterScope = 2; -} - -// ZipStrategy will unzip zipped input artifacts -message ZipStrategy { -} - diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.swagger.json b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.swagger.json deleted file mode 100644 index 4cf065b4f..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.swagger.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "title": "pkg/apis/workflow/v1alpha1/generated.proto", - "version": "version not set" - }, - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "paths": {}, - "definitions": {} -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/http_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/http_types.go deleted file mode 100644 index 79f9c3e29..000000000 --- 
a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/http_types.go +++ /dev/null @@ -1,62 +0,0 @@ -package v1alpha1 - -import ( - "net/http" - - v1 "k8s.io/api/core/v1" -) - -type HTTPHeaderSource struct { - SecretKeyRef *v1.SecretKeySelector `json:"secretKeyRef,omitempty" protobuf:"bytes,1,opt,name=secretKeyRef"` -} - -type HTTPHeaders []HTTPHeader - -// HTTPBodySource contains the source of the HTTP body. -type HTTPBodySource struct { - Bytes []byte `json:"bytes,omitempty" protobuf:"bytes,1,opt,name=bytes"` -} - -func (h HTTPHeaders) ToHeader() http.Header { - outHeader := make(http.Header) - for _, header := range h { - // When this is used, header valueFrom should already be resolved - if header.ValueFrom != nil { - continue - } - outHeader[header.Name] = []string{header.Value} - } - return outHeader -} - -type HTTPHeader struct { - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` - ValueFrom *HTTPHeaderSource `json:"valueFrom,omitempty" protobuf:"bytes,3,opt,name=valueFrom"` -} - -type HTTP struct { - // Method is HTTP methods for HTTP Request - Method string `json:"method,omitempty" protobuf:"bytes,1,opt,name=method"` - // URL of the HTTP Request - URL string `json:"url" protobuf:"bytes,2,opt,name=url"` - // Headers are an optional list of headers to send with HTTP requests - Headers HTTPHeaders `json:"headers,omitempty" protobuf:"bytes,3,rep,name=headers"` - // TimeoutSeconds is request timeout for HTTP Request. 
Default is 30 seconds - TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"bytes,4,opt,name=timeoutSeconds"` - // SuccessCondition is an expression if evaluated to true is considered successful - SuccessCondition string `json:"successCondition,omitempty" protobuf:"bytes,6,opt,name=successCondition"` - // Body is content of the HTTP Request - Body string `json:"body,omitempty" protobuf:"bytes,5,opt,name=body"` - // BodyFrom is content of the HTTP Request as Bytes - BodyFrom *HTTPBodySource `json:"bodyFrom,omitempty" protobuf:"bytes,8,opt,name=bodyFrom"` - // InsecureSkipVerify is a bool when if set to true will skip TLS verification for the HTTP client - InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty" protobuf:"bytes,7,opt,name=insecureSkipVerify"` -} - -func (h *HTTP) GetBodyBytes() []byte { - if h.BodyFrom != nil { - return h.BodyFrom.Bytes - } - return nil -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/info.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/info.go deleted file mode 100644 index 19390e431..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/info.go +++ /dev/null @@ -1,25 +0,0 @@ -package v1alpha1 - -// A link to another app. -// +patchStrategy=merge -// +patchMergeKey=name -type Link struct { - // The name of the link, E.g. "Workflow Logs" or "Pod Logs" - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // "workflow", "pod", "pod-logs", "event-source-logs", "sensor-logs", "workflow-list" or "chat" - Scope string `json:"scope" protobuf:"bytes,2,opt,name=scope"` - // The URL. Can contain "${metadata.namespace}", "${metadata.name}", "${status.startedAt}", "${status.finishedAt}" or any other element in workflow yaml, e.g. "${workflow.metadata.annotations.userDefinedKey}" - URL string `json:"url" protobuf:"bytes,3,opt,name=url"` -} - -// Column is a custom column that will be exposed in the Workflow List View. 
-// +patchStrategy=merge -// +patchMergeKey=name -type Column struct { - // The name of this column, e.g., "Workflow Completed". - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // The type of this column, "label" or "annotation". - Type string `json:"type" protobuf:"bytes,2,opt,name=type"` - // The key of the label or annotation, e.g., "workflows.argoproj.io/completed". - Key string `json:"key" protobuf:"bytes,3,opt,name=key"` -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/item.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/item.go deleted file mode 100644 index 35e53c31f..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/item.go +++ /dev/null @@ -1,119 +0,0 @@ -package v1alpha1 - -import ( - "encoding/json" - "fmt" - "strconv" - - jsonutil "github.com/argoproj/argo-workflows/v3/util/json" -) - -// Type represents the stored type of Item. -type Type int - -const ( - Number Type = iota - String - Bool - Map - List -) - -// Item expands a single workflow step into multiple parallel steps -// The value of Item can be a map, string, bool, or number -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -// +kubebuilder:validation:Type=object -type Item struct { - Value json.RawMessage `json:"-" protobuf:"bytes,1,opt,name=value,casttype=encoding/json.RawMessage"` -} - -func ParseItem(s string) (Item, error) { - item := Item{} - return item, json.Unmarshal([]byte(s), &item) -} - -func (i *Item) GetType() Type { - strValue := string(i.Value) - if _, err := strconv.Atoi(strValue); err == nil { - return Number - } - if _, err := strconv.ParseFloat(strValue, 64); err == nil { - return Number - } - if _, err := strconv.ParseBool(strValue); err == nil { - return Bool - } - var list []interface{} - if err := json.Unmarshal(i.Value, &list); err == nil { - return List - } - var object map[string]interface{} - if err := json.Unmarshal(i.Value, &object); 
err == nil { - return Map - } - return String -} - -func (i *Item) UnmarshalJSON(value []byte) error { - return i.Value.UnmarshalJSON(value) -} - -func (i *Item) String() string { - x, err := json.Marshal(i) // this produces a normalised string, e.g. white-space - if err != nil { - panic(err) - } - // this convenience to remove quotes from strings will cause many problems - if x[0] == '"' { - return jsonutil.Fix(string(x[1 : len(x)-1])) - } - return jsonutil.Fix(string(x)) -} - -func (i Item) Format(s fmt.State, _ rune) { - _, _ = fmt.Fprintf(s, "%s", i.String()) //nolint -} - -func (i Item) MarshalJSON() ([]byte, error) { - return i.Value.MarshalJSON() -} - -func (i *Item) DeepCopyInto(out *Item) { - inBytes, err := json.Marshal(i) - if err != nil { - panic(err) - } - err = json.Unmarshal(inBytes, out) - if err != nil { - panic(err) - } -} - -// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators -func (i Item) OpenAPISchemaType() []string { - return nil -} - -func (i Item) OpenAPISchemaFormat() string { return "" } - -// you MUST assert `GetType() == Map` before invocation as this does not return errors -func (i *Item) GetMapVal() map[string]Item { - val := make(map[string]Item) - _ = json.Unmarshal(i.Value, &val) - return val -} - -// you MUST assert `GetType() == List` before invocation as this does not return errors -func (i *Item) GetListVal() []Item { - val := make([]Item, 0) - _ = json.Unmarshal(i.Value, &val) - return val -} - -// you MUST assert `GetType() == String` before invocation as this does not return errors -func (i *Item) GetStrVal() string { - val := "" - _ = json.Unmarshal(i.Value, &val) - return val -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/label.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/label.go deleted file mode 100644 index 6c38754ba..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/label.go +++ 
/dev/null @@ -1,11 +0,0 @@ -package v1alpha1 - -// Labels is list of workflow labels -type LabelValues struct { - Items []string `json:"items,omitempty" protobuf:"bytes,1,opt,name=items"` -} - -// LabelKeys is list of keys -type LabelKeys struct { - Items []string `json:"items,omitempty" protobuf:"bytes,1,opt,name=items"` -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/marshall.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/marshall.go deleted file mode 100644 index 2d35e5ba9..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/marshall.go +++ /dev/null @@ -1,86 +0,0 @@ -package v1alpha1 - -import ( - "encoding/json" - "fmt" - "os" - "path/filepath" - - "sigs.k8s.io/yaml" -) - -// MustUnmarshal is a utility function to unmarshall either a file, byte array, or string of JSON or YAMl into a object. -// text - a byte array or string, if starts with "@" it assumed to be a file and read from disk, is starts with "{" assumed to be JSON, otherwise assumed to be YAML -// v - a pointer to an object -func MustUnmarshal(text, v interface{}) { - switch x := text.(type) { - case string: - MustUnmarshal([]byte(x), v) - case []byte: - if len(x) == 0 { - panic("no text to unmarshal") - } - if x[0] == '@' { - filename := string(x[1:]) - y, err := os.ReadFile(filepath.Clean(filename)) - if err != nil { - panic(fmt.Errorf("failed to read file %s: %w", filename, err)) - } - MustUnmarshal(y, v) - } else if x[0] == '{' { - if err := json.Unmarshal(x, v); err != nil { - panic(fmt.Errorf("failed to unmarshal JSON %q: %w", string(x), err)) - } - } else { - if err := yaml.UnmarshalStrict(x, v); err != nil { - panic(fmt.Errorf("failed to unmarshal YAML %q: %w", string(x), err)) - } - } - default: - panic(fmt.Errorf("cannot unmarshal type %T", text)) - } -} - -func MustMarshallJSON(v interface{}) string { - data, err := json.Marshal(v) - if err != nil { - panic(err) - } - return 
string(data) -} - -func MustUnmarshalClusterWorkflowTemplate(text interface{}) *ClusterWorkflowTemplate { - x := &ClusterWorkflowTemplate{} - MustUnmarshal(text, &x) - return x -} - -func MustUnmarshalCronWorkflow(text interface{}) *CronWorkflow { - x := &CronWorkflow{} - MustUnmarshal(text, &x) - return x -} - -func MustUnmarshalTemplate(text interface{}) *Template { - x := &Template{} - MustUnmarshal(text, &x) - return x -} - -func MustUnmarshalWorkflow(text interface{}) *Workflow { - x := &Workflow{} - MustUnmarshal(text, &x) - return x -} - -func MustUnmarshalWorkflowTemplate(text interface{}) *WorkflowTemplate { - x := &WorkflowTemplate{} - MustUnmarshal(text, &x) - return x -} - -func MustUnmarshalWorkflowArtifactGCTask(text interface{}) *WorkflowArtifactGCTask { - x := &WorkflowArtifactGCTask{} - MustUnmarshal(text, &x) - return x -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/object_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/object_types.go deleted file mode 100644 index a21ee90c1..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/object_types.go +++ /dev/null @@ -1,24 +0,0 @@ -package v1alpha1 - -import ( - "encoding/json" -) - -// +kubebuilder:validation:Type=object -type Object struct { - Value json.RawMessage `json:"-" protobuf:"bytes,1,opt,name=value,casttype=encoding/json.RawMessage"` -} - -func (i *Object) UnmarshalJSON(value []byte) error { - return i.Value.UnmarshalJSON(value) -} - -func (i Object) MarshalJSON() ([]byte, error) { - return i.Value.MarshalJSON() -} - -func (i Object) OpenAPISchemaType() []string { - return []string{"object"} -} - -func (i Object) OpenAPISchemaFormat() string { return "" } diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/openapi_generated.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/openapi_generated.go deleted file mode 100644 index 
04f22dcf9..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/openapi_generated.go +++ /dev/null @@ -1,8364 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -// Code generated by openapi-gen. DO NOT EDIT. - -// This file was autogenerated by openapi-gen. Do not edit it manually! - -package v1alpha1 - -import ( - common "k8s.io/kube-openapi/pkg/common" - spec "k8s.io/kube-openapi/pkg/validation/spec" -) - -func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { - return map[string]common.OpenAPIDefinition{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Amount": schema_pkg_apis_workflow_v1alpha1_Amount(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArchiveStrategy": schema_pkg_apis_workflow_v1alpha1_ArchiveStrategy(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments": schema_pkg_apis_workflow_v1alpha1_Arguments(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtGCStatus": schema_pkg_apis_workflow_v1alpha1_ArtGCStatus(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact": schema_pkg_apis_workflow_v1alpha1_Artifact(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGC": schema_pkg_apis_workflow_v1alpha1_ArtifactGC(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGCSpec": schema_pkg_apis_workflow_v1alpha1_ArtifactGCSpec(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGCStatus": schema_pkg_apis_workflow_v1alpha1_ArtifactGCStatus(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactLocation": schema_pkg_apis_workflow_v1alpha1_ArtifactLocation(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactNodeSpec": schema_pkg_apis_workflow_v1alpha1_ArtifactNodeSpec(ref), - 
"github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactPaths": schema_pkg_apis_workflow_v1alpha1_ArtifactPaths(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactRepository": schema_pkg_apis_workflow_v1alpha1_ArtifactRepository(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactRepositoryRef": schema_pkg_apis_workflow_v1alpha1_ArtifactRepositoryRef(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactRepositoryRefStatus": schema_pkg_apis_workflow_v1alpha1_ArtifactRepositoryRefStatus(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactResult": schema_pkg_apis_workflow_v1alpha1_ArtifactResult(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactResultNodeStatus": schema_pkg_apis_workflow_v1alpha1_ArtifactResultNodeStatus(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactSearchQuery": schema_pkg_apis_workflow_v1alpha1_ArtifactSearchQuery(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactSearchResult": schema_pkg_apis_workflow_v1alpha1_ArtifactSearchResult(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryArtifact": schema_pkg_apis_workflow_v1alpha1_ArtifactoryArtifact(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryArtifactRepository": schema_pkg_apis_workflow_v1alpha1_ArtifactoryArtifactRepository(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryAuth": schema_pkg_apis_workflow_v1alpha1_ArtifactoryAuth(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureArtifact": schema_pkg_apis_workflow_v1alpha1_AzureArtifact(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureArtifactRepository": schema_pkg_apis_workflow_v1alpha1_AzureArtifactRepository(ref), - 
"github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureBlobContainer": schema_pkg_apis_workflow_v1alpha1_AzureBlobContainer(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Backoff": schema_pkg_apis_workflow_v1alpha1_Backoff(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.BasicAuth": schema_pkg_apis_workflow_v1alpha1_BasicAuth(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Cache": schema_pkg_apis_workflow_v1alpha1_Cache(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ClientCertAuth": schema_pkg_apis_workflow_v1alpha1_ClientCertAuth(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ClusterWorkflowTemplate": schema_pkg_apis_workflow_v1alpha1_ClusterWorkflowTemplate(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ClusterWorkflowTemplateList": schema_pkg_apis_workflow_v1alpha1_ClusterWorkflowTemplateList(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Column": schema_pkg_apis_workflow_v1alpha1_Column(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Condition": schema_pkg_apis_workflow_v1alpha1_Condition(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContainerNode": schema_pkg_apis_workflow_v1alpha1_ContainerNode(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContainerSetRetryStrategy": schema_pkg_apis_workflow_v1alpha1_ContainerSetRetryStrategy(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContainerSetTemplate": schema_pkg_apis_workflow_v1alpha1_ContainerSetTemplate(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContinueOn": schema_pkg_apis_workflow_v1alpha1_ContinueOn(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Counter": schema_pkg_apis_workflow_v1alpha1_Counter(ref), - 
"github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CreateS3BucketOptions": schema_pkg_apis_workflow_v1alpha1_CreateS3BucketOptions(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CronWorkflow": schema_pkg_apis_workflow_v1alpha1_CronWorkflow(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CronWorkflowList": schema_pkg_apis_workflow_v1alpha1_CronWorkflowList(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CronWorkflowSpec": schema_pkg_apis_workflow_v1alpha1_CronWorkflowSpec(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CronWorkflowStatus": schema_pkg_apis_workflow_v1alpha1_CronWorkflowStatus(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.DAGTask": schema_pkg_apis_workflow_v1alpha1_DAGTask(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.DAGTemplate": schema_pkg_apis_workflow_v1alpha1_DAGTemplate(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Data": schema_pkg_apis_workflow_v1alpha1_Data(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.DataSource": schema_pkg_apis_workflow_v1alpha1_DataSource(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Event": schema_pkg_apis_workflow_v1alpha1_Event(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ExecutorConfig": schema_pkg_apis_workflow_v1alpha1_ExecutorConfig(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSArtifact": schema_pkg_apis_workflow_v1alpha1_GCSArtifact(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSArtifactRepository": schema_pkg_apis_workflow_v1alpha1_GCSArtifactRepository(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSBucket": schema_pkg_apis_workflow_v1alpha1_GCSBucket(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Gauge": 
schema_pkg_apis_workflow_v1alpha1_Gauge(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GitArtifact": schema_pkg_apis_workflow_v1alpha1_GitArtifact(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSArtifact": schema_pkg_apis_workflow_v1alpha1_HDFSArtifact(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSArtifactRepository": schema_pkg_apis_workflow_v1alpha1_HDFSArtifactRepository(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSConfig": schema_pkg_apis_workflow_v1alpha1_HDFSConfig(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSKrbConfig": schema_pkg_apis_workflow_v1alpha1_HDFSKrbConfig(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTP": schema_pkg_apis_workflow_v1alpha1_HTTP(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPArtifact": schema_pkg_apis_workflow_v1alpha1_HTTPArtifact(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPAuth": schema_pkg_apis_workflow_v1alpha1_HTTPAuth(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPBodySource": schema_pkg_apis_workflow_v1alpha1_HTTPBodySource(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPHeader": schema_pkg_apis_workflow_v1alpha1_HTTPHeader(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPHeaderSource": schema_pkg_apis_workflow_v1alpha1_HTTPHeaderSource(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Header": schema_pkg_apis_workflow_v1alpha1_Header(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Histogram": schema_pkg_apis_workflow_v1alpha1_Histogram(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Inputs": schema_pkg_apis_workflow_v1alpha1_Inputs(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Item": 
schema_pkg_apis_workflow_v1alpha1_Item(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LabelKeys": schema_pkg_apis_workflow_v1alpha1_LabelKeys(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LabelValueFrom": schema_pkg_apis_workflow_v1alpha1_LabelValueFrom(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LabelValues": schema_pkg_apis_workflow_v1alpha1_LabelValues(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LifecycleHook": schema_pkg_apis_workflow_v1alpha1_LifecycleHook(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Link": schema_pkg_apis_workflow_v1alpha1_Link(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ManifestFrom": schema_pkg_apis_workflow_v1alpha1_ManifestFrom(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MemoizationStatus": schema_pkg_apis_workflow_v1alpha1_MemoizationStatus(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Memoize": schema_pkg_apis_workflow_v1alpha1_Memoize(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metadata": schema_pkg_apis_workflow_v1alpha1_Metadata(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MetricLabel": schema_pkg_apis_workflow_v1alpha1_MetricLabel(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metrics": schema_pkg_apis_workflow_v1alpha1_Metrics(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Mutex": schema_pkg_apis_workflow_v1alpha1_Mutex(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MutexHolding": schema_pkg_apis_workflow_v1alpha1_MutexHolding(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MutexStatus": schema_pkg_apis_workflow_v1alpha1_MutexStatus(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeFlag": 
schema_pkg_apis_workflow_v1alpha1_NodeFlag(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeResult": schema_pkg_apis_workflow_v1alpha1_NodeResult(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeStatus": schema_pkg_apis_workflow_v1alpha1_NodeStatus(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeSynchronizationStatus": schema_pkg_apis_workflow_v1alpha1_NodeSynchronizationStatus(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NoneStrategy": schema_pkg_apis_workflow_v1alpha1_NoneStrategy(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OAuth2Auth": schema_pkg_apis_workflow_v1alpha1_OAuth2Auth(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OAuth2EndpointParam": schema_pkg_apis_workflow_v1alpha1_OAuth2EndpointParam(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSArtifact": schema_pkg_apis_workflow_v1alpha1_OSSArtifact(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSArtifactRepository": schema_pkg_apis_workflow_v1alpha1_OSSArtifactRepository(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSBucket": schema_pkg_apis_workflow_v1alpha1_OSSBucket(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSLifecycleRule": schema_pkg_apis_workflow_v1alpha1_OSSLifecycleRule(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Object": schema_pkg_apis_workflow_v1alpha1_Object(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs": schema_pkg_apis_workflow_v1alpha1_Outputs(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ParallelSteps": schema_pkg_apis_workflow_v1alpha1_ParallelSteps(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Parameter": schema_pkg_apis_workflow_v1alpha1_Parameter(ref), - 
"github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Plugin": schema_pkg_apis_workflow_v1alpha1_Plugin(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.PodGC": schema_pkg_apis_workflow_v1alpha1_PodGC(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Prometheus": schema_pkg_apis_workflow_v1alpha1_Prometheus(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RawArtifact": schema_pkg_apis_workflow_v1alpha1_RawArtifact(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ResourceTemplate": schema_pkg_apis_workflow_v1alpha1_ResourceTemplate(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryAffinity": schema_pkg_apis_workflow_v1alpha1_RetryAffinity(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryNodeAntiAffinity": schema_pkg_apis_workflow_v1alpha1_RetryNodeAntiAffinity(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryStrategy": schema_pkg_apis_workflow_v1alpha1_RetryStrategy(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3Artifact": schema_pkg_apis_workflow_v1alpha1_S3Artifact(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3ArtifactRepository": schema_pkg_apis_workflow_v1alpha1_S3ArtifactRepository(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3Bucket": schema_pkg_apis_workflow_v1alpha1_S3Bucket(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3EncryptionOptions": schema_pkg_apis_workflow_v1alpha1_S3EncryptionOptions(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ScriptTemplate": schema_pkg_apis_workflow_v1alpha1_ScriptTemplate(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SemaphoreHolding": schema_pkg_apis_workflow_v1alpha1_SemaphoreHolding(ref), - 
"github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SemaphoreRef": schema_pkg_apis_workflow_v1alpha1_SemaphoreRef(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SemaphoreStatus": schema_pkg_apis_workflow_v1alpha1_SemaphoreStatus(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Sequence": schema_pkg_apis_workflow_v1alpha1_Sequence(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Submit": schema_pkg_apis_workflow_v1alpha1_Submit(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SubmitOpts": schema_pkg_apis_workflow_v1alpha1_SubmitOpts(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SuppliedValueFrom": schema_pkg_apis_workflow_v1alpha1_SuppliedValueFrom(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SuspendTemplate": schema_pkg_apis_workflow_v1alpha1_SuspendTemplate(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Synchronization": schema_pkg_apis_workflow_v1alpha1_Synchronization(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SynchronizationStatus": schema_pkg_apis_workflow_v1alpha1_SynchronizationStatus(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TTLStrategy": schema_pkg_apis_workflow_v1alpha1_TTLStrategy(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TarStrategy": schema_pkg_apis_workflow_v1alpha1_TarStrategy(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template": schema_pkg_apis_workflow_v1alpha1_Template(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TemplateRef": schema_pkg_apis_workflow_v1alpha1_TemplateRef(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TransformationStep": schema_pkg_apis_workflow_v1alpha1_TransformationStep(ref), - 
"github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.UserContainer": schema_pkg_apis_workflow_v1alpha1_UserContainer(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ValueFrom": schema_pkg_apis_workflow_v1alpha1_ValueFrom(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Version": schema_pkg_apis_workflow_v1alpha1_Version(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.VolumeClaimGC": schema_pkg_apis_workflow_v1alpha1_VolumeClaimGC(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Workflow": schema_pkg_apis_workflow_v1alpha1_Workflow(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowArtifactGCTask": schema_pkg_apis_workflow_v1alpha1_WorkflowArtifactGCTask(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowArtifactGCTaskList": schema_pkg_apis_workflow_v1alpha1_WorkflowArtifactGCTaskList(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowEventBinding": schema_pkg_apis_workflow_v1alpha1_WorkflowEventBinding(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowEventBindingList": schema_pkg_apis_workflow_v1alpha1_WorkflowEventBindingList(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowEventBindingSpec": schema_pkg_apis_workflow_v1alpha1_WorkflowEventBindingSpec(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowLevelArtifactGC": schema_pkg_apis_workflow_v1alpha1_WorkflowLevelArtifactGC(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowList": schema_pkg_apis_workflow_v1alpha1_WorkflowList(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowMetadata": schema_pkg_apis_workflow_v1alpha1_WorkflowMetadata(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec": 
schema_pkg_apis_workflow_v1alpha1_WorkflowSpec(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowStatus": schema_pkg_apis_workflow_v1alpha1_WorkflowStatus(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowStep": schema_pkg_apis_workflow_v1alpha1_WorkflowStep(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskResult": schema_pkg_apis_workflow_v1alpha1_WorkflowTaskResult(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskResultList": schema_pkg_apis_workflow_v1alpha1_WorkflowTaskResultList(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskSet": schema_pkg_apis_workflow_v1alpha1_WorkflowTaskSet(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskSetList": schema_pkg_apis_workflow_v1alpha1_WorkflowTaskSetList(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskSetSpec": schema_pkg_apis_workflow_v1alpha1_WorkflowTaskSetSpec(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskSetStatus": schema_pkg_apis_workflow_v1alpha1_WorkflowTaskSetStatus(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTemplate": schema_pkg_apis_workflow_v1alpha1_WorkflowTemplate(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTemplateList": schema_pkg_apis_workflow_v1alpha1_WorkflowTemplateList(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTemplateRef": schema_pkg_apis_workflow_v1alpha1_WorkflowTemplateRef(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ZipStrategy": schema_pkg_apis_workflow_v1alpha1_ZipStrategy(ref), - } -} - -func schema_pkg_apis_workflow_v1alpha1_Amount(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: 
spec.SchemaProps{ - Description: "Amount represent a numeric amount.", - Type: Amount{}.OpenAPISchemaType(), - Format: Amount{}.OpenAPISchemaFormat(), - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArchiveStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ArchiveStrategy describes how to archive files/directory when saving artifacts", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "tar": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TarStrategy"), - }, - }, - "none": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NoneStrategy"), - }, - }, - "zip": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ZipStrategy"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NoneStrategy", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TarStrategy", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ZipStrategy"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Arguments(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Arguments to a template", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "parameters": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Parameters is the list of parameters to pass to the template or workflow", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: 
spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Parameter"), - }, - }, - }, - }, - }, - "artifacts": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Artifacts is the list of artifacts to pass to the template or workflow", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Parameter"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtGCStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ArtGCStatus maintains state related to ArtifactGC", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "strategiesProcessed": { - SchemaProps: spec.SchemaProps{ - Description: "have Pods been started to perform this strategy? (enables us not to re-process what we've already done)", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: false, - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - "podsRecouped": { - SchemaProps: spec.SchemaProps{ - Description: "have completed Pods been processed? 
(mapped by Pod name) used to prevent re-processing the Status of a Pod more than once", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: false, - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - "notSpecified": { - SchemaProps: spec.SchemaProps{ - Description: "if this is true, we already checked to see if we need to do it and we don't", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Artifact(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Artifact indicates an artifact to place at a specified path", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "name of the artifact. must be unique within a template's inputs/outputs.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "path": { - SchemaProps: spec.SchemaProps{ - Description: "Path is the container path to the artifact", - Type: []string{"string"}, - Format: "", - }, - }, - "mode": { - SchemaProps: spec.SchemaProps{ - Description: "mode bits to use on this file, must be a value between 0 and 0777 set when loading input artifacts.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "from": { - SchemaProps: spec.SchemaProps{ - Description: "From allows an artifact to reference an artifact from a previous step", - Type: []string{"string"}, - Format: "", - }, - }, - "archiveLogs": { - SchemaProps: spec.SchemaProps{ - Description: "ArchiveLogs indicates if the container logs should be archived", - Type: []string{"boolean"}, - Format: "", - }, - }, - "s3": { - SchemaProps: spec.SchemaProps{ - Description: "S3 contains S3 artifact location details", - Ref: 
ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3Artifact"), - }, - }, - "git": { - SchemaProps: spec.SchemaProps{ - Description: "Git contains git artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GitArtifact"), - }, - }, - "http": { - SchemaProps: spec.SchemaProps{ - Description: "HTTP contains HTTP artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPArtifact"), - }, - }, - "artifactory": { - SchemaProps: spec.SchemaProps{ - Description: "Artifactory contains artifactory artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryArtifact"), - }, - }, - "hdfs": { - SchemaProps: spec.SchemaProps{ - Description: "HDFS contains HDFS artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSArtifact"), - }, - }, - "raw": { - SchemaProps: spec.SchemaProps{ - Description: "Raw contains raw artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RawArtifact"), - }, - }, - "oss": { - SchemaProps: spec.SchemaProps{ - Description: "OSS contains OSS artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSArtifact"), - }, - }, - "gcs": { - SchemaProps: spec.SchemaProps{ - Description: "GCS contains GCS artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSArtifact"), - }, - }, - "azure": { - SchemaProps: spec.SchemaProps{ - Description: "Azure contains Azure Storage artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureArtifact"), - }, - }, - "globalName": { - SchemaProps: spec.SchemaProps{ - Description: "GlobalName exports an output artifact to the global scope, making it available as '{{workflow.outputs.artifacts.XXXX}} and in 
workflow.status.outputs.artifacts", - Type: []string{"string"}, - Format: "", - }, - }, - "archive": { - SchemaProps: spec.SchemaProps{ - Description: "Archive controls how the artifact will be saved to the artifact repository.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArchiveStrategy"), - }, - }, - "optional": { - SchemaProps: spec.SchemaProps{ - Description: "Make Artifacts optional, if Artifacts doesn't generate or exist", - Type: []string{"boolean"}, - Format: "", - }, - }, - "subPath": { - SchemaProps: spec.SchemaProps{ - Description: "SubPath allows an artifact to be sourced from a subpath within the specified source", - Type: []string{"string"}, - Format: "", - }, - }, - "recurseMode": { - SchemaProps: spec.SchemaProps{ - Description: "If mode is set, apply the permission recursively into the artifact if it is a folder", - Type: []string{"boolean"}, - Format: "", - }, - }, - "fromExpression": { - SchemaProps: spec.SchemaProps{ - Description: "FromExpression, if defined, is evaluated to specify the value for the artifact", - Type: []string{"string"}, - Format: "", - }, - }, - "artifactGC": { - SchemaProps: spec.SchemaProps{ - Description: "ArtifactGC describes the strategy to use when to deleting an artifact from completed or deleted workflows", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGC"), - }, - }, - "deleted": { - SchemaProps: spec.SchemaProps{ - Description: "Has this been deleted?", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"name"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArchiveStrategy", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGC", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureArtifact", 
"github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GitArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RawArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3Artifact"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtifactGC(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ArtifactGC describes how to delete artifacts from completed Workflows - this is embedded into the WorkflowLevelArtifactGC, and also used for individual Artifacts to override that as needed", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "strategy": { - SchemaProps: spec.SchemaProps{ - Description: "Strategy is the strategy to use.", - Type: []string{"string"}, - Format: "", - }, - }, - "podMetadata": { - SchemaProps: spec.SchemaProps{ - Description: "PodMetadata is an optional field for specifying the Labels and Annotations that should be assigned to the Pod doing the deletion", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metadata"), - }, - }, - "serviceAccountName": { - SchemaProps: spec.SchemaProps{ - Description: "ServiceAccountName is an optional field for specifying the Service Account that should be assigned to the Pod doing the deletion", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metadata"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtifactGCSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - 
return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ArtifactGCSpec specifies the Artifacts that need to be deleted", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "artifactsByNode": { - SchemaProps: spec.SchemaProps{ - Description: "ArtifactsByNode maps Node name to information pertaining to Artifacts on that Node", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactNodeSpec"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactNodeSpec"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtifactGCStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ArtifactGCStatus describes the result of the deletion", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "artifactResultsByNode": { - SchemaProps: spec.SchemaProps{ - Description: "ArtifactResultsByNode maps Node name to result", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactResultNodeStatus"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactResultNodeStatus"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtifactLocation(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: 
"ArtifactLocation describes a location for a single or multiple artifacts. It is used as single artifact in the context of inputs/outputs (e.g. outputs.artifacts.artname). It is also used to describe the location of multiple artifacts such as the archive location of a single workflow step, which the executor will use as a default location to store its files.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "archiveLogs": { - SchemaProps: spec.SchemaProps{ - Description: "ArchiveLogs indicates if the container logs should be archived", - Type: []string{"boolean"}, - Format: "", - }, - }, - "s3": { - SchemaProps: spec.SchemaProps{ - Description: "S3 contains S3 artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3Artifact"), - }, - }, - "git": { - SchemaProps: spec.SchemaProps{ - Description: "Git contains git artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GitArtifact"), - }, - }, - "http": { - SchemaProps: spec.SchemaProps{ - Description: "HTTP contains HTTP artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPArtifact"), - }, - }, - "artifactory": { - SchemaProps: spec.SchemaProps{ - Description: "Artifactory contains artifactory artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryArtifact"), - }, - }, - "hdfs": { - SchemaProps: spec.SchemaProps{ - Description: "HDFS contains HDFS artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSArtifact"), - }, - }, - "raw": { - SchemaProps: spec.SchemaProps{ - Description: "Raw contains raw artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RawArtifact"), - }, - }, - "oss": { - SchemaProps: spec.SchemaProps{ - Description: "OSS contains OSS artifact location details", - 
Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSArtifact"), - }, - }, - "gcs": { - SchemaProps: spec.SchemaProps{ - Description: "GCS contains GCS artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSArtifact"), - }, - }, - "azure": { - SchemaProps: spec.SchemaProps{ - Description: "Azure contains Azure Storage artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureArtifact"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GitArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RawArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3Artifact"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtifactNodeSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ArtifactNodeSpec specifies the Artifacts that need to be deleted for a given Node", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "archiveLocation": { - SchemaProps: spec.SchemaProps{ - Description: "ArchiveLocation is the template-level Artifact location specification", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactLocation"), - }, - }, - "artifacts": { - SchemaProps: spec.SchemaProps{ - 
Description: "Artifacts maps artifact name to Artifact description", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactLocation"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtifactPaths(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ArtifactPaths expands a step from a collection of artifacts", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "name of the artifact. 
must be unique within a template's inputs/outputs.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "path": { - SchemaProps: spec.SchemaProps{ - Description: "Path is the container path to the artifact", - Type: []string{"string"}, - Format: "", - }, - }, - "mode": { - SchemaProps: spec.SchemaProps{ - Description: "mode bits to use on this file, must be a value between 0 and 0777 set when loading input artifacts.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "from": { - SchemaProps: spec.SchemaProps{ - Description: "From allows an artifact to reference an artifact from a previous step", - Type: []string{"string"}, - Format: "", - }, - }, - "archiveLogs": { - SchemaProps: spec.SchemaProps{ - Description: "ArchiveLogs indicates if the container logs should be archived", - Type: []string{"boolean"}, - Format: "", - }, - }, - "s3": { - SchemaProps: spec.SchemaProps{ - Description: "S3 contains S3 artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3Artifact"), - }, - }, - "git": { - SchemaProps: spec.SchemaProps{ - Description: "Git contains git artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GitArtifact"), - }, - }, - "http": { - SchemaProps: spec.SchemaProps{ - Description: "HTTP contains HTTP artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPArtifact"), - }, - }, - "artifactory": { - SchemaProps: spec.SchemaProps{ - Description: "Artifactory contains artifactory artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryArtifact"), - }, - }, - "hdfs": { - SchemaProps: spec.SchemaProps{ - Description: "HDFS contains HDFS artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSArtifact"), - }, - }, - "raw": { - SchemaProps: spec.SchemaProps{ - Description: 
"Raw contains raw artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RawArtifact"), - }, - }, - "oss": { - SchemaProps: spec.SchemaProps{ - Description: "OSS contains OSS artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSArtifact"), - }, - }, - "gcs": { - SchemaProps: spec.SchemaProps{ - Description: "GCS contains GCS artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSArtifact"), - }, - }, - "azure": { - SchemaProps: spec.SchemaProps{ - Description: "Azure contains Azure Storage artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureArtifact"), - }, - }, - "globalName": { - SchemaProps: spec.SchemaProps{ - Description: "GlobalName exports an output artifact to the global scope, making it available as '{{workflow.outputs.artifacts.XXXX}} and in workflow.status.outputs.artifacts", - Type: []string{"string"}, - Format: "", - }, - }, - "archive": { - SchemaProps: spec.SchemaProps{ - Description: "Archive controls how the artifact will be saved to the artifact repository.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArchiveStrategy"), - }, - }, - "optional": { - SchemaProps: spec.SchemaProps{ - Description: "Make Artifacts optional, if Artifacts doesn't generate or exist", - Type: []string{"boolean"}, - Format: "", - }, - }, - "subPath": { - SchemaProps: spec.SchemaProps{ - Description: "SubPath allows an artifact to be sourced from a subpath within the specified source", - Type: []string{"string"}, - Format: "", - }, - }, - "recurseMode": { - SchemaProps: spec.SchemaProps{ - Description: "If mode is set, apply the permission recursively into the artifact if it is a folder", - Type: []string{"boolean"}, - Format: "", - }, - }, - "fromExpression": { - SchemaProps: spec.SchemaProps{ - Description: "FromExpression, if 
defined, is evaluated to specify the value for the artifact", - Type: []string{"string"}, - Format: "", - }, - }, - "artifactGC": { - SchemaProps: spec.SchemaProps{ - Description: "ArtifactGC describes the strategy to use when to deleting an artifact from completed or deleted workflows", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGC"), - }, - }, - "deleted": { - SchemaProps: spec.SchemaProps{ - Description: "Has this been deleted?", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"name"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArchiveStrategy", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGC", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GitArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RawArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3Artifact"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtifactRepository(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ArtifactRepository represents an artifact repository in which a controller will store its artifacts", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "archiveLogs": { - SchemaProps: spec.SchemaProps{ - Description: "ArchiveLogs enables log 
archiving", - Type: []string{"boolean"}, - Format: "", - }, - }, - "s3": { - SchemaProps: spec.SchemaProps{ - Description: "S3 stores artifact in a S3-compliant object store", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3ArtifactRepository"), - }, - }, - "artifactory": { - SchemaProps: spec.SchemaProps{ - Description: "Artifactory stores artifacts to JFrog Artifactory", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryArtifactRepository"), - }, - }, - "hdfs": { - SchemaProps: spec.SchemaProps{ - Description: "HDFS stores artifacts in HDFS", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSArtifactRepository"), - }, - }, - "oss": { - SchemaProps: spec.SchemaProps{ - Description: "OSS stores artifact in a OSS-compliant object store", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSArtifactRepository"), - }, - }, - "gcs": { - SchemaProps: spec.SchemaProps{ - Description: "GCS stores artifact in a GCS object store", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSArtifactRepository"), - }, - }, - "azure": { - SchemaProps: spec.SchemaProps{ - Description: "Azure stores artifact in an Azure Storage account", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureArtifactRepository"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryArtifactRepository", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureArtifactRepository", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSArtifactRepository", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSArtifactRepository", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSArtifactRepository", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3ArtifactRepository"}, - 
} -} - -func schema_pkg_apis_workflow_v1alpha1_ArtifactRepositoryRef(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "configMap": { - SchemaProps: spec.SchemaProps{ - Description: "The name of the config map. Defaults to \"artifact-repositories\".", - Type: []string{"string"}, - Format: "", - }, - }, - "key": { - SchemaProps: spec.SchemaProps{ - Description: "The config map key. Defaults to the value of the \"workflows.argoproj.io/default-artifact-repository\" annotation.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtifactRepositoryRefStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "configMap": { - SchemaProps: spec.SchemaProps{ - Description: "The name of the config map. Defaults to \"artifact-repositories\".", - Type: []string{"string"}, - Format: "", - }, - }, - "key": { - SchemaProps: spec.SchemaProps{ - Description: "The config map key. Defaults to the value of the \"workflows.argoproj.io/default-artifact-repository\" annotation.", - Type: []string{"string"}, - Format: "", - }, - }, - "namespace": { - SchemaProps: spec.SchemaProps{ - Description: "The namespace of the config map. Defaults to the workflow's namespace, or the controller's namespace (if found).", - Type: []string{"string"}, - Format: "", - }, - }, - "default": { - SchemaProps: spec.SchemaProps{ - Description: "If this ref represents the default artifact repository, rather than a config map.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "artifactRepository": { - SchemaProps: spec.SchemaProps{ - Description: "The repository the workflow will use. 
This maybe empty before v3.1.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactRepository"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactRepository"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtifactResult(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ArtifactResult describes the result of attempting to delete a given Artifact", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name is the name of the Artifact", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "success": { - SchemaProps: spec.SchemaProps{ - Description: "Success describes whether the deletion succeeded", - Type: []string{"boolean"}, - Format: "", - }, - }, - "error": { - SchemaProps: spec.SchemaProps{ - Description: "Error is an optional error message which should be set if Success==false", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"name"}, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtifactResultNodeStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ArtifactResultNodeStatus describes the result of the deletion on a given node", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "artifactResults": { - SchemaProps: spec.SchemaProps{ - Description: "ArtifactResults maps Artifact name to result of the deletion", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: 
ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactResult"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactResult"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtifactSearchQuery(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "artifactGCStrategies": { - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: false, - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - "artifactName": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "templateName": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "nodeId": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "deleted": { - SchemaProps: spec.SchemaProps{ - Type: []string{"boolean"}, - Format: "", - }, - }, - "nodeTypes": { - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: false, - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtifactSearchResult(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "Artifact": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact"), - }, - }, - 
"NodeID": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"Artifact", "NodeID"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtifactoryArtifact(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ArtifactoryArtifact is the location of an artifactory artifact", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "url": { - SchemaProps: spec.SchemaProps{ - Description: "URL of the artifact", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "usernameSecret": { - SchemaProps: spec.SchemaProps{ - Description: "UsernameSecret is the secret selector to the repository username", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "passwordSecret": { - SchemaProps: spec.SchemaProps{ - Description: "PasswordSecret is the secret selector to the repository password", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - }, - Required: []string{"url"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtifactoryArtifactRepository(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ArtifactoryArtifactRepository defines the controller configuration for an artifactory artifact repository", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "usernameSecret": { - SchemaProps: spec.SchemaProps{ - Description: "UsernameSecret is the secret selector to the repository username", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "passwordSecret": { - SchemaProps: spec.SchemaProps{ - Description: 
"PasswordSecret is the secret selector to the repository password", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "repoURL": { - SchemaProps: spec.SchemaProps{ - Description: "RepoURL is the url for artifactory repo.", - Type: []string{"string"}, - Format: "", - }, - }, - "keyFormat": { - SchemaProps: spec.SchemaProps{ - Description: "KeyFormat defines the format of how to store keys and can reference workflow variables.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtifactoryAuth(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ArtifactoryAuth describes the secret selectors required for authenticating to artifactory", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "usernameSecret": { - SchemaProps: spec.SchemaProps{ - Description: "UsernameSecret is the secret selector to the repository username", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "passwordSecret": { - SchemaProps: spec.SchemaProps{ - Description: "PasswordSecret is the secret selector to the repository password", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_AzureArtifact(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "AzureArtifact is the location of a an Azure Storage artifact", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "endpoint": { - SchemaProps: spec.SchemaProps{ - Description: "Endpoint is the service url associated with an account. 
It is most likely \"https://<ACCOUNT_NAME>.blob.core.windows.net\"", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "container": { - SchemaProps: spec.SchemaProps{ - Description: "Container is the container where resources will be stored", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "accountKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "AccountKeySecret is the secret selector to the Azure Blob Storage account access key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "useSDKCreds": { - SchemaProps: spec.SchemaProps{ - Description: "UseSDKCreds tells the driver to figure out credentials based on sdk defaults.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "blob": { - SchemaProps: spec.SchemaProps{ - Description: "Blob is the blob name (i.e., path) in the container where the artifact resides", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"endpoint", "container", "blob"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_AzureArtifactRepository(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "AzureArtifactRepository defines the controller configuration for an Azure Blob Storage artifact repository", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "endpoint": { - SchemaProps: spec.SchemaProps{ - Description: "Endpoint is the service url associated with an account. 
It is most likely \"https://<ACCOUNT_NAME>.blob.core.windows.net\"", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "container": { - SchemaProps: spec.SchemaProps{ - Description: "Container is the container where resources will be stored", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "accountKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "AccountKeySecret is the secret selector to the Azure Blob Storage account access key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "useSDKCreds": { - SchemaProps: spec.SchemaProps{ - Description: "UseSDKCreds tells the driver to figure out credentials based on sdk defaults.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "blobNameFormat": { - SchemaProps: spec.SchemaProps{ - Description: "BlobNameFormat is defines the format of how to store blob names. Can reference workflow variables", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"endpoint", "container"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_AzureBlobContainer(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "AzureBlobContainer contains the access information for interfacing with an Azure Blob Storage container", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "endpoint": { - SchemaProps: spec.SchemaProps{ - Description: "Endpoint is the service url associated with an account. 
It is most likely \"https://<ACCOUNT_NAME>.blob.core.windows.net\"", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "container": { - SchemaProps: spec.SchemaProps{ - Description: "Container is the container where resources will be stored", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "accountKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "AccountKeySecret is the secret selector to the Azure Blob Storage account access key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "useSDKCreds": { - SchemaProps: spec.SchemaProps{ - Description: "UseSDKCreds tells the driver to figure out credentials based on sdk defaults.", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"endpoint", "container"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Backoff(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Backoff is a backoff strategy to use within retryStrategy", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "duration": { - SchemaProps: spec.SchemaProps{ - Description: "Duration is the amount to back off. Default unit is seconds, but could also be a duration (e.g. \"2m\", \"1h\")", - Type: []string{"string"}, - Format: "", - }, - }, - "factor": { - SchemaProps: spec.SchemaProps{ - Description: "Factor is a factor to multiply the base duration after each failed retry", - Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), - }, - }, - "maxDuration": { - SchemaProps: spec.SchemaProps{ - Description: "MaxDuration is the maximum amount of time allowed for a workflow in the backoff strategy. It is important to note that if the workflow template includes activeDeadlineSeconds, the pod's deadline is initially set with activeDeadlineSeconds. 
However, when the workflow fails, the pod's deadline is then overridden by maxDuration. This ensures that the workflow does not exceed the specified maximum duration when retries are involved.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_BasicAuth(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "BasicAuth describes the secret selectors required for basic authentication", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "usernameSecret": { - SchemaProps: spec.SchemaProps{ - Description: "UsernameSecret is the secret selector to the repository username", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "passwordSecret": { - SchemaProps: spec.SchemaProps{ - Description: "PasswordSecret is the secret selector to the repository password", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Cache(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Cache is the configuration for the type of cache to be used", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "configMap": { - SchemaProps: spec.SchemaProps{ - Description: "ConfigMap sets a ConfigMap-based cache", - Ref: ref("k8s.io/api/core/v1.ConfigMapKeySelector"), - }, - }, - }, - Required: []string{"configMap"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ConfigMapKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ClientCertAuth(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: 
spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ClientCertAuth holds necessary information for client authentication via certificates", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "clientCertSecret": { - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "clientKeySecret": { - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ClusterWorkflowTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ClusterWorkflowTemplate is the definition of a workflow template resource in cluster scope", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec"), - }, - }, - }, - Required: []string{"metadata", "spec"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ClusterWorkflowTemplateList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ClusterWorkflowTemplateList is list of ClusterWorkflowTemplate resources", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ClusterWorkflowTemplate"), - }, - }, - }, - }, - }, - }, - Required: []string{"metadata", "items"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ClusterWorkflowTemplate", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Column(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Column is a custom column that will be exposed in the Workflow List View.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "The name of this column, e.g., \"Workflow Completed\".", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "type": { - SchemaProps: spec.SchemaProps{ - Description: "The type of this column, \"label\" or \"annotation\".", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "key": { - SchemaProps: spec.SchemaProps{ - Description: "The key of the label or annotation, e.g., \"workflows.argoproj.io/completed\".", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"name", "type", "key"}, - }, - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - 
"x-kubernetes-patch-strategy": "merge", - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Condition(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "type": { - SchemaProps: spec.SchemaProps{ - Description: "Type is the type of condition", - Type: []string{"string"}, - Format: "", - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Description: "Status is the status of the condition", - Type: []string{"string"}, - Format: "", - }, - }, - "message": { - SchemaProps: spec.SchemaProps{ - Description: "Message is the condition message", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ContainerNode(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "image": { - SchemaProps: spec.SchemaProps{ - Description: "Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", - Type: []string{"string"}, - Format: "", - }, - }, - "command": { - SchemaProps: spec.SchemaProps{ - Description: "Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "args": { - SchemaProps: spec.SchemaProps{ - Description: "Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "workingDir": { - SchemaProps: spec.SchemaProps{ - Description: "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. 
Cannot be updated.", - Type: []string{"string"}, - Format: "", - }, - }, - "ports": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-map-keys": []interface{}{ - "containerPort", - "protocol", - }, - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "containerPort", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.ContainerPort"), - }, - }, - }, - }, - }, - "envFrom": { - SchemaProps: spec.SchemaProps{ - Description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. 
Cannot be updated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.EnvFromSource"), - }, - }, - }, - }, - }, - "env": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "List of environment variables to set in the container. Cannot be updated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.EnvVar"), - }, - }, - }, - }, - }, - "resources": { - SchemaProps: spec.SchemaProps{ - Description: "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), - }, - }, - "volumeMounts": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "mountPath", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Pod volumes to mount into the container's filesystem. 
Cannot be updated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.VolumeMount"), - }, - }, - }, - }, - }, - "volumeDevices": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "devicePath", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "volumeDevices is the list of block devices to be used by the container.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.VolumeDevice"), - }, - }, - }, - }, - }, - "livenessProbe": { - SchemaProps: spec.SchemaProps{ - Description: "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - Ref: ref("k8s.io/api/core/v1.Probe"), - }, - }, - "readinessProbe": { - SchemaProps: spec.SchemaProps{ - Description: "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - Ref: ref("k8s.io/api/core/v1.Probe"), - }, - }, - "startupProbe": { - SchemaProps: spec.SchemaProps{ - Description: "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - Ref: ref("k8s.io/api/core/v1.Probe"), - }, - }, - "lifecycle": { - SchemaProps: spec.SchemaProps{ - Description: "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", - Ref: ref("k8s.io/api/core/v1.Lifecycle"), - }, - }, - "terminationMessagePath": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", - Type: []string{"string"}, - Format: "", - }, - }, - "terminationMessagePolicy": { - SchemaProps: spec.SchemaProps{ - Description: "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", - Type: []string{"string"}, - Format: "", - }, - }, - "imagePullPolicy": { - SchemaProps: spec.SchemaProps{ - Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", - Type: []string{"string"}, - Format: "", - }, - }, - "securityContext": { - SchemaProps: spec.SchemaProps{ - Description: "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", - Ref: ref("k8s.io/api/core/v1.SecurityContext"), - }, - }, - "stdin": { - SchemaProps: spec.SchemaProps{ - Description: "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "stdinOnce": { - SchemaProps: spec.SchemaProps{ - Description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", - Type: []string{"boolean"}, - Format: "", - }, - }, - "tty": { - SchemaProps: spec.SchemaProps{ - Description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. 
Default is false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "dependencies": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - Required: []string{"name"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ContainerSetRetryStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ContainerSetRetryStrategy provides controls on how to retry a container set", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "duration": { - SchemaProps: spec.SchemaProps{ - Description: "Duration is the time between each retry, examples values are \"300ms\", \"1s\" or \"5m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\".", - Type: []string{"string"}, - Format: "", - }, - }, - "retries": { - SchemaProps: spec.SchemaProps{ - Description: "Retries is the maximum number of retry attempts for each container. 
It does not include the first, original attempt; the maximum number of total attempts will be `retries + 1`.", - Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), - }, - }, - }, - Required: []string{"retries"}, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ContainerSetTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "containers": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContainerNode"), - }, - }, - }, - }, - }, - "volumeMounts": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.VolumeMount"), - }, - }, - }, - }, - }, - "retryStrategy": { - SchemaProps: spec.SchemaProps{ - Description: "RetryStrategy describes how to retry container nodes if the container set fails. 
Note that this works differently from the template-level `retryStrategy` as it is a process-level retry that does not create new Pods or containers.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContainerSetRetryStrategy"), - }, - }, - }, - Required: []string{"containers"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContainerNode", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContainerSetRetryStrategy", "k8s.io/api/core/v1.VolumeMount"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ContinueOn(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ContinueOn defines if a workflow should continue even if a task or step fails/errors. It can be specified if the workflow should continue when the pod errors, fails or both.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "error": { - SchemaProps: spec.SchemaProps{ - Type: []string{"boolean"}, - Format: "", - }, - }, - "failed": { - SchemaProps: spec.SchemaProps{ - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Counter(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Counter is a Counter prometheus metric", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "value": { - SchemaProps: spec.SchemaProps{ - Description: "Value is the value of the metric", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"value"}, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_CreateS3BucketOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - 
Description: "CreateS3BucketOptions options used to determine automatic automatic bucket-creation process", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "objectLocking": { - SchemaProps: spec.SchemaProps{ - Description: "ObjectLocking Enable object locking", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_CronWorkflow(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "CronWorkflow is the definition of a scheduled workflow resource", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CronWorkflowSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CronWorkflowStatus"), - }, - }, - }, - Required: []string{"metadata", "spec"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CronWorkflowSpec", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CronWorkflowStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_CronWorkflowList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "CronWorkflowList is list of CronWorkflow resources", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CronWorkflow"), - }, - }, - }, - }, - }, - }, - Required: []string{"metadata", "items"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CronWorkflow", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_CronWorkflowSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "CronWorkflowSpec is the specification of a CronWorkflow", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "workflowSpec": { - SchemaProps: spec.SchemaProps{ - Description: "WorkflowSpec is the spec of the workflow to be run", - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec"), - }, - }, - "schedule": { - SchemaProps: spec.SchemaProps{ - Description: "Schedule is a schedule to run the Workflow in Cron format", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "concurrencyPolicy": { - SchemaProps: spec.SchemaProps{ - Description: "ConcurrencyPolicy is the K8s-style concurrency policy that will be used", - Type: []string{"string"}, - Format: "", - }, - }, - "suspend": { - SchemaProps: 
spec.SchemaProps{ - Description: "Suspend is a flag that will stop new CronWorkflows from running if set to true", - Type: []string{"boolean"}, - Format: "", - }, - }, - "startingDeadlineSeconds": { - SchemaProps: spec.SchemaProps{ - Description: "StartingDeadlineSeconds is the K8s-style deadline that will limit the time a CronWorkflow will be run after its original scheduled time if it is missed.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "successfulJobsHistoryLimit": { - SchemaProps: spec.SchemaProps{ - Description: "SuccessfulJobsHistoryLimit is the number of successful jobs to be kept at a time", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "failedJobsHistoryLimit": { - SchemaProps: spec.SchemaProps{ - Description: "FailedJobsHistoryLimit is the number of failed jobs to be kept at a time", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "timezone": { - SchemaProps: spec.SchemaProps{ - Description: "Timezone is the timezone against which the cron schedule will be calculated, e.g. \"Asia/Tokyo\". 
Default is machine's local time.", - Type: []string{"string"}, - Format: "", - }, - }, - "workflowMetadata": { - SchemaProps: spec.SchemaProps{ - Description: "WorkflowMetadata contains some metadata of the workflow to be run", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - }, - Required: []string{"workflowSpec", "schedule"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_CronWorkflowStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "CronWorkflowStatus is the status of a CronWorkflow", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "active": { - SchemaProps: spec.SchemaProps{ - Description: "Active is a list of active workflows stemming from this CronWorkflow", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.ObjectReference"), - }, - }, - }, - }, - }, - "lastScheduledTime": { - SchemaProps: spec.SchemaProps{ - Description: "LastScheduleTime is the last time the CronWorkflow was scheduled", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "conditions": { - SchemaProps: spec.SchemaProps{ - Description: "Conditions is a list of conditions the CronWorkflow may have", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Condition"), - }, - }, - }, - }, - }, - }, - Required: []string{"active", "lastScheduledTime", "conditions"}, - }, - }, - Dependencies: []string{ - 
"github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Condition", "k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_DAGTask(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "DAGTask represents a node in the graph during DAG execution", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name is the name of the target", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "template": { - SchemaProps: spec.SchemaProps{ - Description: "Name of template to execute", - Type: []string{"string"}, - Format: "", - }, - }, - "inline": { - SchemaProps: spec.SchemaProps{ - Description: "Inline is the template. Template must be empty if this is declared (and vice-versa).", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template"), - }, - }, - "arguments": { - SchemaProps: spec.SchemaProps{ - Description: "Arguments are the parameter and artifact arguments to the template", - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments"), - }, - }, - "templateRef": { - SchemaProps: spec.SchemaProps{ - Description: "TemplateRef is the reference to the template resource to execute.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TemplateRef"), - }, - }, - "dependencies": { - SchemaProps: spec.SchemaProps{ - Description: "Dependencies are name of other targets which this depends on", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "withItems": { - SchemaProps: spec.SchemaProps{ - Description: "WithItems expands a task 
into multiple parallel tasks from the items in the list", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Item"), - }, - }, - }, - }, - }, - "withParam": { - SchemaProps: spec.SchemaProps{ - Description: "WithParam expands a task into multiple parallel tasks from the value in the parameter, which is expected to be a JSON list.", - Type: []string{"string"}, - Format: "", - }, - }, - "withSequence": { - SchemaProps: spec.SchemaProps{ - Description: "WithSequence expands a task into a numeric sequence", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Sequence"), - }, - }, - "when": { - SchemaProps: spec.SchemaProps{ - Description: "When is an expression in which the task should conditionally execute", - Type: []string{"string"}, - Format: "", - }, - }, - "continueOn": { - SchemaProps: spec.SchemaProps{ - Description: "ContinueOn makes argo to proceed with the following step even if this step fails. Errors and Failed states can be specified", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContinueOn"), - }, - }, - "onExit": { - SchemaProps: spec.SchemaProps{ - Description: "OnExit is a template reference which is invoked at the end of the template, irrespective of the success, failure, or error of the primary template. 
DEPRECATED: Use Hooks[exit].Template instead.", - Type: []string{"string"}, - Format: "", - }, - }, - "depends": { - SchemaProps: spec.SchemaProps{ - Description: "Depends are name of other targets which this depends on", - Type: []string{"string"}, - Format: "", - }, - }, - "hooks": { - SchemaProps: spec.SchemaProps{ - Description: "Hooks hold the lifecycle hook which is invoked at lifecycle of task, irrespective of the success, failure, or error status of the primary task", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LifecycleHook"), - }, - }, - }, - }, - }, - }, - Required: []string{"name"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContinueOn", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Item", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LifecycleHook", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Sequence", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TemplateRef"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_DAGTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "DAGTemplate is a template subtype for directed acyclic graph templates", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "target": { - SchemaProps: spec.SchemaProps{ - Description: "Target are one or more names of targets to execute in a DAG", - Type: []string{"string"}, - Format: "", - }, - }, - "tasks": { - VendorExtensible: spec.VendorExtensible{ 
- Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Tasks are a list of DAG tasks", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.DAGTask"), - }, - }, - }, - }, - }, - "failFast": { - SchemaProps: spec.SchemaProps{ - Description: "This flag is for DAG logic. The DAG logic has a built-in \"fail fast\" feature to stop scheduling new steps, as soon as it detects that one of the DAG nodes is failed. Then it waits until all DAG nodes are completed before failing the DAG itself. The FailFast flag default is true, if set to false, it will allow a DAG to run all branches of the DAG to completion (either success or failure), regardless of the failed outcomes of branches in the DAG. More info and example about this feature at https://github.com/argoproj/argo-workflows/issues/1442", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"tasks"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.DAGTask"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Data(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Data is a data template", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "source": { - SchemaProps: spec.SchemaProps{ - Description: "Source sources external data into a data template", - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.DataSource"), - }, - }, - "transformation": { - SchemaProps: spec.SchemaProps{ - Description: "Transformation applies a set of transformations", - Type: 
[]string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TransformationStep"), - }, - }, - }, - }, - }, - }, - Required: []string{"source", "transformation"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.DataSource", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TransformationStep"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_DataSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "DataSource sources external data into a data template", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "artifactPaths": { - SchemaProps: spec.SchemaProps{ - Description: "ArtifactPaths is a data transformation that collects a list of artifact paths", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactPaths"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactPaths"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Event(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "selector": { - SchemaProps: spec.SchemaProps{ - Description: "Selector (https://github.com/expr-lang/expr) that we must must match the event. E.g. 
`payload.message == \"test\"`", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"selector"}, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ExecutorConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ExecutorConfig holds configurations of an executor container.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "serviceAccountName": { - SchemaProps: spec.SchemaProps{ - Description: "ServiceAccountName specifies the service account name of the executor container.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_GCSArtifact(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "GCSArtifact is the location of a GCS artifact", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "bucket": { - SchemaProps: spec.SchemaProps{ - Description: "Bucket is the name of the bucket", - Type: []string{"string"}, - Format: "", - }, - }, - "serviceAccountKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "ServiceAccountKeySecret is the secret selector to the bucket's service account key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "key": { - SchemaProps: spec.SchemaProps{ - Description: "Key is the path in the bucket where the artifact resides", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"key"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_GCSArtifactRepository(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: 
"GCSArtifactRepository defines the controller configuration for a GCS artifact repository", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "bucket": { - SchemaProps: spec.SchemaProps{ - Description: "Bucket is the name of the bucket", - Type: []string{"string"}, - Format: "", - }, - }, - "serviceAccountKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "ServiceAccountKeySecret is the secret selector to the bucket's service account key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "keyFormat": { - SchemaProps: spec.SchemaProps{ - Description: "KeyFormat defines the format of how to store keys and can reference workflow variables.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_GCSBucket(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "GCSBucket contains the access information for interfacring with a GCS bucket", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "bucket": { - SchemaProps: spec.SchemaProps{ - Description: "Bucket is the name of the bucket", - Type: []string{"string"}, - Format: "", - }, - }, - "serviceAccountKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "ServiceAccountKeySecret is the secret selector to the bucket's service account key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Gauge(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Gauge is a Gauge prometheus metric", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "value": { - 
SchemaProps: spec.SchemaProps{ - Description: "Value is the value to be used in the operation with the metric's current value. If no operation is set, value is the value of the metric", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "realtime": { - SchemaProps: spec.SchemaProps{ - Description: "Realtime emits this metric in real time if applicable", - Type: []string{"boolean"}, - Format: "", - }, - }, - "operation": { - SchemaProps: spec.SchemaProps{ - Description: "Operation defines the operation to apply with value and the metrics' current value", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"value", "realtime"}, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_GitArtifact(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "GitArtifact is the location of an git artifact", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "repo": { - SchemaProps: spec.SchemaProps{ - Description: "Repo is the git repository", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "revision": { - SchemaProps: spec.SchemaProps{ - Description: "Revision is the git commit, tag, branch to checkout", - Type: []string{"string"}, - Format: "", - }, - }, - "depth": { - SchemaProps: spec.SchemaProps{ - Description: "Depth specifies clones/fetches should be shallow and include the given number of commits from the branch tip", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "fetch": { - SchemaProps: spec.SchemaProps{ - Description: "Fetch specifies a number of refs that should be fetched before checkout", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "usernameSecret": { - SchemaProps: spec.SchemaProps{ - Description: 
"UsernameSecret is the secret selector to the repository username", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "passwordSecret": { - SchemaProps: spec.SchemaProps{ - Description: "PasswordSecret is the secret selector to the repository password", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "sshPrivateKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "SSHPrivateKeySecret is the secret selector to the repository ssh private key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "insecureIgnoreHostKey": { - SchemaProps: spec.SchemaProps{ - Description: "InsecureIgnoreHostKey disables SSH strict host key checking during git clone", - Type: []string{"boolean"}, - Format: "", - }, - }, - "disableSubmodules": { - SchemaProps: spec.SchemaProps{ - Description: "DisableSubmodules disables submodules during git clone", - Type: []string{"boolean"}, - Format: "", - }, - }, - "singleBranch": { - SchemaProps: spec.SchemaProps{ - Description: "SingleBranch enables single branch clone, using the `branch` parameter", - Type: []string{"boolean"}, - Format: "", - }, - }, - "branch": { - SchemaProps: spec.SchemaProps{ - Description: "Branch is the branch to fetch when `SingleBranch` is enabled", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"repo"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_HDFSArtifact(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "HDFSArtifact is the location of an HDFS artifact", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "krbCCacheSecret": { - SchemaProps: spec.SchemaProps{ - Description: "KrbCCacheSecret is the secret selector for Kerberos ccache Either ccache or keytab can be set to use Kerberos.", - Ref: 
ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "krbKeytabSecret": { - SchemaProps: spec.SchemaProps{ - Description: "KrbKeytabSecret is the secret selector for Kerberos keytab Either ccache or keytab can be set to use Kerberos.", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "krbUsername": { - SchemaProps: spec.SchemaProps{ - Description: "KrbUsername is the Kerberos username used with Kerberos keytab It must be set if keytab is used.", - Type: []string{"string"}, - Format: "", - }, - }, - "krbRealm": { - SchemaProps: spec.SchemaProps{ - Description: "KrbRealm is the Kerberos realm used with Kerberos keytab It must be set if keytab is used.", - Type: []string{"string"}, - Format: "", - }, - }, - "krbConfigConfigMap": { - SchemaProps: spec.SchemaProps{ - Description: "KrbConfig is the configmap selector for Kerberos config as string It must be set if either ccache or keytab is used.", - Ref: ref("k8s.io/api/core/v1.ConfigMapKeySelector"), - }, - }, - "krbServicePrincipalName": { - SchemaProps: spec.SchemaProps{ - Description: "KrbServicePrincipalName is the principal name of Kerberos service It must be set if either ccache or keytab is used.", - Type: []string{"string"}, - Format: "", - }, - }, - "addresses": { - SchemaProps: spec.SchemaProps{ - Description: "Addresses is accessible addresses of HDFS name nodes", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "hdfsUser": { - SchemaProps: spec.SchemaProps{ - Description: "HDFSUser is the user to access HDFS file system. 
It is ignored if either ccache or keytab is used.", - Type: []string{"string"}, - Format: "", - }, - }, - "path": { - SchemaProps: spec.SchemaProps{ - Description: "Path is a file path in HDFS", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "force": { - SchemaProps: spec.SchemaProps{ - Description: "Force copies a file forcibly even if it exists", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"path"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ConfigMapKeySelector", "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_HDFSArtifactRepository(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "HDFSArtifactRepository defines the controller configuration for an HDFS artifact repository", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "krbCCacheSecret": { - SchemaProps: spec.SchemaProps{ - Description: "KrbCCacheSecret is the secret selector for Kerberos ccache Either ccache or keytab can be set to use Kerberos.", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "krbKeytabSecret": { - SchemaProps: spec.SchemaProps{ - Description: "KrbKeytabSecret is the secret selector for Kerberos keytab Either ccache or keytab can be set to use Kerberos.", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "krbUsername": { - SchemaProps: spec.SchemaProps{ - Description: "KrbUsername is the Kerberos username used with Kerberos keytab It must be set if keytab is used.", - Type: []string{"string"}, - Format: "", - }, - }, - "krbRealm": { - SchemaProps: spec.SchemaProps{ - Description: "KrbRealm is the Kerberos realm used with Kerberos keytab It must be set if keytab is used.", - Type: []string{"string"}, - Format: "", - }, - }, - "krbConfigConfigMap": { - SchemaProps: spec.SchemaProps{ - Description: "KrbConfig is the 
configmap selector for Kerberos config as string It must be set if either ccache or keytab is used.", - Ref: ref("k8s.io/api/core/v1.ConfigMapKeySelector"), - }, - }, - "krbServicePrincipalName": { - SchemaProps: spec.SchemaProps{ - Description: "KrbServicePrincipalName is the principal name of Kerberos service It must be set if either ccache or keytab is used.", - Type: []string{"string"}, - Format: "", - }, - }, - "addresses": { - SchemaProps: spec.SchemaProps{ - Description: "Addresses is accessible addresses of HDFS name nodes", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "hdfsUser": { - SchemaProps: spec.SchemaProps{ - Description: "HDFSUser is the user to access HDFS file system. It is ignored if either ccache or keytab is used.", - Type: []string{"string"}, - Format: "", - }, - }, - "pathFormat": { - SchemaProps: spec.SchemaProps{ - Description: "PathFormat is defines the format of path to store a file. 
Can reference workflow variables", - Type: []string{"string"}, - Format: "", - }, - }, - "force": { - SchemaProps: spec.SchemaProps{ - Description: "Force copies a file forcibly even if it exists", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ConfigMapKeySelector", "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_HDFSConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "HDFSConfig is configurations for HDFS", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "krbCCacheSecret": { - SchemaProps: spec.SchemaProps{ - Description: "KrbCCacheSecret is the secret selector for Kerberos ccache Either ccache or keytab can be set to use Kerberos.", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "krbKeytabSecret": { - SchemaProps: spec.SchemaProps{ - Description: "KrbKeytabSecret is the secret selector for Kerberos keytab Either ccache or keytab can be set to use Kerberos.", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "krbUsername": { - SchemaProps: spec.SchemaProps{ - Description: "KrbUsername is the Kerberos username used with Kerberos keytab It must be set if keytab is used.", - Type: []string{"string"}, - Format: "", - }, - }, - "krbRealm": { - SchemaProps: spec.SchemaProps{ - Description: "KrbRealm is the Kerberos realm used with Kerberos keytab It must be set if keytab is used.", - Type: []string{"string"}, - Format: "", - }, - }, - "krbConfigConfigMap": { - SchemaProps: spec.SchemaProps{ - Description: "KrbConfig is the configmap selector for Kerberos config as string It must be set if either ccache or keytab is used.", - Ref: ref("k8s.io/api/core/v1.ConfigMapKeySelector"), - }, - }, - "krbServicePrincipalName": { - SchemaProps: spec.SchemaProps{ - Description: "KrbServicePrincipalName is 
the principal name of Kerberos service It must be set if either ccache or keytab is used.", - Type: []string{"string"}, - Format: "", - }, - }, - "addresses": { - SchemaProps: spec.SchemaProps{ - Description: "Addresses is accessible addresses of HDFS name nodes", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "hdfsUser": { - SchemaProps: spec.SchemaProps{ - Description: "HDFSUser is the user to access HDFS file system. It is ignored if either ccache or keytab is used.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ConfigMapKeySelector", "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_HDFSKrbConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "HDFSKrbConfig is auth configurations for Kerberos", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "krbCCacheSecret": { - SchemaProps: spec.SchemaProps{ - Description: "KrbCCacheSecret is the secret selector for Kerberos ccache Either ccache or keytab can be set to use Kerberos.", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "krbKeytabSecret": { - SchemaProps: spec.SchemaProps{ - Description: "KrbKeytabSecret is the secret selector for Kerberos keytab Either ccache or keytab can be set to use Kerberos.", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "krbUsername": { - SchemaProps: spec.SchemaProps{ - Description: "KrbUsername is the Kerberos username used with Kerberos keytab It must be set if keytab is used.", - Type: []string{"string"}, - Format: "", - }, - }, - "krbRealm": { - SchemaProps: spec.SchemaProps{ - Description: "KrbRealm is the Kerberos realm used with Kerberos keytab It must be 
set if keytab is used.", - Type: []string{"string"}, - Format: "", - }, - }, - "krbConfigConfigMap": { - SchemaProps: spec.SchemaProps{ - Description: "KrbConfig is the configmap selector for Kerberos config as string It must be set if either ccache or keytab is used.", - Ref: ref("k8s.io/api/core/v1.ConfigMapKeySelector"), - }, - }, - "krbServicePrincipalName": { - SchemaProps: spec.SchemaProps{ - Description: "KrbServicePrincipalName is the principal name of Kerberos service It must be set if either ccache or keytab is used.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ConfigMapKeySelector", "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_HTTP(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "method": { - SchemaProps: spec.SchemaProps{ - Description: "Method is HTTP methods for HTTP Request", - Type: []string{"string"}, - Format: "", - }, - }, - "url": { - SchemaProps: spec.SchemaProps{ - Description: "URL of the HTTP Request", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "headers": { - SchemaProps: spec.SchemaProps{ - Description: "Headers are an optional list of headers to send with HTTP requests", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPHeader"), - }, - }, - }, - }, - }, - "timeoutSeconds": { - SchemaProps: spec.SchemaProps{ - Description: "TimeoutSeconds is request timeout for HTTP Request. 
Default is 30 seconds", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "successCondition": { - SchemaProps: spec.SchemaProps{ - Description: "SuccessCondition is an expression if evaluated to true is considered successful", - Type: []string{"string"}, - Format: "", - }, - }, - "body": { - SchemaProps: spec.SchemaProps{ - Description: "Body is content of the HTTP Request", - Type: []string{"string"}, - Format: "", - }, - }, - "bodyFrom": { - SchemaProps: spec.SchemaProps{ - Description: "BodyFrom is content of the HTTP Request as Bytes", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPBodySource"), - }, - }, - "insecureSkipVerify": { - SchemaProps: spec.SchemaProps{ - Description: "InsecureSkipVerify is a bool when if set to true will skip TLS verification for the HTTP client", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"url"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPBodySource", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPHeader"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_HTTPArtifact(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "HTTPArtifact allows a file served on HTTP to be placed as an input artifact in a container", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "url": { - SchemaProps: spec.SchemaProps{ - Description: "URL of the artifact", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "headers": { - SchemaProps: spec.SchemaProps{ - Description: "Headers are an optional list of headers to send with HTTP requests for artifacts", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: 
ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Header"), - }, - }, - }, - }, - }, - "auth": { - SchemaProps: spec.SchemaProps{ - Description: "Auth contains information for client authentication", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPAuth"), - }, - }, - }, - Required: []string{"url"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPAuth", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Header"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_HTTPAuth(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "clientCert": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ClientCertAuth"), - }, - }, - "oauth2": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OAuth2Auth"), - }, - }, - "basicAuth": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.BasicAuth"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.BasicAuth", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ClientCertAuth", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OAuth2Auth"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_HTTPBodySource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "HTTPBodySource contains the source of the HTTP body.", - Type: []string{"object"}, - Properties: 
map[string]spec.Schema{ - "bytes": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "byte", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_HTTPHeader(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "value": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "valueFrom": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPHeaderSource"), - }, - }, - }, - Required: []string{"name"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPHeaderSource"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_HTTPHeaderSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "secretKeyRef": { - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Header(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Header indicate a key-value request header to be used when fetching artifacts over HTTP", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name is the header name", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "value": { - SchemaProps: spec.SchemaProps{ - 
Description: "Value is the literal value to use for the header", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"name", "value"}, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Histogram(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Histogram is a Histogram prometheus metric", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "value": { - SchemaProps: spec.SchemaProps{ - Description: "Value is the value of the metric", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "buckets": { - SchemaProps: spec.SchemaProps{ - Description: "Buckets is a list of bucket divisors for the histogram", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Amount"), - }, - }, - }, - }, - }, - }, - Required: []string{"value", "buckets"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Amount"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Inputs(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Inputs are the mechanism for passing parameters, artifacts, volumes from one template to another", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "parameters": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Parameters are a list of parameters passed as inputs", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: 
spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Parameter"), - }, - }, - }, - }, - }, - "artifacts": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Artifact are a list of artifacts passed as inputs", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Parameter"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Item(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Item expands a single workflow step into multiple parallel steps The value of Item can be a map, string, bool, or number", - Type: Item{}.OpenAPISchemaType(), - Format: Item{}.OpenAPISchemaFormat(), - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_LabelKeys(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "LabelKeys is list of keys", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "items": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_LabelValueFrom(ref 
common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "expression": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"expression"}, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_LabelValues(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Labels is list of workflow labels", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "items": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_LifecycleHook(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "template": { - SchemaProps: spec.SchemaProps{ - Description: "Template is the name of the template to execute by the hook", - Type: []string{"string"}, - Format: "", - }, - }, - "arguments": { - SchemaProps: spec.SchemaProps{ - Description: "Arguments hold arguments to the template", - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments"), - }, - }, - "templateRef": { - SchemaProps: spec.SchemaProps{ - Description: "TemplateRef is the reference to the template resource to execute by the hook", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TemplateRef"), - }, - }, - "expression": { - SchemaProps: spec.SchemaProps{ - Description: "Expression is a 
condition expression for when a node will be retried. If it evaluates to false, the node will not be retried and the retry strategy will be ignored", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TemplateRef"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Link(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "A link to another app.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "The name of the link, E.g. \"Workflow Logs\" or \"Pod Logs\"", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "scope": { - SchemaProps: spec.SchemaProps{ - Description: "\"workflow\", \"pod\", \"pod-logs\", \"event-source-logs\", \"sensor-logs\", \"workflow-list\" or \"chat\"", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "url": { - SchemaProps: spec.SchemaProps{ - Description: "The URL. Can contain \"${metadata.namespace}\", \"${metadata.name}\", \"${status.startedAt}\", \"${status.finishedAt}\" or any other element in workflow yaml, e.g. 
\"${workflow.metadata.annotations.userDefinedKey}\"", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"name", "scope", "url"}, - }, - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ManifestFrom(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "artifact": { - SchemaProps: spec.SchemaProps{ - Description: "Artifact contains the artifact to use", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact"), - }, - }, - }, - Required: []string{"artifact"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_MemoizationStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "MemoizationStatus is the status of this memoized node", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "hit": { - SchemaProps: spec.SchemaProps{ - Description: "Hit indicates whether this node was created from a cache entry", - Default: false, - Type: []string{"boolean"}, - Format: "", - }, - }, - "key": { - SchemaProps: spec.SchemaProps{ - Description: "Key is the name of the key used for this node's cache", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "cacheName": { - SchemaProps: spec.SchemaProps{ - Description: "Cache is the name of the cache that was used", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"hit", "key", "cacheName"}, - }, - }, - } -} - -func 
schema_pkg_apis_workflow_v1alpha1_Memoize(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Memoization enables caching for the Outputs of the template", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "key": { - SchemaProps: spec.SchemaProps{ - Description: "Key is the key to use as the caching key", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "cache": { - SchemaProps: spec.SchemaProps{ - Description: "Cache sets and configures the kind of cache", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Cache"), - }, - }, - "maxAge": { - SchemaProps: spec.SchemaProps{ - Description: "MaxAge is the maximum age (e.g. \"180s\", \"24h\") of an entry that is still considered valid. If an entry is older than the MaxAge, it will be ignored.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"key", "cache", "maxAge"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Cache"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Metadata(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Pod metdata", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "annotations": { - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "labels": { - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, 
- }, - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_MetricLabel(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "MetricLabel is a single label for a prometheus metric", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "key": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "value": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"key", "value"}, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Metrics(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Metrics are a list of metrics emitted from a Workflow/Template", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "prometheus": { - SchemaProps: spec.SchemaProps{ - Description: "Prometheus is a list of prometheus metrics to be emitted", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Prometheus"), - }, - }, - }, - }, - }, - }, - Required: []string{"prometheus"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Prometheus"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Mutex(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Mutex holds Mutex configuration", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "name of the mutex", - Type: []string{"string"}, - Format: "", - }, - }, - "namespace": { - 
SchemaProps: spec.SchemaProps{ - Description: "Namespace is the namespace of the mutex, default: [namespace of workflow]", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_MutexHolding(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "MutexHolding describes the mutex and the object which is holding it.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "mutex": { - SchemaProps: spec.SchemaProps{ - Description: "Reference for the mutex e.g: ${namespace}/mutex/${mutexName}", - Type: []string{"string"}, - Format: "", - }, - }, - "holder": { - SchemaProps: spec.SchemaProps{ - Description: "Holder is a reference to the object which holds the Mutex. Holding Scenario:\n 1. Current workflow's NodeID which is holding the lock.\n e.g: ${NodeID}\nWaiting Scenario:\n 1. Current workflow or other workflow NodeID which is holding the lock.\n e.g: ${WorkflowName}/${NodeID}", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_MutexStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "MutexStatus contains which objects hold mutex locks, and which objects this workflow is waiting on to release locks.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "holding": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Holding is a list of mutexes and their respective objects that are held by mutex lock for this workflow.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - 
Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MutexHolding"), - }, - }, - }, - }, - }, - "waiting": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Waiting is a list of mutexes and their respective objects this workflow is waiting for.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MutexHolding"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MutexHolding"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_NodeFlag(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "hooked": { - SchemaProps: spec.SchemaProps{ - Description: "Hooked tracks whether or not this node was triggered by hook or onExit", - Type: []string{"boolean"}, - Format: "", - }, - }, - "retried": { - SchemaProps: spec.SchemaProps{ - Description: "Retried tracks whether or not this node was retried by retryStrategy", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_NodeResult(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "phase": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "message": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "outputs": { - SchemaProps: spec.SchemaProps{ - Ref: 
ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs"), - }, - }, - "progress": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_NodeStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "NodeStatus contains status information about an individual node in the workflow", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "id": { - SchemaProps: spec.SchemaProps{ - Description: "ID is a unique identifier of a node within the worklow It is implemented as a hash of the node name, which makes the ID deterministic", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name is unique name in the node tree used to generate the node ID", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "displayName": { - SchemaProps: spec.SchemaProps{ - Description: "DisplayName is a human readable representation of the node. Unique within a template boundary", - Type: []string{"string"}, - Format: "", - }, - }, - "type": { - SchemaProps: spec.SchemaProps{ - Description: "Type indicates type of node", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "templateName": { - SchemaProps: spec.SchemaProps{ - Description: "TemplateName is the template name which this node corresponds to. Not applicable to virtual nodes (e.g. Retry, StepGroup)", - Type: []string{"string"}, - Format: "", - }, - }, - "templateRef": { - SchemaProps: spec.SchemaProps{ - Description: "TemplateRef is the reference to the template resource which this node corresponds to. Not applicable to virtual nodes (e.g. 
Retry, StepGroup)", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TemplateRef"), - }, - }, - "templateScope": { - SchemaProps: spec.SchemaProps{ - Description: "TemplateScope is the template scope in which the template of this node was retrieved.", - Type: []string{"string"}, - Format: "", - }, - }, - "phase": { - SchemaProps: spec.SchemaProps{ - Description: "Phase a simple, high-level summary of where the node is in its lifecycle. Can be used as a state machine. Will be one of these values \"Pending\", \"Running\" before the node is completed, or \"Succeeded\", \"Skipped\", \"Failed\", \"Error\", or \"Omitted\" as a final state.", - Type: []string{"string"}, - Format: "", - }, - }, - "boundaryID": { - SchemaProps: spec.SchemaProps{ - Description: "BoundaryID indicates the node ID of the associated template root node in which this node belongs to", - Type: []string{"string"}, - Format: "", - }, - }, - "message": { - SchemaProps: spec.SchemaProps{ - Description: "A human readable message indicating details about why the node is in this condition.", - Type: []string{"string"}, - Format: "", - }, - }, - "startedAt": { - SchemaProps: spec.SchemaProps{ - Description: "Time at which this node started", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "finishedAt": { - SchemaProps: spec.SchemaProps{ - Description: "Time at which this node completed", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "estimatedDuration": { - SchemaProps: spec.SchemaProps{ - Description: "EstimatedDuration in seconds.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "progress": { - SchemaProps: spec.SchemaProps{ - Description: "Progress to completion", - Type: []string{"string"}, - Format: "", - }, - }, - "resourcesDuration": { - SchemaProps: spec.SchemaProps{ - Description: "ResourcesDuration is indicative, but not accurate, resource 
duration. This is populated when the nodes completes.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: 0, - Type: []string{"integer"}, - Format: "int64", - }, - }, - }, - }, - }, - "podIP": { - SchemaProps: spec.SchemaProps{ - Description: "PodIP captures the IP of the pod for daemoned steps", - Type: []string{"string"}, - Format: "", - }, - }, - "daemoned": { - SchemaProps: spec.SchemaProps{ - Description: "Daemoned tracks whether or not this node was daemoned and need to be terminated", - Type: []string{"boolean"}, - Format: "", - }, - }, - "nodeFlag": { - SchemaProps: spec.SchemaProps{ - Description: "NodeFlag tracks some history of node. e.g.) hooked, retried, etc.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeFlag"), - }, - }, - "inputs": { - SchemaProps: spec.SchemaProps{ - Description: "Inputs captures input parameter values and artifact locations supplied to this template invocation", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Inputs"), - }, - }, - "outputs": { - SchemaProps: spec.SchemaProps{ - Description: "Outputs captures output parameter values and artifact locations produced by this template invocation", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs"), - }, - }, - "children": { - SchemaProps: spec.SchemaProps{ - Description: "Children is a list of child node IDs", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "outboundNodes": { - SchemaProps: spec.SchemaProps{ - Description: "OutboundNodes tracks the node IDs which are considered \"outbound\" nodes to a template invocation. For every invocation of a template, there are nodes which we considered as \"outbound\". 
Essentially, these are last nodes in the execution sequence to run, before the template is considered completed. These nodes are then connected as parents to a following step.\n\nIn the case of single pod steps (i.e. container, script, resource templates), this list will be nil since the pod itself is already considered the \"outbound\" node. In the case of DAGs, outbound nodes are the \"target\" tasks (tasks with no children). In the case of steps, outbound nodes are all the containers involved in the last step group. NOTE: since templates are composable, the list of outbound nodes are carried upwards when a DAG/steps template invokes another DAG/steps template. In other words, the outbound nodes of a template, will be a superset of the outbound nodes of its last children.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "hostNodeName": { - SchemaProps: spec.SchemaProps{ - Description: "HostNodeName name of the Kubernetes node on which the Pod is running, if applicable", - Type: []string{"string"}, - Format: "", - }, - }, - "memoizationStatus": { - SchemaProps: spec.SchemaProps{ - Description: "MemoizationStatus holds information about cached nodes", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MemoizationStatus"), - }, - }, - "synchronizationStatus": { - SchemaProps: spec.SchemaProps{ - Description: "SynchronizationStatus is the synchronization status of the node", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeSynchronizationStatus"), - }, - }, - }, - Required: []string{"id", "name", "type"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Inputs", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MemoizationStatus", 
"github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeFlag", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeSynchronizationStatus", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TemplateRef", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_NodeSynchronizationStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "NodeSynchronizationStatus stores the status of a node", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "waiting": { - SchemaProps: spec.SchemaProps{ - Description: "Waiting is the name of the lock that this node is waiting for", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_NoneStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "NoneStrategy indicates to skip tar process and upload the files or directory tree as independent files. 
Note that if the artifact is a directory, the artifact driver must support the ability to save/load the directory appropriately.", - Type: []string{"object"}, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_OAuth2Auth(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "OAuth2Auth holds all information for client authentication via OAuth2 tokens", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "clientIDSecret": { - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "clientSecretSecret": { - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "tokenURLSecret": { - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "scopes": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "endpointParams": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OAuth2EndpointParam"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OAuth2EndpointParam", "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_OAuth2EndpointParam(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "EndpointParam is for requesting optional fields that should be sent in the oauth request", - Type: []string{"object"}, - 
Properties: map[string]spec.Schema{ - "key": { - SchemaProps: spec.SchemaProps{ - Description: "Name is the header name", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "value": { - SchemaProps: spec.SchemaProps{ - Description: "Value is the literal value to use for the header", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"key"}, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_OSSArtifact(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "OSSArtifact is the location of an Alibaba Cloud OSS artifact", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "endpoint": { - SchemaProps: spec.SchemaProps{ - Description: "Endpoint is the hostname of the bucket endpoint", - Type: []string{"string"}, - Format: "", - }, - }, - "bucket": { - SchemaProps: spec.SchemaProps{ - Description: "Bucket is the name of the bucket", - Type: []string{"string"}, - Format: "", - }, - }, - "accessKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "AccessKeySecret is the secret selector to the bucket's access key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "secretKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "SecretKeySecret is the secret selector to the bucket's secret key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "createBucketIfNotPresent": { - SchemaProps: spec.SchemaProps{ - Description: "CreateBucketIfNotPresent tells the driver to attempt to create the OSS bucket for output artifacts, if it doesn't exist", - Type: []string{"boolean"}, - Format: "", - }, - }, - "securityToken": { - SchemaProps: spec.SchemaProps{ - Description: "SecurityToken is the user's temporary security token. 
For more details, check out: https://www.alibabacloud.com/help/doc-detail/100624.htm", - Type: []string{"string"}, - Format: "", - }, - }, - "lifecycleRule": { - SchemaProps: spec.SchemaProps{ - Description: "LifecycleRule specifies how to manage bucket's lifecycle", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSLifecycleRule"), - }, - }, - "useSDKCreds": { - SchemaProps: spec.SchemaProps{ - Description: "UseSDKCreds tells the driver to figure out credentials based on sdk defaults.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "key": { - SchemaProps: spec.SchemaProps{ - Description: "Key is the path in the bucket where the artifact resides", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"key"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSLifecycleRule", "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_OSSArtifactRepository(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "OSSArtifactRepository defines the controller configuration for an OSS artifact repository", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "endpoint": { - SchemaProps: spec.SchemaProps{ - Description: "Endpoint is the hostname of the bucket endpoint", - Type: []string{"string"}, - Format: "", - }, - }, - "bucket": { - SchemaProps: spec.SchemaProps{ - Description: "Bucket is the name of the bucket", - Type: []string{"string"}, - Format: "", - }, - }, - "accessKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "AccessKeySecret is the secret selector to the bucket's access key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "secretKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "SecretKeySecret is the secret selector to the bucket's 
secret key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "createBucketIfNotPresent": { - SchemaProps: spec.SchemaProps{ - Description: "CreateBucketIfNotPresent tells the driver to attempt to create the OSS bucket for output artifacts, if it doesn't exist", - Type: []string{"boolean"}, - Format: "", - }, - }, - "securityToken": { - SchemaProps: spec.SchemaProps{ - Description: "SecurityToken is the user's temporary security token. For more details, check out: https://www.alibabacloud.com/help/doc-detail/100624.htm", - Type: []string{"string"}, - Format: "", - }, - }, - "lifecycleRule": { - SchemaProps: spec.SchemaProps{ - Description: "LifecycleRule specifies how to manage bucket's lifecycle", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSLifecycleRule"), - }, - }, - "useSDKCreds": { - SchemaProps: spec.SchemaProps{ - Description: "UseSDKCreds tells the driver to figure out credentials based on sdk defaults.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "keyFormat": { - SchemaProps: spec.SchemaProps{ - Description: "KeyFormat defines the format of how to store keys and can reference workflow variables.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSLifecycleRule", "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_OSSBucket(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "OSSBucket contains the access information required for interfacing with an Alibaba Cloud OSS bucket", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "endpoint": { - SchemaProps: spec.SchemaProps{ - Description: "Endpoint is the hostname of the bucket endpoint", - Type: []string{"string"}, - Format: "", - }, - }, - "bucket": { - SchemaProps: 
spec.SchemaProps{ - Description: "Bucket is the name of the bucket", - Type: []string{"string"}, - Format: "", - }, - }, - "accessKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "AccessKeySecret is the secret selector to the bucket's access key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "secretKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "SecretKeySecret is the secret selector to the bucket's secret key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "createBucketIfNotPresent": { - SchemaProps: spec.SchemaProps{ - Description: "CreateBucketIfNotPresent tells the driver to attempt to create the OSS bucket for output artifacts, if it doesn't exist", - Type: []string{"boolean"}, - Format: "", - }, - }, - "securityToken": { - SchemaProps: spec.SchemaProps{ - Description: "SecurityToken is the user's temporary security token. For more details, check out: https://www.alibabacloud.com/help/doc-detail/100624.htm", - Type: []string{"string"}, - Format: "", - }, - }, - "lifecycleRule": { - SchemaProps: spec.SchemaProps{ - Description: "LifecycleRule specifies how to manage bucket's lifecycle", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSLifecycleRule"), - }, - }, - "useSDKCreds": { - SchemaProps: spec.SchemaProps{ - Description: "UseSDKCreds tells the driver to figure out credentials based on sdk defaults.", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSLifecycleRule", "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_OSSLifecycleRule(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "OSSLifecycleRule specifies how to manage bucket's lifecycle", - Type: []string{"object"}, - Properties: 
map[string]spec.Schema{ - "markInfrequentAccessAfterDays": { - SchemaProps: spec.SchemaProps{ - Description: "MarkInfrequentAccessAfterDays is the number of days before we convert the objects in the bucket to Infrequent Access (IA) storage type", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "markDeletionAfterDays": { - SchemaProps: spec.SchemaProps{ - Description: "MarkDeletionAfterDays is the number of days before we delete objects in the bucket", - Type: []string{"integer"}, - Format: "int32", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Object(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: Object{}.OpenAPISchemaType(), - Format: Object{}.OpenAPISchemaFormat(), - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Outputs(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Outputs hold parameters, artifacts, and results from a step", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "parameters": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Parameters holds the list of output parameters produced by a step", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Parameter"), - }, - }, - }, - }, - }, - "artifacts": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Artifacts 
holds the list of output artifacts produced by a step", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact"), - }, - }, - }, - }, - }, - "result": { - SchemaProps: spec.SchemaProps{ - Description: "Result holds the result (stdout) of a script template", - Type: []string{"string"}, - Format: "", - }, - }, - "exitCode": { - SchemaProps: spec.SchemaProps{ - Description: "ExitCode holds the exit code of a script template", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Parameter"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ParallelSteps(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: ParallelSteps{}.OpenAPISchemaType(), - Format: ParallelSteps{}.OpenAPISchemaFormat(), - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Parameter(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Parameter indicate a passed string parameter to a service template with an optional default value", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name is the parameter name", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "default": { - SchemaProps: spec.SchemaProps{ - Description: "Default is the default value to use for an input parameter if a value was not supplied", - Type: []string{"string"}, - Format: "", - }, - }, - "value": { - SchemaProps: spec.SchemaProps{ - Description: "Value is 
the literal value to use for the parameter. If specified in the context of an input parameter, the value takes precedence over any passed values", - Type: []string{"string"}, - Format: "", - }, - }, - "valueFrom": { - SchemaProps: spec.SchemaProps{ - Description: "ValueFrom is the source for the output parameter's value", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ValueFrom"), - }, - }, - "globalName": { - SchemaProps: spec.SchemaProps{ - Description: "GlobalName exports an output parameter to the global scope, making it available as '{{workflow.outputs.parameters.XXXX}} and in workflow.status.outputs.parameters", - Type: []string{"string"}, - Format: "", - }, - }, - "enum": { - SchemaProps: spec.SchemaProps{ - Description: "Enum holds a list of string values to choose from, for the actual value of the parameter", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "description": { - SchemaProps: spec.SchemaProps{ - Description: "Description is the parameter description", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"name"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ValueFrom"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Plugin(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Plugin is an Object with exactly one key", - Type: []string{"object"}, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_PodGC(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PodGC describes how to delete completed pods as they complete", - Type: []string{"object"}, - Properties: 
map[string]spec.Schema{ - "strategy": { - SchemaProps: spec.SchemaProps{ - Description: "Strategy is the strategy to use. One of \"OnPodCompletion\", \"OnPodSuccess\", \"OnWorkflowCompletion\", \"OnWorkflowSuccess\". If unset, does not delete Pods", - Type: []string{"string"}, - Format: "", - }, - }, - "labelSelector": { - SchemaProps: spec.SchemaProps{ - Description: "LabelSelector is the label selector to check if the pods match the labels before being added to the pod GC queue.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), - }, - }, - "deleteDelayDuration": { - SchemaProps: spec.SchemaProps{ - Description: "DeleteDelayDuration specifies the duration before pods in the GC queue get deleted.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Prometheus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Prometheus is a prometheus metric to be emitted", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name is the name of the metric", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "labels": { - SchemaProps: spec.SchemaProps{ - Description: "Labels is a list of metric labels", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MetricLabel"), - }, - }, - }, - }, - }, - "help": { - SchemaProps: spec.SchemaProps{ - Description: "Help is a string that describes the metric", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "when": { - SchemaProps: spec.SchemaProps{ - Description: "When is a conditional statement that decides when to emit 
the metric", - Type: []string{"string"}, - Format: "", - }, - }, - "gauge": { - SchemaProps: spec.SchemaProps{ - Description: "Gauge is a gauge metric", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Gauge"), - }, - }, - "histogram": { - SchemaProps: spec.SchemaProps{ - Description: "Histogram is a histogram metric", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Histogram"), - }, - }, - "counter": { - SchemaProps: spec.SchemaProps{ - Description: "Counter is a counter metric", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Counter"), - }, - }, - }, - Required: []string{"name", "help"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Counter", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Gauge", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Histogram", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MetricLabel"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_RawArtifact(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "RawArtifact allows raw string content to be placed as an artifact in a container", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "data": { - SchemaProps: spec.SchemaProps{ - Description: "Data is the string contents of the artifact", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"data"}, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ResourceTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ResourceTemplate is a template subtype to manipulate kubernetes resources", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - 
"action": { - SchemaProps: spec.SchemaProps{ - Description: "Action is the action to perform to the resource. Must be one of: get, create, apply, delete, replace, patch", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "mergeStrategy": { - SchemaProps: spec.SchemaProps{ - Description: "MergeStrategy is the strategy used to merge a patch. It defaults to \"strategic\" Must be one of: strategic, merge, json", - Type: []string{"string"}, - Format: "", - }, - }, - "manifest": { - SchemaProps: spec.SchemaProps{ - Description: "Manifest contains the kubernetes manifest", - Type: []string{"string"}, - Format: "", - }, - }, - "manifestFrom": { - SchemaProps: spec.SchemaProps{ - Description: "ManifestFrom is the source for a single kubernetes manifest", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ManifestFrom"), - }, - }, - "setOwnerReference": { - SchemaProps: spec.SchemaProps{ - Description: "SetOwnerReference sets the reference to the workflow on the OwnerReference of generated resource.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "successCondition": { - SchemaProps: spec.SchemaProps{ - Description: "SuccessCondition is a label selector expression which describes the conditions of the k8s resource in which it is acceptable to proceed to the following step", - Type: []string{"string"}, - Format: "", - }, - }, - "failureCondition": { - SchemaProps: spec.SchemaProps{ - Description: "FailureCondition is a label selector expression which describes the conditions of the k8s resource in which the step was considered failed", - Type: []string{"string"}, - Format: "", - }, - }, - "flags": { - SchemaProps: spec.SchemaProps{ - Description: "Flags is a set of additional options passed to kubectl before submitting a resource I.e. 
to disable resource validation: flags: [\n\t\"--validate=false\" # disable resource validation\n]", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - Required: []string{"action"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ManifestFrom"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_RetryAffinity(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "RetryAffinity prevents running steps on the same host.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "nodeAntiAffinity": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryNodeAntiAffinity"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryNodeAntiAffinity"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_RetryNodeAntiAffinity(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "RetryNodeAntiAffinity is a placeholder for future expansion, only empty nodeAntiAffinity is allowed. 
In order to prevent running steps on the same host, it uses \"kubernetes.io/hostname\".", - Type: []string{"object"}, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_RetryStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "RetryStrategy provides controls on how to retry a workflow step", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "limit": { - SchemaProps: spec.SchemaProps{ - Description: "Limit is the maximum number of retry attempts when retrying a container. It does not include the original container; the maximum number of total attempts will be `limit + 1`.", - Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), - }, - }, - "retryPolicy": { - SchemaProps: spec.SchemaProps{ - Description: "RetryPolicy is a policy of NodePhase statuses that will be retried", - Type: []string{"string"}, - Format: "", - }, - }, - "backoff": { - SchemaProps: spec.SchemaProps{ - Description: "Backoff is a backoff strategy", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Backoff"), - }, - }, - "affinity": { - SchemaProps: spec.SchemaProps{ - Description: "Affinity prevents running workflow's step on the same host", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryAffinity"), - }, - }, - "expression": { - SchemaProps: spec.SchemaProps{ - Description: "Expression is a condition expression for when a node will be retried. 
If it evaluates to false, the node will not be retried and the retry strategy will be ignored", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Backoff", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryAffinity", "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_S3Artifact(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "S3Artifact is the location of an S3 artifact", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "endpoint": { - SchemaProps: spec.SchemaProps{ - Description: "Endpoint is the hostname of the bucket endpoint", - Type: []string{"string"}, - Format: "", - }, - }, - "bucket": { - SchemaProps: spec.SchemaProps{ - Description: "Bucket is the name of the bucket", - Type: []string{"string"}, - Format: "", - }, - }, - "region": { - SchemaProps: spec.SchemaProps{ - Description: "Region contains the optional bucket region", - Type: []string{"string"}, - Format: "", - }, - }, - "insecure": { - SchemaProps: spec.SchemaProps{ - Description: "Insecure will connect to the service with TLS", - Type: []string{"boolean"}, - Format: "", - }, - }, - "accessKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "AccessKeySecret is the secret selector to the bucket's access key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "secretKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "SecretKeySecret is the secret selector to the bucket's secret key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "roleARN": { - SchemaProps: spec.SchemaProps{ - Description: "RoleARN is the Amazon Resource Name (ARN) of the role to assume.", - Type: []string{"string"}, - Format: "", - }, - }, - "useSDKCreds": { - 
SchemaProps: spec.SchemaProps{ - Description: "UseSDKCreds tells the driver to figure out credentials based on sdk defaults.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "createBucketIfNotPresent": { - SchemaProps: spec.SchemaProps{ - Description: "CreateBucketIfNotPresent tells the driver to attempt to create the S3 bucket for output artifacts, if it doesn't exist. Setting Enabled Encryption will apply either SSE-S3 to the bucket if KmsKeyId is not set or SSE-KMS if it is.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CreateS3BucketOptions"), - }, - }, - "encryptionOptions": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3EncryptionOptions"), - }, - }, - "caSecret": { - SchemaProps: spec.SchemaProps{ - Description: "CASecret specifies the secret that contains the CA, used to verify the TLS connection", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "key": { - SchemaProps: spec.SchemaProps{ - Description: "Key is the key in the bucket where the artifact resides", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CreateS3BucketOptions", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3EncryptionOptions", "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_S3ArtifactRepository(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "S3ArtifactRepository defines the controller configuration for an S3 artifact repository", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "endpoint": { - SchemaProps: spec.SchemaProps{ - Description: "Endpoint is the hostname of the bucket endpoint", - Type: []string{"string"}, - Format: "", - }, - }, - "bucket": { - 
SchemaProps: spec.SchemaProps{ - Description: "Bucket is the name of the bucket", - Type: []string{"string"}, - Format: "", - }, - }, - "region": { - SchemaProps: spec.SchemaProps{ - Description: "Region contains the optional bucket region", - Type: []string{"string"}, - Format: "", - }, - }, - "insecure": { - SchemaProps: spec.SchemaProps{ - Description: "Insecure will connect to the service with TLS", - Type: []string{"boolean"}, - Format: "", - }, - }, - "accessKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "AccessKeySecret is the secret selector to the bucket's access key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "secretKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "SecretKeySecret is the secret selector to the bucket's secret key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "roleARN": { - SchemaProps: spec.SchemaProps{ - Description: "RoleARN is the Amazon Resource Name (ARN) of the role to assume.", - Type: []string{"string"}, - Format: "", - }, - }, - "useSDKCreds": { - SchemaProps: spec.SchemaProps{ - Description: "UseSDKCreds tells the driver to figure out credentials based on sdk defaults.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "createBucketIfNotPresent": { - SchemaProps: spec.SchemaProps{ - Description: "CreateBucketIfNotPresent tells the driver to attempt to create the S3 bucket for output artifacts, if it doesn't exist. 
Setting Enabled Encryption will apply either SSE-S3 to the bucket if KmsKeyId is not set or SSE-KMS if it is.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CreateS3BucketOptions"), - }, - }, - "encryptionOptions": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3EncryptionOptions"), - }, - }, - "caSecret": { - SchemaProps: spec.SchemaProps{ - Description: "CASecret specifies the secret that contains the CA, used to verify the TLS connection", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "keyFormat": { - SchemaProps: spec.SchemaProps{ - Description: "KeyFormat defines the format of how to store keys and can reference workflow variables.", - Type: []string{"string"}, - Format: "", - }, - }, - "keyPrefix": { - SchemaProps: spec.SchemaProps{ - Description: "KeyPrefix is prefix used as part of the bucket key in which the controller will store artifacts. DEPRECATED. Use KeyFormat instead", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CreateS3BucketOptions", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3EncryptionOptions", "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_S3Bucket(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "S3Bucket contains the access information required for interfacing with an S3 bucket", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "endpoint": { - SchemaProps: spec.SchemaProps{ - Description: "Endpoint is the hostname of the bucket endpoint", - Type: []string{"string"}, - Format: "", - }, - }, - "bucket": { - SchemaProps: spec.SchemaProps{ - Description: "Bucket is the name of the bucket", - Type: []string{"string"}, - 
Format: "", - }, - }, - "region": { - SchemaProps: spec.SchemaProps{ - Description: "Region contains the optional bucket region", - Type: []string{"string"}, - Format: "", - }, - }, - "insecure": { - SchemaProps: spec.SchemaProps{ - Description: "Insecure will connect to the service with TLS", - Type: []string{"boolean"}, - Format: "", - }, - }, - "accessKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "AccessKeySecret is the secret selector to the bucket's access key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "secretKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "SecretKeySecret is the secret selector to the bucket's secret key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "roleARN": { - SchemaProps: spec.SchemaProps{ - Description: "RoleARN is the Amazon Resource Name (ARN) of the role to assume.", - Type: []string{"string"}, - Format: "", - }, - }, - "useSDKCreds": { - SchemaProps: spec.SchemaProps{ - Description: "UseSDKCreds tells the driver to figure out credentials based on sdk defaults.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "createBucketIfNotPresent": { - SchemaProps: spec.SchemaProps{ - Description: "CreateBucketIfNotPresent tells the driver to attempt to create the S3 bucket for output artifacts, if it doesn't exist. 
Setting Enabled Encryption will apply either SSE-S3 to the bucket if KmsKeyId is not set or SSE-KMS if it is.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CreateS3BucketOptions"), - }, - }, - "encryptionOptions": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3EncryptionOptions"), - }, - }, - "caSecret": { - SchemaProps: spec.SchemaProps{ - Description: "CASecret specifies the secret that contains the CA, used to verify the TLS connection", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CreateS3BucketOptions", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3EncryptionOptions", "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_S3EncryptionOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "S3EncryptionOptions used to determine encryption options during s3 operations", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kmsKeyId": { - SchemaProps: spec.SchemaProps{ - Description: "KMSKeyId tells the driver to encrypt the object using the specified KMS Key.", - Type: []string{"string"}, - Format: "", - }, - }, - "kmsEncryptionContext": { - SchemaProps: spec.SchemaProps{ - Description: "KmsEncryptionContext is a json blob that contains an encryption context. See https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context for more information", - Type: []string{"string"}, - Format: "", - }, - }, - "enableEncryption": { - SchemaProps: spec.SchemaProps{ - Description: "EnableEncryption tells the driver to encrypt objects if set to true. 
If kmsKeyId and serverSideCustomerKeySecret are not set, SSE-S3 will be used", - Type: []string{"boolean"}, - Format: "", - }, - }, - "serverSideCustomerKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "ServerSideCustomerKeySecret tells the driver to encrypt the output artifacts using SSE-C with the specified secret.", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ScriptTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ScriptTemplate is a template subtype to enable scripting through code steps", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "image": { - SchemaProps: spec.SchemaProps{ - Description: "Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", - Type: []string{"string"}, - Format: "", - }, - }, - "command": { - SchemaProps: spec.SchemaProps{ - Description: "Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". 
Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "args": { - SchemaProps: spec.SchemaProps{ - Description: "Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "workingDir": { - SchemaProps: spec.SchemaProps{ - Description: "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. 
Cannot be updated.", - Type: []string{"string"}, - Format: "", - }, - }, - "ports": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-map-keys": []interface{}{ - "containerPort", - "protocol", - }, - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "containerPort", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.ContainerPort"), - }, - }, - }, - }, - }, - "envFrom": { - SchemaProps: spec.SchemaProps{ - Description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. 
Cannot be updated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.EnvFromSource"), - }, - }, - }, - }, - }, - "env": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "List of environment variables to set in the container. Cannot be updated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.EnvVar"), - }, - }, - }, - }, - }, - "resources": { - SchemaProps: spec.SchemaProps{ - Description: "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), - }, - }, - "volumeMounts": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "mountPath", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Pod volumes to mount into the container's filesystem. 
Cannot be updated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.VolumeMount"), - }, - }, - }, - }, - }, - "volumeDevices": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "devicePath", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "volumeDevices is the list of block devices to be used by the container.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.VolumeDevice"), - }, - }, - }, - }, - }, - "livenessProbe": { - SchemaProps: spec.SchemaProps{ - Description: "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - Ref: ref("k8s.io/api/core/v1.Probe"), - }, - }, - "readinessProbe": { - SchemaProps: spec.SchemaProps{ - Description: "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - Ref: ref("k8s.io/api/core/v1.Probe"), - }, - }, - "startupProbe": { - SchemaProps: spec.SchemaProps{ - Description: "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - Ref: ref("k8s.io/api/core/v1.Probe"), - }, - }, - "lifecycle": { - SchemaProps: spec.SchemaProps{ - Description: "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", - Ref: ref("k8s.io/api/core/v1.Lifecycle"), - }, - }, - "terminationMessagePath": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", - Type: []string{"string"}, - Format: "", - }, - }, - "terminationMessagePolicy": { - SchemaProps: spec.SchemaProps{ - Description: "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", - Type: []string{"string"}, - Format: "", - }, - }, - "imagePullPolicy": { - SchemaProps: spec.SchemaProps{ - Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", - Type: []string{"string"}, - Format: "", - }, - }, - "securityContext": { - SchemaProps: spec.SchemaProps{ - Description: "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", - Ref: ref("k8s.io/api/core/v1.SecurityContext"), - }, - }, - "stdin": { - SchemaProps: spec.SchemaProps{ - Description: "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "stdinOnce": { - SchemaProps: spec.SchemaProps{ - Description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", - Type: []string{"boolean"}, - Format: "", - }, - }, - "tty": { - SchemaProps: spec.SchemaProps{ - Description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. 
Default is false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "source": { - SchemaProps: spec.SchemaProps{ - Description: "Source contains the source code of the script to execute", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"name", "source"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_SemaphoreHolding(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "semaphore": { - SchemaProps: spec.SchemaProps{ - Description: "Semaphore stores the semaphore name.", - Type: []string{"string"}, - Format: "", - }, - }, - "holders": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Holders stores the list of current holder names in the workflow.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_SemaphoreRef(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "SemaphoreRef is a reference of Semaphore", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "configMapKeyRef": { - SchemaProps: spec.SchemaProps{ - Description: "ConfigMapKeyRef is configmap 
selector for Semaphore configuration", - Ref: ref("k8s.io/api/core/v1.ConfigMapKeySelector"), - }, - }, - "namespace": { - SchemaProps: spec.SchemaProps{ - Description: "Namespace is the namespace of the configmap, default: [namespace of workflow]", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ConfigMapKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_SemaphoreStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "holding": { - SchemaProps: spec.SchemaProps{ - Description: "Holding stores the list of resource acquired synchronization lock for workflows.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SemaphoreHolding"), - }, - }, - }, - }, - }, - "waiting": { - SchemaProps: spec.SchemaProps{ - Description: "Waiting indicates the list of current synchronization lock holders.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SemaphoreHolding"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SemaphoreHolding"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Sequence(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Sequence expands a workflow step into numeric range", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "count": { - SchemaProps: 
spec.SchemaProps{ - Description: "Count is number of elements in the sequence (default: 0). Not to be used with end", - Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), - }, - }, - "start": { - SchemaProps: spec.SchemaProps{ - Description: "Number at which to start the sequence (default: 0)", - Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), - }, - }, - "end": { - SchemaProps: spec.SchemaProps{ - Description: "Number at which to end the sequence (default: 0). Not to be used with Count", - Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), - }, - }, - "format": { - SchemaProps: spec.SchemaProps{ - Description: "Format is a printf format string to format the value in the sequence", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Submit(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "workflowTemplateRef": { - SchemaProps: spec.SchemaProps{ - Description: "WorkflowTemplateRef the workflow template to submit", - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTemplateRef"), - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Metadata optional means to customize select fields of the workflow metadata", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "arguments": { - SchemaProps: spec.SchemaProps{ - Description: "Arguments extracted from the event and then set as arguments to the workflow created.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments"), - }, - }, - }, - Required: []string{"workflowTemplateRef"}, - }, - }, - 
Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTemplateRef", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_SubmitOpts(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "SubmitOpts are workflow submission options", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name overrides metadata.name", - Type: []string{"string"}, - Format: "", - }, - }, - "generateName": { - SchemaProps: spec.SchemaProps{ - Description: "GenerateName overrides metadata.generateName", - Type: []string{"string"}, - Format: "", - }, - }, - "entryPoint": { - SchemaProps: spec.SchemaProps{ - Description: "Entrypoint overrides spec.entrypoint", - Type: []string{"string"}, - Format: "", - }, - }, - "parameters": { - SchemaProps: spec.SchemaProps{ - Description: "Parameters passes input parameters to workflow", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "serviceAccount": { - SchemaProps: spec.SchemaProps{ - Description: "ServiceAccount runs all pods in the workflow using specified ServiceAccount.", - Type: []string{"string"}, - Format: "", - }, - }, - "dryRun": { - SchemaProps: spec.SchemaProps{ - Description: "DryRun validates the workflow on the client-side without creating it. 
This option is not supported in API", - Type: []string{"boolean"}, - Format: "", - }, - }, - "serverDryRun": { - SchemaProps: spec.SchemaProps{ - Description: "ServerDryRun validates the workflow on the server-side without creating it", - Type: []string{"boolean"}, - Format: "", - }, - }, - "labels": { - SchemaProps: spec.SchemaProps{ - Description: "Labels adds to metadata.labels", - Type: []string{"string"}, - Format: "", - }, - }, - "ownerReference": { - SchemaProps: spec.SchemaProps{ - Description: "OwnerReference creates a metadata.ownerReference", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference"), - }, - }, - "annotations": { - SchemaProps: spec.SchemaProps{ - Description: "Annotations adds to metadata.labels", - Type: []string{"string"}, - Format: "", - }, - }, - "podPriorityClassName": { - SchemaProps: spec.SchemaProps{ - Description: "Set the podPriorityClassName of the workflow", - Type: []string{"string"}, - Format: "", - }, - }, - "priority": { - SchemaProps: spec.SchemaProps{ - Description: "Priority is used if controller is configured to process limited number of workflows in parallel, higher priority workflows are processed first.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_SuppliedValueFrom(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "SuppliedValueFrom is a placeholder for a value to be filled in directly, either through the CLI, API, etc.", - Type: []string{"object"}, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_SuspendTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "SuspendTemplate is a template subtype to suspend a 
workflow at a predetermined point in time", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "duration": { - SchemaProps: spec.SchemaProps{ - Description: "Duration is the seconds to wait before automatically resuming a template. Must be a string. Default unit is seconds. Could also be a Duration, e.g.: \"2m\", \"6h\"", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Synchronization(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Synchronization holds synchronization lock configuration", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "semaphore": { - SchemaProps: spec.SchemaProps{ - Description: "Semaphore holds the Semaphore configuration", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SemaphoreRef"), - }, - }, - "mutex": { - SchemaProps: spec.SchemaProps{ - Description: "Mutex holds the Mutex lock details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Mutex"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Mutex", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SemaphoreRef"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_SynchronizationStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "SynchronizationStatus stores the status of semaphore and mutex.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "semaphore": { - SchemaProps: spec.SchemaProps{ - Description: "Semaphore stores this workflow's Semaphore holder details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SemaphoreStatus"), - }, - }, - "mutex": { - SchemaProps: 
spec.SchemaProps{ - Description: "Mutex stores this workflow's mutex holder details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MutexStatus"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MutexStatus", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SemaphoreStatus"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_TTLStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "TTLStrategy is the strategy for the time to live depending on if the workflow succeeded or failed", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "secondsAfterCompletion": { - SchemaProps: spec.SchemaProps{ - Description: "SecondsAfterCompletion is the number of seconds to live after completion", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "secondsAfterSuccess": { - SchemaProps: spec.SchemaProps{ - Description: "SecondsAfterSuccess is the number of seconds to live after success", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "secondsAfterFailure": { - SchemaProps: spec.SchemaProps{ - Description: "SecondsAfterFailure is the number of seconds to live after failure", - Type: []string{"integer"}, - Format: "int32", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_TarStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "TarStrategy will tar and gzip the file or directory when saving", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "compressionLevel": { - SchemaProps: spec.SchemaProps{ - Description: "CompressionLevel specifies the gzip compression level to use for the artifact. 
Defaults to gzip.DefaultCompression.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Template(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Template is a reusable and composable unit of execution in a workflow", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name is the name of the template", - Type: []string{"string"}, - Format: "", - }, - }, - "inputs": { - SchemaProps: spec.SchemaProps{ - Description: "Inputs describe what inputs parameters and artifacts are supplied to this template", - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Inputs"), - }, - }, - "outputs": { - SchemaProps: spec.SchemaProps{ - Description: "Outputs describe the parameters and artifacts that this template produces", - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs"), - }, - }, - "nodeSelector": { - SchemaProps: spec.SchemaProps{ - Description: "NodeSelector is a selector to schedule this step of the workflow to be run on the selected node(s). Overrides the selector set at the workflow level.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "affinity": { - SchemaProps: spec.SchemaProps{ - Description: "Affinity sets the pod's scheduling constraints Overrides the affinity set at the workflow level (if any)", - Ref: ref("k8s.io/api/core/v1.Affinity"), - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Metdata sets the pods's metadata, i.e. 
annotations and labels", - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metadata"), - }, - }, - "daemon": { - SchemaProps: spec.SchemaProps{ - Description: "Daemon will allow a workflow to proceed to the next step so long as the container reaches readiness", - Type: []string{"boolean"}, - Format: "", - }, - }, - "steps": { - SchemaProps: spec.SchemaProps{ - Description: "Steps define a series of sequential/parallel workflow steps", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ParallelSteps"), - }, - }, - }, - }, - }, - "container": { - SchemaProps: spec.SchemaProps{ - Description: "Container is the main container image to run in the pod", - Ref: ref("k8s.io/api/core/v1.Container"), - }, - }, - "containerSet": { - SchemaProps: spec.SchemaProps{ - Description: "ContainerSet groups multiple containers within a single pod.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContainerSetTemplate"), - }, - }, - "script": { - SchemaProps: spec.SchemaProps{ - Description: "Script runs a portion of code against an interpreter", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ScriptTemplate"), - }, - }, - "resource": { - SchemaProps: spec.SchemaProps{ - Description: "Resource template subtype which can run k8s resources", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ResourceTemplate"), - }, - }, - "dag": { - SchemaProps: spec.SchemaProps{ - Description: "DAG template subtype which runs a DAG", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.DAGTemplate"), - }, - }, - "suspend": { - SchemaProps: spec.SchemaProps{ - Description: "Suspend template subtype which can suspend a workflow when reaching the step", - Ref: 
ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SuspendTemplate"), - }, - }, - "data": { - SchemaProps: spec.SchemaProps{ - Description: "Data is a data template", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Data"), - }, - }, - "http": { - SchemaProps: spec.SchemaProps{ - Description: "HTTP makes a HTTP request", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTP"), - }, - }, - "plugin": { - SchemaProps: spec.SchemaProps{ - Description: "Plugin is a plugin template", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Plugin"), - }, - }, - "volumes": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Volumes is a list of volumes that can be mounted by containers in a template.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Volume"), - }, - }, - }, - }, - }, - "initContainers": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "InitContainers is a list of containers which run before the main container.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.UserContainer"), - }, - }, - }, - }, - }, - "sidecars": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - 
Description: "Sidecars is a list of containers which run alongside the main container Sidecars are automatically killed when the main container completes", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.UserContainer"), - }, - }, - }, - }, - }, - "archiveLocation": { - SchemaProps: spec.SchemaProps{ - Description: "Location in which all files related to the step will be stored (logs, artifacts, etc...). Can be overridden by individual items in Outputs. If omitted, will use the default artifact repository location configured in the controller, appended with the <workflowname>/<nodename> in the key.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactLocation"), - }, - }, - "activeDeadlineSeconds": { - SchemaProps: spec.SchemaProps{ - Description: "Optional duration in seconds relative to the StartTime that the pod may be active on a node before the system actively tries to terminate the pod; value must be positive integer This field is only applicable to container and script templates.", - Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), - }, - }, - "retryStrategy": { - SchemaProps: spec.SchemaProps{ - Description: "RetryStrategy describes how to retry a template when it fails", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryStrategy"), - }, - }, - "parallelism": { - SchemaProps: spec.SchemaProps{ - Description: "Parallelism limits the max total parallel pods that can execute at the same time within the boundaries of this template invocation. 
If additional steps/dag templates are invoked, the pods created by those templates will not be counted towards this total.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "failFast": { - SchemaProps: spec.SchemaProps{ - Description: "FailFast, if specified, will fail this template if any of its child pods has failed. This is useful for when this template is expanded with `withItems`, etc.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "tolerations": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "key", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Tolerations to apply to workflow pods.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Toleration"), - }, - }, - }, - }, - }, - "schedulerName": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod will be dispatched by specified scheduler. Or it will be dispatched by workflow scope scheduler if specified. If neither specified, the pod will be dispatched by default scheduler.", - Type: []string{"string"}, - Format: "", - }, - }, - "priorityClassName": { - SchemaProps: spec.SchemaProps{ - Description: "PriorityClassName to apply to workflow pods.", - Type: []string{"string"}, - Format: "", - }, - }, - "priority": { - SchemaProps: spec.SchemaProps{ - Description: "Priority to apply to workflow pods.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "serviceAccountName": { - SchemaProps: spec.SchemaProps{ - Description: "ServiceAccountName to apply to workflow pods", - Type: []string{"string"}, - Format: "", - }, - }, - "automountServiceAccountToken": { - SchemaProps: spec.SchemaProps{ - Description: "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in pods. 
ServiceAccountName of ExecutorConfig must be specified if this value is false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "executor": { - SchemaProps: spec.SchemaProps{ - Description: "Executor holds configurations of the executor container.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ExecutorConfig"), - }, - }, - "hostAliases": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "ip", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "HostAliases is an optional list of hosts and IPs that will be injected into the pod spec", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.HostAlias"), - }, - }, - }, - }, - }, - "securityContext": { - SchemaProps: spec.SchemaProps{ - Description: "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.", - Ref: ref("k8s.io/api/core/v1.PodSecurityContext"), - }, - }, - "podSpecPatch": { - SchemaProps: spec.SchemaProps{ - Description: "PodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of container fields which are not strings (e.g. 
resource limits).", - Type: []string{"string"}, - Format: "", - }, - }, - "metrics": { - SchemaProps: spec.SchemaProps{ - Description: "Metrics are a list of metrics emitted from this template", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metrics"), - }, - }, - "synchronization": { - SchemaProps: spec.SchemaProps{ - Description: "Synchronization holds synchronization lock configuration for this template", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Synchronization"), - }, - }, - "memoize": { - SchemaProps: spec.SchemaProps{ - Description: "Memoize allows templates to use outputs generated from already executed templates", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Memoize"), - }, - }, - "timeout": { - SchemaProps: spec.SchemaProps{ - Description: "Timeout allows to set the total node execution timeout duration counting from the node's start time. This duration also includes time in which the node spends in Pending state. 
This duration may not be applied to Step or DAG templates.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactLocation", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContainerSetTemplate", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.DAGTemplate", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Data", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ExecutorConfig", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTP", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Inputs", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Memoize", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metadata", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metrics", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ParallelSteps", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Plugin", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ResourceTemplate", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryStrategy", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ScriptTemplate", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SuspendTemplate", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Synchronization", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.UserContainer", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.HostAlias", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume", "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_TemplateRef(ref 
common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "TemplateRef is a reference of template resource.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name is the resource name of the template.", - Type: []string{"string"}, - Format: "", - }, - }, - "template": { - SchemaProps: spec.SchemaProps{ - Description: "Template is the name of referred template in the resource.", - Type: []string{"string"}, - Format: "", - }, - }, - "clusterScope": { - SchemaProps: spec.SchemaProps{ - Description: "ClusterScope indicates the referred template is cluster scoped (i.e. a ClusterWorkflowTemplate).", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_TransformationStep(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "expression": { - SchemaProps: spec.SchemaProps{ - Description: "Expression defines an expr expression to apply", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"expression"}, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_UserContainer(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "UserContainer is a container specified by a user.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). 
Cannot be updated.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "image": { - SchemaProps: spec.SchemaProps{ - Description: "Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", - Type: []string{"string"}, - Format: "", - }, - }, - "command": { - SchemaProps: spec.SchemaProps{ - Description: "Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "args": { - SchemaProps: spec.SchemaProps{ - Description: "Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "workingDir": { - SchemaProps: spec.SchemaProps{ - Description: "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", - Type: []string{"string"}, - Format: "", - }, - }, - "ports": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-map-keys": []interface{}{ - "containerPort", - "protocol", - }, - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "containerPort", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.ContainerPort"), - }, - }, - }, - }, - }, - "envFrom": { - SchemaProps: spec.SchemaProps{ - Description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. 
Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.EnvFromSource"), - }, - }, - }, - }, - }, - "env": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "List of environment variables to set in the container. Cannot be updated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.EnvVar"), - }, - }, - }, - }, - }, - "resources": { - SchemaProps: spec.SchemaProps{ - Description: "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), - }, - }, - "volumeMounts": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "mountPath", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Pod volumes to mount into the container's filesystem. 
Cannot be updated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.VolumeMount"), - }, - }, - }, - }, - }, - "volumeDevices": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "devicePath", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "volumeDevices is the list of block devices to be used by the container.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.VolumeDevice"), - }, - }, - }, - }, - }, - "livenessProbe": { - SchemaProps: spec.SchemaProps{ - Description: "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - Ref: ref("k8s.io/api/core/v1.Probe"), - }, - }, - "readinessProbe": { - SchemaProps: spec.SchemaProps{ - Description: "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - Ref: ref("k8s.io/api/core/v1.Probe"), - }, - }, - "startupProbe": { - SchemaProps: spec.SchemaProps{ - Description: "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - Ref: ref("k8s.io/api/core/v1.Probe"), - }, - }, - "lifecycle": { - SchemaProps: spec.SchemaProps{ - Description: "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", - Ref: ref("k8s.io/api/core/v1.Lifecycle"), - }, - }, - "terminationMessagePath": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", - Type: []string{"string"}, - Format: "", - }, - }, - "terminationMessagePolicy": { - SchemaProps: spec.SchemaProps{ - Description: "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", - Type: []string{"string"}, - Format: "", - }, - }, - "imagePullPolicy": { - SchemaProps: spec.SchemaProps{ - Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", - Type: []string{"string"}, - Format: "", - }, - }, - "securityContext": { - SchemaProps: spec.SchemaProps{ - Description: "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", - Ref: ref("k8s.io/api/core/v1.SecurityContext"), - }, - }, - "stdin": { - SchemaProps: spec.SchemaProps{ - Description: "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "stdinOnce": { - SchemaProps: spec.SchemaProps{ - Description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", - Type: []string{"boolean"}, - Format: "", - }, - }, - "tty": { - SchemaProps: spec.SchemaProps{ - Description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "mirrorVolumeMounts": { - SchemaProps: spec.SchemaProps{ - Description: "MirrorVolumeMounts will mount the same volumes specified in the main container to the container (including artifacts), at the same mountPaths. 
This enables dind daemon to partially see the same filesystem as the main container in order to use features such as docker volume binding", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"name"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ValueFrom(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ValueFrom describes a location in which to obtain the value to a parameter", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "path": { - SchemaProps: spec.SchemaProps{ - Description: "Path in the container to retrieve an output parameter value from in container templates", - Type: []string{"string"}, - Format: "", - }, - }, - "jsonPath": { - SchemaProps: spec.SchemaProps{ - Description: "JSONPath of a resource to retrieve an output parameter value from in resource templates", - Type: []string{"string"}, - Format: "", - }, - }, - "jqFilter": { - SchemaProps: spec.SchemaProps{ - Description: "JQFilter expression against the resource object in resource templates", - Type: []string{"string"}, - Format: "", - }, - }, - "event": { - SchemaProps: spec.SchemaProps{ - Description: "Selector (https://github.com/expr-lang/expr) that is evaluated against the event to get the value of the parameter. E.g. `payload.message`", - Type: []string{"string"}, - Format: "", - }, - }, - "parameter": { - SchemaProps: spec.SchemaProps{ - Description: "Parameter reference to a step or dag task in which to retrieve an output parameter value from (e.g. 
'{{steps.mystep.outputs.myparam}}')", - Type: []string{"string"}, - Format: "", - }, - }, - "supplied": { - SchemaProps: spec.SchemaProps{ - Description: "Supplied value to be filled in directly, either through the CLI, API, etc.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SuppliedValueFrom"), - }, - }, - "configMapKeyRef": { - SchemaProps: spec.SchemaProps{ - Description: "ConfigMapKeyRef is configmap selector for input parameter configuration", - Ref: ref("k8s.io/api/core/v1.ConfigMapKeySelector"), - }, - }, - "default": { - SchemaProps: spec.SchemaProps{ - Description: "Default specifies a value to be used if retrieving the value from the specified source fails", - Type: []string{"string"}, - Format: "", - }, - }, - "expression": { - SchemaProps: spec.SchemaProps{ - Description: "Expression, if defined, is evaluated to specify the value for the parameter", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SuppliedValueFrom", "k8s.io/api/core/v1.ConfigMapKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Version(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "version": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "buildDate": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "gitCommit": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "gitTag": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "gitTreeState": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - 
"goVersion": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "compiler": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "platform": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"version", "buildDate", "gitCommit", "gitTag", "gitTreeState", "goVersion", "compiler", "platform"}, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_VolumeClaimGC(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "VolumeClaimGC describes how to delete volumes from completed Workflows", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "strategy": { - SchemaProps: spec.SchemaProps{ - Description: "Strategy is the strategy to use. One of \"OnWorkflowCompletion\", \"OnWorkflowSuccess\". Defaults to \"OnWorkflowSuccess\"", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Workflow(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Workflow is the definition of a workflow resource", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowStatus"), - }, - }, - }, - Required: []string{"metadata", "spec"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowArtifactGCTask(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkflowArtifactGCTask specifies the Artifacts that need to be deleted as well as the status of deletion", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGCSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGCStatus"), - }, - }, - }, - Required: []string{"metadata", "spec"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGCSpec", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGCStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowArtifactGCTaskList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkflowArtifactGCTaskList is list of WorkflowArtifactGCTask resources", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowArtifactGCTask"), - }, - }, - }, - }, - }, - }, - Required: []string{"metadata", "items"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowArtifactGCTask", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowEventBinding(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkflowEventBinding is the definition of an event resource", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowEventBindingSpec"), - }, - }, - }, - Required: []string{"metadata", "spec"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowEventBindingSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowEventBindingList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkflowEventBindingList is list of event resources", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowEventBinding"), - }, - }, - }, - }, - }, - }, - Required: []string{"metadata", "items"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowEventBinding", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowEventBindingSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "event": { - SchemaProps: spec.SchemaProps{ - Description: "Event is the event to bind to", - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Event"), - }, - }, - "submit": { - SchemaProps: spec.SchemaProps{ - Description: "Submit is the workflow template to submit", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Submit"), - }, - }, - 
}, - Required: []string{"event"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Event", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Submit"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowLevelArtifactGC(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkflowLevelArtifactGC describes how to delete artifacts from completed Workflows - this spec is used on the Workflow level", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "strategy": { - SchemaProps: spec.SchemaProps{ - Description: "Strategy is the strategy to use.", - Type: []string{"string"}, - Format: "", - }, - }, - "podMetadata": { - SchemaProps: spec.SchemaProps{ - Description: "PodMetadata is an optional field for specifying the Labels and Annotations that should be assigned to the Pod doing the deletion", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metadata"), - }, - }, - "serviceAccountName": { - SchemaProps: spec.SchemaProps{ - Description: "ServiceAccountName is an optional field for specifying the Service Account that should be assigned to the Pod doing the deletion", - Type: []string{"string"}, - Format: "", - }, - }, - "forceFinalizerRemoval": { - SchemaProps: spec.SchemaProps{ - Description: "ForceFinalizerRemoval: if set to true, the finalizer will be removed in the case that Artifact GC fails", - Type: []string{"boolean"}, - Format: "", - }, - }, - "podSpecPatch": { - SchemaProps: spec.SchemaProps{ - Description: "PodSpecPatch holds strategic merge patch to apply against the artgc pod spec.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metadata"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowList(ref 
common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkflowList is list of Workflow resources", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Workflow"), - }, - }, - }, - }, - }, - }, - Required: []string{"metadata", "items"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Workflow", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowMetadata(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: 
[]string{"object"}, - Properties: map[string]spec.Schema{ - "labels": { - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "annotations": { - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "labelsFrom": { - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LabelValueFrom"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LabelValueFrom"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkflowSpec is the specification of a Workflow.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "templates": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Templates is a list of workflow templates used in a workflow", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template"), - }, - }, - }, - }, - }, - 
"entrypoint": { - SchemaProps: spec.SchemaProps{ - Description: "Entrypoint is a template reference to the starting point of the workflow.", - Type: []string{"string"}, - Format: "", - }, - }, - "arguments": { - SchemaProps: spec.SchemaProps{ - Description: "Arguments contain the parameters and artifacts sent to the workflow entrypoint Parameters are referencable globally using the 'workflow' variable prefix. e.g. {{workflow.parameters.myparam}}", - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments"), - }, - }, - "serviceAccountName": { - SchemaProps: spec.SchemaProps{ - Description: "ServiceAccountName is the name of the ServiceAccount to run all pods of the workflow as.", - Type: []string{"string"}, - Format: "", - }, - }, - "automountServiceAccountToken": { - SchemaProps: spec.SchemaProps{ - Description: "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in pods. 
ServiceAccountName of ExecutorConfig must be specified if this value is false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "executor": { - SchemaProps: spec.SchemaProps{ - Description: "Executor holds configurations of executor containers of the workflow.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ExecutorConfig"), - }, - }, - "volumes": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Volumes is a list of volumes that can be mounted by containers in a workflow.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Volume"), - }, - }, - }, - }, - }, - "volumeClaimTemplates": { - SchemaProps: spec.SchemaProps{ - Description: "VolumeClaimTemplates is a list of claims that containers are allowed to reference. 
The Workflow controller will create the claims at the beginning of the workflow and delete the claims upon completion of the workflow", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaim"), - }, - }, - }, - }, - }, - "parallelism": { - SchemaProps: spec.SchemaProps{ - Description: "Parallelism limits the max total parallel pods that can execute at the same time in a workflow", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "artifactRepositoryRef": { - SchemaProps: spec.SchemaProps{ - Description: "ArtifactRepositoryRef specifies the configMap name and key containing the artifact repository config.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactRepositoryRef"), - }, - }, - "suspend": { - SchemaProps: spec.SchemaProps{ - Description: "Suspend will suspend the workflow and prevent execution of any future steps in the workflow", - Type: []string{"boolean"}, - Format: "", - }, - }, - "nodeSelector": { - SchemaProps: spec.SchemaProps{ - Description: "NodeSelector is a selector which will result in all pods of the workflow to be scheduled on the selected node(s). This is able to be overridden by a nodeSelector specified in the template.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "affinity": { - SchemaProps: spec.SchemaProps{ - Description: "Affinity sets the scheduling constraints for all pods in the workflow. 
Can be overridden by an affinity specified in the template", - Ref: ref("k8s.io/api/core/v1.Affinity"), - }, - }, - "tolerations": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "key", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Tolerations to apply to workflow pods.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Toleration"), - }, - }, - }, - }, - }, - "imagePullSecrets": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), - }, - }, - }, - }, - }, - "hostNetwork": { - SchemaProps: spec.SchemaProps{ - Description: "Host networking requested for this workflow pod. Default to false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "dnsPolicy": { - SchemaProps: spec.SchemaProps{ - Description: "Set DNS policy for workflow pods. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. 
To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.", - Type: []string{"string"}, - Format: "", - }, - }, - "dnsConfig": { - SchemaProps: spec.SchemaProps{ - Description: "PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.", - Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), - }, - }, - "onExit": { - SchemaProps: spec.SchemaProps{ - Description: "OnExit is a template reference which is invoked at the end of the workflow, irrespective of the success, failure, or error of the primary workflow.", - Type: []string{"string"}, - Format: "", - }, - }, - "ttlStrategy": { - SchemaProps: spec.SchemaProps{ - Description: "TTLStrategy limits the lifetime of a Workflow that has finished execution depending on if it Succeeded or Failed. If this struct is set, once the Workflow finishes, it will be deleted after the time to live expires. If this field is unset, the controller config map will hold the default values.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TTLStrategy"), - }, - }, - "activeDeadlineSeconds": { - SchemaProps: spec.SchemaProps{ - Description: "Optional duration in seconds relative to the workflow start time which the workflow is allowed to run before the controller terminates the workflow. A value of zero is used to terminate a Running workflow", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "priority": { - SchemaProps: spec.SchemaProps{ - Description: "Priority is used if controller is configured to process limited number of workflows in parallel. Workflows with higher priority are processed first.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "schedulerName": { - SchemaProps: spec.SchemaProps{ - Description: "Set scheduler name for all pods. Will be overridden if container/script template's scheduler name is set. 
Default scheduler will be used if neither specified.", - Type: []string{"string"}, - Format: "", - }, - }, - "podGC": { - SchemaProps: spec.SchemaProps{ - Description: "PodGC describes the strategy to use when deleting completed pods", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.PodGC"), - }, - }, - "podPriorityClassName": { - SchemaProps: spec.SchemaProps{ - Description: "PriorityClassName to apply to workflow pods.", - Type: []string{"string"}, - Format: "", - }, - }, - "podPriority": { - SchemaProps: spec.SchemaProps{ - Description: "Priority to apply to workflow pods. DEPRECATED: Use PodPriorityClassName instead.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "hostAliases": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "ip", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.HostAlias"), - }, - }, - }, - }, - }, - "securityContext": { - SchemaProps: spec.SchemaProps{ - Description: "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.", - Ref: ref("k8s.io/api/core/v1.PodSecurityContext"), - }, - }, - "podSpecPatch": { - SchemaProps: spec.SchemaProps{ - Description: "PodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of container fields which are not strings (e.g. resource limits).", - Type: []string{"string"}, - Format: "", - }, - }, - "podDisruptionBudget": { - SchemaProps: spec.SchemaProps{ - Description: "PodDisruptionBudget holds the number of concurrent disruptions that you allow for Workflow's Pods. 
Controller will automatically add the selector with workflow name, if selector is empty. Optional: Defaults to empty.", - Ref: ref("k8s.io/api/policy/v1.PodDisruptionBudgetSpec"), - }, - }, - "metrics": { - SchemaProps: spec.SchemaProps{ - Description: "Metrics are a list of metrics emitted from this Workflow", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metrics"), - }, - }, - "shutdown": { - SchemaProps: spec.SchemaProps{ - Description: "Shutdown will shutdown the workflow according to its ShutdownStrategy", - Type: []string{"string"}, - Format: "", - }, - }, - "workflowTemplateRef": { - SchemaProps: spec.SchemaProps{ - Description: "WorkflowTemplateRef holds a reference to a WorkflowTemplate for execution", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTemplateRef"), - }, - }, - "synchronization": { - SchemaProps: spec.SchemaProps{ - Description: "Synchronization holds synchronization lock configuration for this Workflow", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Synchronization"), - }, - }, - "volumeClaimGC": { - SchemaProps: spec.SchemaProps{ - Description: "VolumeClaimGC describes the strategy to use when deleting volumes from completed workflows", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.VolumeClaimGC"), - }, - }, - "retryStrategy": { - SchemaProps: spec.SchemaProps{ - Description: "RetryStrategy for all templates in the workflow.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryStrategy"), - }, - }, - "podMetadata": { - SchemaProps: spec.SchemaProps{ - Description: "PodMetadata defines additional metadata that should be applied to workflow pods", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metadata"), - }, - }, - "templateDefaults": { - SchemaProps: spec.SchemaProps{ - Description: "TemplateDefaults holds default template values that will apply to 
all templates in the Workflow, unless overridden on the template-level", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template"), - }, - }, - "archiveLogs": { - SchemaProps: spec.SchemaProps{ - Description: "ArchiveLogs indicates if the container logs should be archived", - Type: []string{"boolean"}, - Format: "", - }, - }, - "hooks": { - SchemaProps: spec.SchemaProps{ - Description: "Hooks holds the lifecycle hook which is invoked at lifecycle of step, irrespective of the success, failure, or error status of the primary step", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LifecycleHook"), - }, - }, - }, - }, - }, - "workflowMetadata": { - SchemaProps: spec.SchemaProps{ - Description: "WorkflowMetadata contains some metadata of the workflow to refer to", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowMetadata"), - }, - }, - "artifactGC": { - SchemaProps: spec.SchemaProps{ - Description: "ArtifactGC describes the strategy to use when deleting artifacts from completed or deleted workflows (applies to all output Artifacts unless Artifact.ArtifactGC is specified, which overrides this)", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowLevelArtifactGC"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactRepositoryRef", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ExecutorConfig", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LifecycleHook", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metadata", 
"github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metrics", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.PodGC", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryStrategy", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Synchronization", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TTLStrategy", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.VolumeClaimGC", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowLevelArtifactGC", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowMetadata", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTemplateRef", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.HostAlias", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PersistentVolumeClaim", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume", "k8s.io/api/policy/v1.PodDisruptionBudgetSpec"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkflowStatus contains overall status information about a workflow", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "phase": { - SchemaProps: spec.SchemaProps{ - Description: "Phase a simple, high-level summary of where the workflow is in its lifecycle. 
Will be \"\" (Unknown), \"Pending\", or \"Running\" before the workflow is completed, and \"Succeeded\", \"Failed\" or \"Error\" once the workflow has completed.", - Type: []string{"string"}, - Format: "", - }, - }, - "startedAt": { - SchemaProps: spec.SchemaProps{ - Description: "Time at which this workflow started", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "finishedAt": { - SchemaProps: spec.SchemaProps{ - Description: "Time at which this workflow completed", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "estimatedDuration": { - SchemaProps: spec.SchemaProps{ - Description: "EstimatedDuration in seconds.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "progress": { - SchemaProps: spec.SchemaProps{ - Description: "Progress to completion", - Type: []string{"string"}, - Format: "", - }, - }, - "message": { - SchemaProps: spec.SchemaProps{ - Description: "A human readable message indicating details about why the workflow is in this condition.", - Type: []string{"string"}, - Format: "", - }, - }, - "compressedNodes": { - SchemaProps: spec.SchemaProps{ - Description: "Compressed and base64 decoded Nodes map", - Type: []string{"string"}, - Format: "", - }, - }, - "nodes": { - SchemaProps: spec.SchemaProps{ - Description: "Nodes is a mapping between a node ID and the node's status.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeStatus"), - }, - }, - }, - }, - }, - "offloadNodeStatusVersion": { - SchemaProps: spec.SchemaProps{ - Description: "Whether on not node status has been offloaded to a database. If exists, then Nodes and CompressedNodes will be empty. 
This will actually be populated with a hash of the offloaded data.", - Type: []string{"string"}, - Format: "", - }, - }, - "storedTemplates": { - SchemaProps: spec.SchemaProps{ - Description: "StoredTemplates is a mapping between a template ref and the node's status.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template"), - }, - }, - }, - }, - }, - "persistentVolumeClaims": { - SchemaProps: spec.SchemaProps{ - Description: "PersistentVolumeClaims tracks all PVCs that were created as part of the workflow. The contents of this list are drained at the end of the workflow.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Volume"), - }, - }, - }, - }, - }, - "outputs": { - SchemaProps: spec.SchemaProps{ - Description: "Outputs captures output values and artifact locations produced by the workflow via global outputs", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs"), - }, - }, - "conditions": { - SchemaProps: spec.SchemaProps{ - Description: "Conditions is a list of conditions the Workflow may have", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Condition"), - }, - }, - }, - }, - }, - "resourcesDuration": { - SchemaProps: spec.SchemaProps{ - Description: "ResourcesDuration is the total for the workflow", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: 0, - Type: []string{"integer"}, - Format: "int64", - 
}, - }, - }, - }, - }, - "storedWorkflowTemplateSpec": { - SchemaProps: spec.SchemaProps{ - Description: "StoredWorkflowSpec stores the WorkflowTemplate spec for future execution.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec"), - }, - }, - "synchronization": { - SchemaProps: spec.SchemaProps{ - Description: "Synchronization stores the status of synchronization locks", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SynchronizationStatus"), - }, - }, - "artifactRepositoryRef": { - SchemaProps: spec.SchemaProps{ - Description: "ArtifactRepositoryRef is used to cache the repository to use so we do not need to determine it everytime we reconcile.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactRepositoryRefStatus"), - }, - }, - "artifactGCStatus": { - SchemaProps: spec.SchemaProps{ - Description: "ArtifactGCStatus maintains the status of Artifact Garbage Collection", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtGCStatus"), - }, - }, - "taskResultsCompletionStatus": { - SchemaProps: spec.SchemaProps{ - Description: "TaskResultsCompletionStatus tracks task result completion status (mapped by node ID). 
Used to prevent premature archiving and garbage collection.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: false, - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtGCStatus", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactRepositoryRefStatus", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Condition", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeStatus", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SynchronizationStatus", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec", "k8s.io/api/core/v1.Volume", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowStep(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkflowStep is a reference to a template to execute in a series of step", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name of the step", - Type: []string{"string"}, - Format: "", - }, - }, - "template": { - SchemaProps: spec.SchemaProps{ - Description: "Template is the name of the template to execute as the step", - Type: []string{"string"}, - Format: "", - }, - }, - "inline": { - SchemaProps: spec.SchemaProps{ - Description: "Inline is the template. 
Template must be empty if this is declared (and vice-versa).", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template"), - }, - }, - "arguments": { - SchemaProps: spec.SchemaProps{ - Description: "Arguments hold arguments to the template", - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments"), - }, - }, - "templateRef": { - SchemaProps: spec.SchemaProps{ - Description: "TemplateRef is the reference to the template resource to execute as the step.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TemplateRef"), - }, - }, - "withItems": { - SchemaProps: spec.SchemaProps{ - Description: "WithItems expands a step into multiple parallel steps from the items in the list", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Item"), - }, - }, - }, - }, - }, - "withParam": { - SchemaProps: spec.SchemaProps{ - Description: "WithParam expands a step into multiple parallel steps from the value in the parameter, which is expected to be a JSON list.", - Type: []string{"string"}, - Format: "", - }, - }, - "withSequence": { - SchemaProps: spec.SchemaProps{ - Description: "WithSequence expands a step into a numeric sequence", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Sequence"), - }, - }, - "when": { - SchemaProps: spec.SchemaProps{ - Description: "When is an expression in which the step should conditionally execute", - Type: []string{"string"}, - Format: "", - }, - }, - "continueOn": { - SchemaProps: spec.SchemaProps{ - Description: "ContinueOn makes argo to proceed with the following step even if this step fails. 
Errors and Failed states can be specified", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContinueOn"), - }, - }, - "onExit": { - SchemaProps: spec.SchemaProps{ - Description: "OnExit is a template reference which is invoked at the end of the template, irrespective of the success, failure, or error of the primary template. DEPRECATED: Use Hooks[exit].Template instead.", - Type: []string{"string"}, - Format: "", - }, - }, - "hooks": { - SchemaProps: spec.SchemaProps{ - Description: "Hooks holds the lifecycle hook which is invoked at lifecycle of step, irrespective of the success, failure, or error status of the primary step", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LifecycleHook"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContinueOn", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Item", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LifecycleHook", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Sequence", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TemplateRef"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowTaskResult(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkflowTaskResult is a used to communicate a result back to the controller. Unlike WorkflowTaskSet, it has more capacity. This is an internal type. 
Users should never create this resource directly, much like you would never create a ReplicaSet directly.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "phase": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "message": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "outputs": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs"), - }, - }, - "progress": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"metadata"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowTaskResultList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - 
SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskResult"), - }, - }, - }, - }, - }, - }, - Required: []string{"metadata", "items"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskResult", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowTaskSet(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST 
resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskSetSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskSetStatus"), - }, - }, - }, - Required: []string{"metadata", "spec"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskSetSpec", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskSetStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowTaskSetList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskSet"), - }, - }, - }, - }, - }, - }, - Required: []string{"metadata", "items"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskSet", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowTaskSetSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "tasks": { - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template"), - }, - }, - }, - }, - }, - 
}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowTaskSetStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "nodes": { - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeResult"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeResult"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkflowTemplate is the definition of a workflow template resource", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec"), - }, - }, - }, - Required: []string{"metadata", "spec"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowTemplateList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkflowTemplateList is list of WorkflowTemplate resources", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTemplate"), - }, - }, - }, - }, - }, - }, - Required: []string{"metadata", "items"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTemplate", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowTemplateRef(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkflowTemplateRef is a reference to a WorkflowTemplate resource.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name is the resource name of the workflow template.", - Type: []string{"string"}, - Format: "", - }, - }, - "clusterScope": { - SchemaProps: spec.SchemaProps{ - Description: "ClusterScope indicates the referred template is cluster scoped (i.e. 
a ClusterWorkflowTemplate).", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ZipStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ZipStrategy will unzip zipped input artifacts", - Type: []string{"object"}, - }, - }, - } -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/plugin_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/plugin_types.go deleted file mode 100644 index a505a2aa2..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/plugin_types.go +++ /dev/null @@ -1,29 +0,0 @@ -package v1alpha1 - -import ( - "encoding/json" - "fmt" -) - -// Plugin is an Object with exactly one key -type Plugin struct { - Object `json:",inline" protobuf:"bytes,1,opt,name=object"` -} - -// UnmarshalJSON unmarshalls the Plugin from JSON, and also validates that it is a map exactly one key -func (p *Plugin) UnmarshalJSON(value []byte) error { - if err := p.Object.UnmarshalJSON(value); err != nil { - return err - } - // by validating the structure in UnmarshallJSON, we prevent bad data entering the system at the point of - // parsing, which means we do not need validate - m := map[string]interface{}{} - if err := json.Unmarshal(p.Object.Value, &m); err != nil { - return err - } - numKeys := len(m) - if numKeys != 1 { - return fmt.Errorf("expected exactly one key, got %d", numKeys) - } - return nil -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/progress.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/progress.go deleted file mode 100644 index 14a57f98d..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/progress.go +++ /dev/null @@ -1,54 +0,0 @@ -package v1alpha1 - -import ( - "fmt" - 
"strconv" - "strings" -) - -// Progress in N/M format. N is number of task complete. M is number of tasks. -type Progress string - -const ( - ProgressUndefined = Progress("") - ProgressZero = Progress("0/0") // zero value (not the same as "no progress) - ProgressDefault = Progress("0/1") -) - -func NewProgress(n, m int64) (Progress, bool) { - return ParseProgress(fmt.Sprintf("%v/%v", n, m)) -} - -func ParseProgress(s string) (Progress, bool) { - v := Progress(s) - return v, v.IsValid() -} - -func (in Progress) parts() []string { - return strings.SplitN(string(in), "/", 2) -} - -func (in Progress) N() int64 { - return parseInt64(in.parts()[0]) -} - -func (in Progress) M() int64 { - return parseInt64(in.parts()[1]) -} - -func (in Progress) Add(x Progress) Progress { - return Progress(fmt.Sprintf("%v/%v", in.N()+x.N(), in.M()+x.M())) -} - -func (in Progress) Complete() Progress { - return Progress(fmt.Sprintf("%v/%v", in.M(), in.M())) -} - -func (in Progress) IsValid() bool { - return in != "" && in.N() >= 0 && in.N() <= in.M() && in.M() > 0 -} - -func parseInt64(s string) int64 { - v, _ := strconv.ParseInt(s, 10, 64) - return v -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/register.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/register.go deleted file mode 100644 index b4d6738f4..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/register.go +++ /dev/null @@ -1,56 +0,0 @@ -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow" -) - -// SchemeGroupVersion is group version used to register these objects -var ( - SchemeGroupVersion = schema.GroupVersion{Group: workflow.Group, Version: "v1alpha1"} - WorkflowSchemaGroupVersionKind = schema.GroupVersionKind{Group: workflow.Group, Version: "v1alpha1", 
Kind: workflow.WorkflowKind} -) - -// Kind takes an unqualified kind and returns back a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group-qualified GroupResource. -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -// addKnownTypes adds the set of types defined in this package to the supplied scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &Workflow{}, - &WorkflowList{}, - &WorkflowEventBinding{}, - &WorkflowEventBindingList{}, - &WorkflowTemplate{}, - &WorkflowTemplateList{}, - &CronWorkflow{}, - &CronWorkflowList{}, - &ClusterWorkflowTemplate{}, - &ClusterWorkflowTemplateList{}, - &WorkflowTaskSet{}, - &WorkflowTaskSetList{}, - &WorkflowArtifactGCTask{}, - &WorkflowArtifactGCTaskList{}, - &WorkflowTaskResult{}, - &WorkflowTaskResultList{}, - &WorkflowArtifactGCTask{}, - &WorkflowArtifactGCTaskList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/task_result_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/task_result_types.go deleted file mode 100644 index 6f19052a4..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/task_result_types.go +++ /dev/null @@ -1,23 +0,0 @@ -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// WorkflowTaskResult is a used to communicate a result back to the controller. Unlike WorkflowTaskSet, it has -// more capacity. This is an internal type. 
Users should never create this resource directly, much like you would -// never create a ReplicaSet directly. -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type WorkflowTaskResult struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` - NodeResult `json:",inline" protobuf:"bytes,2,opt,name=nodeResult"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type WorkflowTaskResultList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` - Items []WorkflowTaskResult `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/task_set_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/task_set_types.go deleted file mode 100644 index b756aea70..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/task_set_types.go +++ /dev/null @@ -1,42 +0,0 @@ -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +kubebuilder:resource:shortName=wfts -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:subresource:status -type WorkflowTaskSet struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` - Spec WorkflowTaskSetSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - Status WorkflowTaskSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -type WorkflowTaskSetSpec struct { - Tasks map[string]Template `json:"tasks,omitempty" protobuf:"bytes,1,rep,name=tasks"` -} - -type WorkflowTaskSetStatus struct { - Nodes map[string]NodeResult `json:"nodes,omitempty" protobuf:"bytes,1,rep,name=nodes"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type 
WorkflowTaskSetList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` - Items []WorkflowTaskSet `json:"items" protobuf:"bytes,2,opt,name=items"` -} - -type NodeResult struct { - Phase NodePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=NodePhase"` - Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` - Outputs *Outputs `json:"outputs,omitempty" protobuf:"bytes,3,opt,name=outputs"` - Progress Progress `json:"progress,omitempty" protobuf:"bytes,4,opt,name=progress,casttype=Progress"` -} - -func (in NodeResult) Fulfilled() bool { - return in.Phase.Fulfilled() -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/utils.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/utils.go deleted file mode 100644 index c0f348df7..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/utils.go +++ /dev/null @@ -1,20 +0,0 @@ -package v1alpha1 - -import ( - "fmt" - "strconv" - "time" -) - -func ParseStringToDuration(durationString string) (time.Duration, error) { - var duration time.Duration - // If no units are attached, treat as seconds - if val, err := strconv.Atoi(durationString); err == nil { - duration = time.Duration(val) * time.Second - } else if parsed, err := time.ParseDuration(durationString); err == nil { - duration = parsed - } else { - return 0, fmt.Errorf("unable to parse %s as a duration: %w", durationString, err) - } - return duration, nil -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/validation_utils.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/validation_utils.go deleted file mode 100644 index 912725d49..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/validation_utils.go +++ /dev/null @@ -1,118 +0,0 @@ -package v1alpha1 - -import ( - "fmt" - 
"regexp" - "sort" - "strings" - - apivalidation "k8s.io/apimachinery/pkg/util/validation" -) - -const ( - workflowFieldNameFmt string = "[a-zA-Z0-9][-a-zA-Z0-9]*" - workflowFieldNameErrMsg string = "name must consist of alpha-numeric characters or '-', and must start with an alpha-numeric character" - workflowFieldMaxLength int = 128 -) - -var ( - paramOrArtifactNameRegex = regexp.MustCompile(`^[-a-zA-Z0-9_]+[-a-zA-Z0-9_]*$`) - workflowFieldNameRegex = regexp.MustCompile("^" + workflowFieldNameFmt + "$") -) - -func isValidParamOrArtifactName(p string) []string { - var errs []string - if !paramOrArtifactNameRegex.MatchString(p) { - return append(errs, "Parameter/Artifact name must consist of alpha-numeric characters, '_' or '-' e.g. my_param_1, MY-PARAM-1") - } - return errs -} - -// isValidWorkflowFieldName : workflow field name must consist of alpha-numeric characters or '-', and must start with an alpha-numeric character -func isValidWorkflowFieldName(name string) []string { - var errs []string - if len(name) > workflowFieldMaxLength { - errs = append(errs, apivalidation.MaxLenError(workflowFieldMaxLength)) - } - if !workflowFieldNameRegex.MatchString(name) { - msg := workflowFieldNameErrMsg + " (e.g. 
My-name1-2, 123-NAME)" - errs = append(errs, msg) - } - return errs -} - -// validateWorkflowFieldNames accepts a slice of strings and -// verifies that the Name field of the structs are: -// * unique -// * non-empty -// * matches matches our regex requirements -func validateWorkflowFieldNames(names []string, isParamOrArtifact bool) error { - nameSet := make(map[string]bool) - - for i, name := range names { - if name == "" { - return fmt.Errorf("[%d].name is required", i) - } - var errs []string - if isParamOrArtifact { - errs = isValidParamOrArtifactName(name) - } else { - errs = isValidWorkflowFieldName(name) - } - if len(errs) != 0 { - return fmt.Errorf("[%d].name: '%s' is invalid: %s", i, name, strings.Join(errs, ";")) - } - _, ok := nameSet[name] - if ok { - return fmt.Errorf("[%d].name '%s' is not unique", i, name) - } - nameSet[name] = true - } - return nil -} - -// validateNoCycles validates that a dependency graph has no cycles by doing a Depth-First Search -// depGraph is an adjacency list, where key is a node name and value is a list of its dependencies' names -func validateNoCycles(depGraph map[string][]string) error { - visited := make(map[string]bool) - var noCyclesHelper func(currentName string, cycyle []string) error - noCyclesHelper = func(currentName string, cycle []string) error { - if _, ok := visited[currentName]; ok { - return nil - } - depNames, ok := depGraph[currentName] - if !ok { - return nil - } - for _, depName := range depNames { - for _, name := range cycle { - if depName == name { - return fmt.Errorf("dependency cycle detected: %s->%s", strings.Join(cycle, "->"), name) - } - } - cycle = append(cycle, depName) - err := noCyclesHelper(depName, cycle) - if err != nil { - return err - } - cycle = cycle[0 : len(cycle)-1] - } - visited[currentName] = true - return nil - } - names := make([]string, 0) - for name := range depGraph { - names = append(names, name) - } - // sort names here to make sure the error message has consistent ordering 
- // so that we can verify the error message in unit tests - sort.Strings(names) - - for _, name := range names { - err := noCyclesHelper(name, []string{}) - if err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/version_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/version_types.go deleted file mode 100644 index 6845daa16..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/version_types.go +++ /dev/null @@ -1,30 +0,0 @@ -package v1alpha1 - -import ( - "errors" - "regexp" -) - -type Version struct { - Version string `json:"version" protobuf:"bytes,1,opt,name=version"` - BuildDate string `json:"buildDate" protobuf:"bytes,2,opt,name=buildDate"` - GitCommit string `json:"gitCommit" protobuf:"bytes,3,opt,name=gitCommit"` - GitTag string `json:"gitTag" protobuf:"bytes,4,opt,name=gitTag"` - GitTreeState string `json:"gitTreeState" protobuf:"bytes,5,opt,name=gitTreeState"` - GoVersion string `json:"goVersion" protobuf:"bytes,6,opt,name=goVersion"` - Compiler string `json:"compiler" protobuf:"bytes,7,opt,name=compiler"` - Platform string `json:"platform" protobuf:"bytes,8,opt,name=platform"` -} - -var verRe = regexp.MustCompile(`^v(\d+)\.(\d+)\.(\d+)`) - -// BrokenDown returns the major, minor and release components -// of the version number, or error if this is not a release -// The error path is considered "normal" in a non-release build. 
-func (v Version) Components() (string, string, string, error) { - matches := verRe.FindStringSubmatch(v.Version) - if matches == nil || matches[1] == "0" { - return ``, ``, ``, errors.New("Not a formal release") - } - return matches[1], matches[2], matches[3], nil -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/workflow_phase.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/workflow_phase.go deleted file mode 100644 index 4027b10dc..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/workflow_phase.go +++ /dev/null @@ -1,22 +0,0 @@ -package v1alpha1 - -// the workflow's phase -type WorkflowPhase string - -const ( - WorkflowUnknown WorkflowPhase = "" - WorkflowPending WorkflowPhase = "Pending" // pending some set-up - rarely used - WorkflowRunning WorkflowPhase = "Running" // any node has started; pods might not be running yet, the workflow maybe suspended too - WorkflowSucceeded WorkflowPhase = "Succeeded" - WorkflowFailed WorkflowPhase = "Failed" // it maybe that the workflow was terminated - WorkflowError WorkflowPhase = "Error" -) - -func (p WorkflowPhase) Completed() bool { - switch p { - case WorkflowSucceeded, WorkflowFailed, WorkflowError: - return true - default: - return false - } -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/workflow_template_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/workflow_template_types.go deleted file mode 100644 index 1317fc18b..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/workflow_template_types.go +++ /dev/null @@ -1,62 +0,0 @@ -package v1alpha1 - -import ( - "strings" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// WorkflowTemplate is the definition of a workflow template resource -// +genclient -// +genclient:noStatus -// +kubebuilder:resource:shortName=wftmpl -// 
+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type WorkflowTemplate struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` - Spec WorkflowSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` -} - -type WorkflowTemplates []WorkflowTemplate - -func (w WorkflowTemplates) Len() int { - return len(w) -} - -func (w WorkflowTemplates) Less(i, j int) bool { - return strings.Compare(w[j].ObjectMeta.Name, w[i].ObjectMeta.Name) > 0 -} - -func (w WorkflowTemplates) Swap(i, j int) { - w[i], w[j] = w[j], w[i] -} - -// WorkflowTemplateList is list of WorkflowTemplate resources -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type WorkflowTemplateList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` - Items WorkflowTemplates `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -var _ TemplateHolder = &WorkflowTemplate{} - -// GetTemplateByName retrieves a defined template by its name -func (wftmpl *WorkflowTemplate) GetTemplateByName(name string) *Template { - for _, t := range wftmpl.Spec.Templates { - if t.Name == name { - return &t - } - } - return nil -} - -// GetResourceScope returns the template scope of workflow template. -func (wftmpl *WorkflowTemplate) GetResourceScope() ResourceScope { - return ResourceScopeNamespaced -} - -// GetWorkflowSpec returns the WorkflowSpec of workflow template. 
-func (wftmpl *WorkflowTemplate) GetWorkflowSpec() *WorkflowSpec { - return &wftmpl.Spec -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/workflow_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/workflow_types.go deleted file mode 100644 index 8d16c9fc3..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/workflow_types.go +++ /dev/null @@ -1,3930 +0,0 @@ -package v1alpha1 - -import ( - "encoding/json" - "fmt" - "hash/fnv" - "net/url" - "os" - "path" - "path/filepath" - "reflect" - "regexp" - "runtime" - "sort" - "strings" - "time" - - apiv1 "k8s.io/api/core/v1" - policyv1 "k8s.io/api/policy/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/apimachinery/pkg/util/wait" - - log "github.com/sirupsen/logrus" - - argoerrs "github.com/argoproj/argo-workflows/v3/errors" - "github.com/argoproj/argo-workflows/v3/util/slice" -) - -// TemplateType is the type of a template -type TemplateType string - -// Possible template types -const ( - TemplateTypeContainer TemplateType = "Container" - TemplateTypeContainerSet TemplateType = "ContainerSet" - TemplateTypeSteps TemplateType = "Steps" - TemplateTypeScript TemplateType = "Script" - TemplateTypeResource TemplateType = "Resource" - TemplateTypeDAG TemplateType = "DAG" - TemplateTypeSuspend TemplateType = "Suspend" - TemplateTypeData TemplateType = "Data" - TemplateTypeHTTP TemplateType = "HTTP" - TemplateTypePlugin TemplateType = "Plugin" - TemplateTypeUnknown TemplateType = "Unknown" -) - -// NodePhase is a label for the condition of a node at the current time. 
-type NodePhase string - -// Workflow and node statuses -const ( - // Node is waiting to run - NodePending NodePhase = "Pending" - // Node is running - NodeRunning NodePhase = "Running" - // Node finished with no errors - NodeSucceeded NodePhase = "Succeeded" - // Node was skipped - NodeSkipped NodePhase = "Skipped" - // Node or child of node exited with non-0 code - NodeFailed NodePhase = "Failed" - // Node had an error other than a non 0 exit code - NodeError NodePhase = "Error" - // Node was omitted because its `depends` condition was not met (only relevant in DAGs) - NodeOmitted NodePhase = "Omitted" -) - -// NodeType is the type of a node -type NodeType string - -// Node types -const ( - NodeTypePod NodeType = "Pod" - NodeTypeContainer NodeType = "Container" - NodeTypeSteps NodeType = "Steps" - NodeTypeStepGroup NodeType = "StepGroup" - NodeTypeDAG NodeType = "DAG" - NodeTypeTaskGroup NodeType = "TaskGroup" - NodeTypeRetry NodeType = "Retry" - NodeTypeSkipped NodeType = "Skipped" - NodeTypeSuspend NodeType = "Suspend" - NodeTypeHTTP NodeType = "HTTP" - NodeTypePlugin NodeType = "Plugin" -) - -// ArtifactGCStrategy is the strategy when to delete artifacts for GC. -type ArtifactGCStrategy string - -// ArtifactGCStrategy -const ( - ArtifactGCOnWorkflowCompletion ArtifactGCStrategy = "OnWorkflowCompletion" - ArtifactGCOnWorkflowDeletion ArtifactGCStrategy = "OnWorkflowDeletion" - ArtifactGCNever ArtifactGCStrategy = "Never" - ArtifactGCStrategyUndefined ArtifactGCStrategy = "" -) - -var AnyArtifactGCStrategy = map[ArtifactGCStrategy]bool{ - ArtifactGCOnWorkflowCompletion: true, - ArtifactGCOnWorkflowDeletion: true, -} - -// PodGCStrategy is the strategy when to delete completed pods for GC. 
-type PodGCStrategy string - -func (s PodGCStrategy) IsValid() bool { - switch s { - case PodGCOnPodNone, - PodGCOnPodCompletion, - PodGCOnPodSuccess, - PodGCOnWorkflowCompletion, - PodGCOnWorkflowSuccess: - return true - } - return false -} - -// PodGCStrategy -const ( - PodGCOnPodNone PodGCStrategy = "" - PodGCOnPodCompletion PodGCStrategy = "OnPodCompletion" - PodGCOnPodSuccess PodGCStrategy = "OnPodSuccess" - PodGCOnWorkflowCompletion PodGCStrategy = "OnWorkflowCompletion" - PodGCOnWorkflowSuccess PodGCStrategy = "OnWorkflowSuccess" -) - -// VolumeClaimGCStrategy is the strategy to use when deleting volumes from completed workflows -type VolumeClaimGCStrategy string - -const ( - VolumeClaimGCOnCompletion VolumeClaimGCStrategy = "OnWorkflowCompletion" - VolumeClaimGCOnSuccess VolumeClaimGCStrategy = "OnWorkflowSuccess" -) - -type HoldingNameVersion int - -const ( - HoldingNameV1 HoldingNameVersion = 1 - HoldingNameV2 HoldingNameVersion = 2 -) - -// Workflow is the definition of a workflow resource -// +genclient -// +genclient:noStatus -// +kubebuilder:resource:shortName=wf -// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="Status of the workflow" -// +kubebuilder:printcolumn:name="Age",type="date",format="date-time",JSONPath=".status.startedAt",description="When the workflow was started" -// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Human readable message indicating details about why the workflow is in this condition." 
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type Workflow struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` - Spec WorkflowSpec `json:"spec" protobuf:"bytes,2,opt,name=spec "` - Status WorkflowStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// Workflows is a sort interface which sorts running jobs earlier before considering FinishedAt -type Workflows []Workflow - -func (w Workflows) Len() int { return len(w) } -func (w Workflows) Swap(i, j int) { w[i], w[j] = w[j], w[i] } -func (w Workflows) Less(i, j int) bool { - iStart := w[i].ObjectMeta.CreationTimestamp - iFinish := w[i].Status.FinishedAt - jStart := w[j].ObjectMeta.CreationTimestamp - jFinish := w[j].Status.FinishedAt - if iFinish.IsZero() && jFinish.IsZero() { - return !iStart.Before(&jStart) - } - if iFinish.IsZero() && !jFinish.IsZero() { - return true - } - if !iFinish.IsZero() && jFinish.IsZero() { - return false - } - return jFinish.Before(&iFinish) -} - -type WorkflowPredicate = func(wf Workflow) bool - -func (w Workflows) Filter(predicate WorkflowPredicate) Workflows { - var out Workflows - for _, wf := range w { - if predicate(wf) { - out = append(out, wf) - } - } - return out -} - -// GetTTLStrategy return TTLStrategy based on Order of precedence: -// 1. Workflow, 2. WorkflowTemplate, 3. 
Workflowdefault -func (w *Workflow) GetTTLStrategy() *TTLStrategy { - var ttlStrategy *TTLStrategy - // TTLStrategy from WorkflowTemplate - if w.Status.StoredWorkflowSpec != nil && w.Status.StoredWorkflowSpec.GetTTLStrategy() != nil { - ttlStrategy = w.Status.StoredWorkflowSpec.GetTTLStrategy() - } - // TTLStrategy from Workflow - if w.Spec.GetTTLStrategy() != nil { - ttlStrategy = w.Spec.GetTTLStrategy() - } - return ttlStrategy -} - -func (w *Workflow) GetExecSpec() *WorkflowSpec { - if w.Status.StoredWorkflowSpec != nil { - return w.Status.StoredWorkflowSpec - } - return &w.Spec -} - -// return the ultimate ArtifactGCStrategy for the Artifact -// (defined on the Workflow level but can be overridden on the Artifact level) -func (w *Workflow) GetArtifactGCStrategy(a *Artifact) ArtifactGCStrategy { - artifactStrategy := a.GetArtifactGC().GetStrategy() - wfStrategy := w.Spec.GetArtifactGC().GetStrategy() - strategy := wfStrategy - if artifactStrategy != ArtifactGCStrategyUndefined { - strategy = artifactStrategy - } - if strategy == ArtifactGCStrategyUndefined { - return ArtifactGCNever - } - return strategy -} - -var ( - WorkflowCreatedAfter = func(t time.Time) WorkflowPredicate { - return func(wf Workflow) bool { - return wf.ObjectMeta.CreationTimestamp.After(t) - } - } - WorkflowFinishedBefore = func(t time.Time) WorkflowPredicate { - return func(wf Workflow) bool { - return !wf.Status.FinishedAt.IsZero() && wf.Status.FinishedAt.Time.Before(t) - } - } - WorkflowRanBetween = func(startTime time.Time, endTime time.Time) WorkflowPredicate { - return func(wf Workflow) bool { - return wf.ObjectMeta.CreationTimestamp.After(startTime) && !wf.Status.FinishedAt.IsZero() && wf.Status.FinishedAt.Time.Before(endTime) - } - } -) - -// WorkflowList is list of Workflow resources -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type WorkflowList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata" 
protobuf:"bytes,1,opt,name=metadata"` - Items Workflows `json:"items" protobuf:"bytes,2,opt,name=items"` -} - -var _ TemplateHolder = &Workflow{} - -// TTLStrategy is the strategy for the time to live depending on if the workflow succeeded or failed -type TTLStrategy struct { - // SecondsAfterCompletion is the number of seconds to live after completion - SecondsAfterCompletion *int32 `json:"secondsAfterCompletion,omitempty" protobuf:"bytes,1,opt,name=secondsAfterCompletion"` - // SecondsAfterSuccess is the number of seconds to live after success - SecondsAfterSuccess *int32 `json:"secondsAfterSuccess,omitempty" protobuf:"bytes,2,opt,name=secondsAfterSuccess"` - // SecondsAfterFailure is the number of seconds to live after failure - SecondsAfterFailure *int32 `json:"secondsAfterFailure,omitempty" protobuf:"bytes,3,opt,name=secondsAfterFailure"` -} - -// WorkflowSpec is the specification of a Workflow. -type WorkflowSpec struct { - // Templates is a list of workflow templates used in a workflow - // +patchStrategy=merge - // +patchMergeKey=name - Templates []Template `json:"templates,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,opt,name=templates"` - - // Entrypoint is a template reference to the starting point of the workflow. - Entrypoint string `json:"entrypoint,omitempty" protobuf:"bytes,2,opt,name=entrypoint"` - - // Arguments contain the parameters and artifacts sent to the workflow entrypoint - // Parameters are referencable globally using the 'workflow' variable prefix. - // e.g. {{workflow.parameters.myparam}} - Arguments Arguments `json:"arguments,omitempty" protobuf:"bytes,3,opt,name=arguments"` - - // ServiceAccountName is the name of the ServiceAccount to run all pods of the workflow as. - ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,4,opt,name=serviceAccountName"` - - // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in pods. 
- // ServiceAccountName of ExecutorConfig must be specified if this value is false. - AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,28,opt,name=automountServiceAccountToken"` - - // Executor holds configurations of executor containers of the workflow. - Executor *ExecutorConfig `json:"executor,omitempty" protobuf:"bytes,29,opt,name=executor"` - - // Volumes is a list of volumes that can be mounted by containers in a workflow. - // +patchStrategy=merge - // +patchMergeKey=name - Volumes []apiv1.Volume `json:"volumes,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,5,opt,name=volumes"` - - // VolumeClaimTemplates is a list of claims that containers are allowed to reference. - // The Workflow controller will create the claims at the beginning of the workflow - // and delete the claims upon completion of the workflow - VolumeClaimTemplates []apiv1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,6,opt,name=volumeClaimTemplates"` - - // Parallelism limits the max total parallel pods that can execute at the same time in a workflow - Parallelism *int64 `json:"parallelism,omitempty" protobuf:"bytes,7,opt,name=parallelism"` - - // ArtifactRepositoryRef specifies the configMap name and key containing the artifact repository config. - ArtifactRepositoryRef *ArtifactRepositoryRef `json:"artifactRepositoryRef,omitempty" protobuf:"bytes,8,opt,name=artifactRepositoryRef"` - - // Suspend will suspend the workflow and prevent execution of any future steps in the workflow - Suspend *bool `json:"suspend,omitempty" protobuf:"bytes,9,opt,name=suspend"` - - // NodeSelector is a selector which will result in all pods of the workflow - // to be scheduled on the selected node(s). This is able to be overridden by - // a nodeSelector specified in the template. 
- NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,10,opt,name=nodeSelector"` - - // Affinity sets the scheduling constraints for all pods in the workflow. - // Can be overridden by an affinity specified in the template - Affinity *apiv1.Affinity `json:"affinity,omitempty" protobuf:"bytes,11,opt,name=affinity"` - - // Tolerations to apply to workflow pods. - // +patchStrategy=merge - // +patchMergeKey=key - Tolerations []apiv1.Toleration `json:"tolerations,omitempty" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,12,opt,name=tolerations"` - - // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images - // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets - // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. - // More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod - // +patchStrategy=merge - // +patchMergeKey=name - ImagePullSecrets []apiv1.LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,13,opt,name=imagePullSecrets"` - - // Host networking requested for this workflow pod. Default to false. - HostNetwork *bool `json:"hostNetwork,omitempty" protobuf:"bytes,14,opt,name=hostNetwork"` - - // Set DNS policy for workflow pods. - // Defaults to "ClusterFirst". - // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. - // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. - // To have DNS options set along with hostNetwork, you have to specify DNS policy - // explicitly to 'ClusterFirstWithHostNet'. - DNSPolicy *apiv1.DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,15,opt,name=dnsPolicy"` - - // PodDNSConfig defines the DNS parameters of a pod in addition to - // those generated from DNSPolicy. 
- DNSConfig *apiv1.PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,16,opt,name=dnsConfig"` - - // OnExit is a template reference which is invoked at the end of the - // workflow, irrespective of the success, failure, or error of the - // primary workflow. - OnExit string `json:"onExit,omitempty" protobuf:"bytes,17,opt,name=onExit"` - - // TTLStrategy limits the lifetime of a Workflow that has finished execution depending on if it - // Succeeded or Failed. If this struct is set, once the Workflow finishes, it will be - // deleted after the time to live expires. If this field is unset, - // the controller config map will hold the default values. - TTLStrategy *TTLStrategy `json:"ttlStrategy,omitempty" protobuf:"bytes,30,opt,name=ttlStrategy"` - - // Optional duration in seconds relative to the workflow start time which the workflow is - // allowed to run before the controller terminates the workflow. A value of zero is used to - // terminate a Running workflow - ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"bytes,19,opt,name=activeDeadlineSeconds"` - - // Priority is used if controller is configured to process limited number of workflows in parallel. Workflows with higher priority are processed first. - Priority *int32 `json:"priority,omitempty" protobuf:"bytes,20,opt,name=priority"` - - // Set scheduler name for all pods. - // Will be overridden if container/script template's scheduler name is set. - // Default scheduler will be used if neither specified. - // +optional - SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,21,opt,name=schedulerName"` - - // PodGC describes the strategy to use when deleting completed pods - PodGC *PodGC `json:"podGC,omitempty" protobuf:"bytes,22,opt,name=podGC"` - - // PriorityClassName to apply to workflow pods. - PodPriorityClassName string `json:"podPriorityClassName,omitempty" protobuf:"bytes,23,opt,name=podPriorityClassName"` - - // Priority to apply to workflow pods. 
- // DEPRECATED: Use PodPriorityClassName instead. - PodPriority *int32 `json:"podPriority,omitempty" protobuf:"bytes,24,opt,name=podPriority"` - - // +patchStrategy=merge - // +patchMergeKey=ip - HostAliases []apiv1.HostAlias `json:"hostAliases,omitempty" patchStrategy:"merge" patchMergeKey:"ip" protobuf:"bytes,25,opt,name=hostAliases"` - - // SecurityContext holds pod-level security attributes and common container settings. - // Optional: Defaults to empty. See type description for default values of each field. - // +optional - SecurityContext *apiv1.PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,26,opt,name=securityContext"` - - // PodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of - // container fields which are not strings (e.g. resource limits). - PodSpecPatch string `json:"podSpecPatch,omitempty" protobuf:"bytes,27,opt,name=podSpecPatch"` - - // PodDisruptionBudget holds the number of concurrent disruptions that you allow for Workflow's Pods. - // Controller will automatically add the selector with workflow name, if selector is empty. - // Optional: Defaults to empty. 
- // +optional - PodDisruptionBudget *policyv1.PodDisruptionBudgetSpec `json:"podDisruptionBudget,omitempty" protobuf:"bytes,31,opt,name=podDisruptionBudget"` - - // Metrics are a list of metrics emitted from this Workflow - Metrics *Metrics `json:"metrics,omitempty" protobuf:"bytes,32,opt,name=metrics"` - - // Shutdown will shutdown the workflow according to its ShutdownStrategy - Shutdown ShutdownStrategy `json:"shutdown,omitempty" protobuf:"bytes,33,opt,name=shutdown,casttype=ShutdownStrategy"` - - // WorkflowTemplateRef holds a reference to a WorkflowTemplate for execution - WorkflowTemplateRef *WorkflowTemplateRef `json:"workflowTemplateRef,omitempty" protobuf:"bytes,34,opt,name=workflowTemplateRef"` - - // Synchronization holds synchronization lock configuration for this Workflow - Synchronization *Synchronization `json:"synchronization,omitempty" protobuf:"bytes,35,opt,name=synchronization,casttype=Synchronization"` - - // VolumeClaimGC describes the strategy to use when deleting volumes from completed workflows - VolumeClaimGC *VolumeClaimGC `json:"volumeClaimGC,omitempty" protobuf:"bytes,36,opt,name=volumeClaimGC,casttype=VolumeClaimGC"` - - // RetryStrategy for all templates in the workflow. 
- RetryStrategy *RetryStrategy `json:"retryStrategy,omitempty" protobuf:"bytes,37,opt,name=retryStrategy"` - - // PodMetadata defines additional metadata that should be applied to workflow pods - PodMetadata *Metadata `json:"podMetadata,omitempty" protobuf:"bytes,38,opt,name=podMetadata"` - - // TemplateDefaults holds default template values that will apply to all templates in the Workflow, unless overridden on the template-level - TemplateDefaults *Template `json:"templateDefaults,omitempty" protobuf:"bytes,39,opt,name=templateDefaults"` - - // ArchiveLogs indicates if the container logs should be archived - ArchiveLogs *bool `json:"archiveLogs,omitempty" protobuf:"varint,40,opt,name=archiveLogs"` - - // Hooks holds the lifecycle hook which is invoked at lifecycle of - // step, irrespective of the success, failure, or error status of the primary step - Hooks LifecycleHooks `json:"hooks,omitempty" protobuf:"bytes,41,opt,name=hooks"` - - // WorkflowMetadata contains some metadata of the workflow to refer to - WorkflowMetadata *WorkflowMetadata `json:"workflowMetadata,omitempty" protobuf:"bytes,42,opt,name=workflowMetadata"` - - // ArtifactGC describes the strategy to use when deleting artifacts from completed or deleted workflows (applies to all output Artifacts - // unless Artifact.ArtifactGC is specified, which overrides this) - ArtifactGC *WorkflowLevelArtifactGC `json:"artifactGC,omitempty" protobuf:"bytes,43,opt,name=artifactGC"` -} - -type LabelValueFrom struct { - Expression string `json:"expression" protobuf:"bytes,1,opt,name=expression"` -} - -type WorkflowMetadata struct { - Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,1,rep,name=labels"` - Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,2,rep,name=annotations"` - LabelsFrom map[string]LabelValueFrom `json:"labelsFrom,omitempty" protobuf:"bytes,3,rep,name=labelsFrom"` -} - -func (in *WorkflowMetadata) AsObjectMeta() *metav1.ObjectMeta { - return 
&metav1.ObjectMeta{Labels: in.Labels, Annotations: in.Annotations} -} - -func (wfs *WorkflowSpec) GetExitHook(args Arguments) *LifecycleHook { - if !wfs.HasExitHook() { - return nil - } - if wfs.OnExit != "" { - return &LifecycleHook{Template: wfs.OnExit, Arguments: args} - } - return wfs.Hooks.GetExitHook().WithArgs(args) -} - -func (wfs *WorkflowSpec) HasExitHook() bool { - return (wfs.Hooks != nil && wfs.Hooks.HasExitHook()) || wfs.OnExit != "" -} - -// GetVolumeClaimGC returns the VolumeClaimGC that was defined in the workflow spec. If none was provided, a default value is returned. -func (wfs WorkflowSpec) GetVolumeClaimGC() *VolumeClaimGC { - // If no volumeClaimGC strategy was provided, we default to the equivalent of "OnSuccess" - // to match the existing behavior for back-compat - if wfs.VolumeClaimGC == nil { - return &VolumeClaimGC{Strategy: VolumeClaimGCOnSuccess} - } - - return wfs.VolumeClaimGC -} - -// ArtifactGC returns the ArtifactGC that was defined in the workflow spec. If none was provided, a default value is returned. 
func (wfs WorkflowSpec) GetArtifactGC() *ArtifactGC {
	if wfs.ArtifactGC == nil {
		// No workflow-level strategy configured: fall back to the "undefined" sentinel.
		return &ArtifactGC{Strategy: ArtifactGCStrategyUndefined}
	}

	return &wfs.ArtifactGC.ArtifactGC
}

// GetTTLStrategy returns the TTL strategy configured on the workflow spec (may be nil).
func (wfs WorkflowSpec) GetTTLStrategy() *TTLStrategy {
	return wfs.TTLStrategy
}

// GetSemaphoreKeys will return list of semaphore configmap keys which are configured in the workflow
// Example key format namespace/configmapname (argo/my-config)
// Return []string
func (wf *Workflow) GetSemaphoreKeys() []string {
	// keyMap de-duplicates keys that appear in both the spec and multiple templates.
	keyMap := make(map[string]bool)
	namespace := wf.Namespace
	var templates []Template
	if wf.Spec.WorkflowTemplateRef == nil {
		// Inline spec: read templates and synchronization from the workflow itself.
		templates = wf.Spec.Templates
		if wf.Spec.Synchronization != nil {
			if configMapRef := wf.Spec.Synchronization.getSemaphoreConfigMapRef(); configMapRef != nil {
				key := fmt.Sprintf("%s/%s", namespace, configMapRef.Name)
				keyMap[key] = true
			}
		}
	} else if wf.Status.StoredWorkflowSpec != nil {
		// Referenced WorkflowTemplate: use the stored (resolved) spec from status.
		templates = wf.Status.StoredWorkflowSpec.Templates
		if wf.Status.StoredWorkflowSpec.Synchronization != nil {
			if configMapRef := wf.Status.StoredWorkflowSpec.Synchronization.getSemaphoreConfigMapRef(); configMapRef != nil {
				key := fmt.Sprintf("%s/%s", namespace, configMapRef.Name)
				keyMap[key] = true
			}
		}
	}

	// Collect per-template semaphore configmap references as well.
	for _, tmpl := range templates {
		if tmpl.Synchronization != nil {
			if configMapRef := tmpl.Synchronization.getSemaphoreConfigMapRef(); configMapRef != nil {
				key := fmt.Sprintf("%s/%s", namespace, configMapRef.Name)
				keyMap[key] = true
			}
		}
	}
	var semaphoreKeys []string
	for key := range keyMap {
		semaphoreKeys = append(semaphoreKeys, key)
	}
	return semaphoreKeys
}

// ShutdownStrategy describes how a workflow should be shut down.
type ShutdownStrategy string

const (
	// ShutdownStrategyTerminate stops all pods, including onExit handler pods.
	ShutdownStrategyTerminate ShutdownStrategy = "Terminate"
	// ShutdownStrategyStop stops the main workflow but still allows onExit handler pods to run.
	ShutdownStrategyStop ShutdownStrategy = "Stop"
	// ShutdownStrategyNone means no shutdown has been requested.
	ShutdownStrategyNone ShutdownStrategy = ""
)

// Enabled reports whether a shutdown has been requested.
func (s ShutdownStrategy) Enabled() bool {
	return s != ShutdownStrategyNone
}

// ShouldExecute reports whether a pod may still run under this shutdown strategy;
// onExit pods are allowed to proceed under "Stop" but not under "Terminate".
func (s ShutdownStrategy) ShouldExecute(isOnExitPod bool) bool {
	switch s {
	case ShutdownStrategyTerminate:
		return false
	case ShutdownStrategyStop:
		return isOnExitPod
	default:
		return true
	}
}

// +kubebuilder:validation:Type=array
type ParallelSteps struct {
	Steps []WorkflowStep `json:"-" protobuf:"bytes,1,rep,name=steps"`
}

// WorkflowStep is an anonymous list inside of ParallelSteps (i.e. it does not have a key), so it needs its own
// custom Unmarshaller
func (p *ParallelSteps) UnmarshalJSON(value []byte) error {
	// Since we are writing a custom unmarshaller, we have to enforce the "DisallowUnknownFields" requirement manually.

	// First, get a generic representation of the contents
	var candidate []map[string]interface{}
	err := json.Unmarshal(value, &candidate)
	if err != nil {
		return err
	}

	// Generate a list of all the available JSON fields of the WorkflowStep struct
	availableFields := map[string]bool{}
	reflectType := reflect.TypeOf(WorkflowStep{})
	for i := 0; i < reflectType.NumField(); i++ {
		cleanString := strings.ReplaceAll(reflectType.Field(i).Tag.Get("json"), ",omitempty", "")
		availableFields[cleanString] = true
	}

	// Enforce that no unknown fields are present
	for _, step := range candidate {
		for key := range step {
			if _, ok := availableFields[key]; !ok {
				return fmt.Errorf(`json: unknown field "%s"`, key)
			}
		}
	}

	// Finally, attempt to fully unmarshal the struct
	err = json.Unmarshal(value, &p.Steps)
	if err != nil {
		return err
	}
	return nil
}

// MarshalJSON serializes the inner step list directly, so ParallelSteps encodes as a JSON array.
func (p ParallelSteps) MarshalJSON() ([]byte, error) {
	return json.Marshal(p.Steps)
}

// OpenAPISchemaType tells OpenAPI schema generation that this type is an array.
func (b ParallelSteps) OpenAPISchemaType() []string {
	return []string{"array"}
}

// OpenAPISchemaFormat returns the OpenAPI format for this type (none).
func (b ParallelSteps) OpenAPISchemaFormat() string { return "" }

// HasPodSpecPatch reports whether a workflow-level pod spec patch is configured.
func (wfs *WorkflowSpec) HasPodSpecPatch() bool {
	return wfs.PodSpecPatch != ""
}

// Template is a reusable and composable unit of execution in a workflow
type Template struct {
	// Name is the name of the template
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`

	// Inputs describe what inputs parameters and artifacts are supplied to this template
	Inputs Inputs `json:"inputs,omitempty" protobuf:"bytes,5,opt,name=inputs"`

	// Outputs describe the parameters and artifacts that this template produces
	Outputs Outputs `json:"outputs,omitempty" protobuf:"bytes,6,opt,name=outputs"`

	// NodeSelector is a selector to schedule this step of the workflow to be
	// run on the selected node(s). Overrides the selector set at the workflow level.
	NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,opt,name=nodeSelector"`

	// Affinity sets the pod's scheduling constraints
	// Overrides the affinity set at the workflow level (if any)
	Affinity *apiv1.Affinity `json:"affinity,omitempty" protobuf:"bytes,8,opt,name=affinity"`

	// Metadata sets the pod's metadata, i.e. annotations and labels
	Metadata Metadata `json:"metadata,omitempty" protobuf:"bytes,9,opt,name=metadata"`

	// Daemon will allow a workflow to proceed to the next step so long as the container reaches readiness
	Daemon *bool `json:"daemon,omitempty" protobuf:"bytes,10,opt,name=daemon"`

	// Steps define a series of sequential/parallel workflow steps
	Steps []ParallelSteps `json:"steps,omitempty" protobuf:"bytes,11,opt,name=steps"`

	// Container is the main container image to run in the pod
	Container *apiv1.Container `json:"container,omitempty" protobuf:"bytes,12,opt,name=container"`

	// ContainerSet groups multiple containers within a single pod.
	ContainerSet *ContainerSetTemplate `json:"containerSet,omitempty" protobuf:"bytes,40,opt,name=containerSet"`

	// Script runs a portion of code against an interpreter
	Script *ScriptTemplate `json:"script,omitempty" protobuf:"bytes,13,opt,name=script"`

	// Resource template subtype which can run k8s resources
	Resource *ResourceTemplate `json:"resource,omitempty" protobuf:"bytes,14,opt,name=resource"`

	// DAG template subtype which runs a DAG
	DAG *DAGTemplate `json:"dag,omitempty" protobuf:"bytes,15,opt,name=dag"`

	// Suspend template subtype which can suspend a workflow when reaching the step
	Suspend *SuspendTemplate `json:"suspend,omitempty" protobuf:"bytes,16,opt,name=suspend"`

	// Data is a data template
	Data *Data `json:"data,omitempty" protobuf:"bytes,39,opt,name=data"`

	// HTTP makes a HTTP request
	HTTP *HTTP `json:"http,omitempty" protobuf:"bytes,42,opt,name=http"`

	// Plugin is a plugin template
	Plugin *Plugin `json:"plugin,omitempty" protobuf:"bytes,43,opt,name=plugin"`

	// Volumes is a list of volumes that can be mounted by containers in a template.
	// +patchStrategy=merge
	// +patchMergeKey=name
	Volumes []apiv1.Volume `json:"volumes,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,17,opt,name=volumes"`

	// InitContainers is a list of containers which run before the main container.
	// +patchStrategy=merge
	// +patchMergeKey=name
	InitContainers []UserContainer `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,18,opt,name=initContainers"`

	// Sidecars is a list of containers which run alongside the main container
	// Sidecars are automatically killed when the main container completes
	// +patchStrategy=merge
	// +patchMergeKey=name
	Sidecars []UserContainer `json:"sidecars,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,19,opt,name=sidecars"`

	// Location in which all files related to the step will be stored (logs, artifacts, etc...).
	// Can be overridden by individual items in Outputs. If omitted, will use the default
	// artifact repository location configured in the controller, appended with the
	// <workflowname>/<nodename> in the key.
	ArchiveLocation *ArtifactLocation `json:"archiveLocation,omitempty" protobuf:"bytes,20,opt,name=archiveLocation"`

	// Optional duration in seconds relative to the StartTime that the pod may be active on a node
	// before the system actively tries to terminate the pod; value must be positive integer
	// This field is only applicable to container and script templates.
	ActiveDeadlineSeconds *intstr.IntOrString `json:"activeDeadlineSeconds,omitempty" protobuf:"bytes,21,opt,name=activeDeadlineSeconds"`

	// RetryStrategy describes how to retry a template when it fails
	RetryStrategy *RetryStrategy `json:"retryStrategy,omitempty" protobuf:"bytes,22,opt,name=retryStrategy"`

	// Parallelism limits the max total parallel pods that can execute at the same time within the
	// boundaries of this template invocation. If additional steps/dag templates are invoked, the
	// pods created by those templates will not be counted towards this total.
	Parallelism *int64 `json:"parallelism,omitempty" protobuf:"bytes,23,opt,name=parallelism"`

	// FailFast, if specified, will fail this template if any of its child pods has failed. This is useful for when this
	// template is expanded with `withItems`, etc.
	FailFast *bool `json:"failFast,omitempty" protobuf:"varint,41,opt,name=failFast"`

	// Tolerations to apply to workflow pods.
	// +patchStrategy=merge
	// +patchMergeKey=key
	Tolerations []apiv1.Toleration `json:"tolerations,omitempty" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,24,opt,name=tolerations"`

	// If specified, the pod will be dispatched by specified scheduler.
	// Or it will be dispatched by workflow scope scheduler if specified.
	// If neither specified, the pod will be dispatched by default scheduler.
	// +optional
	SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,25,opt,name=schedulerName"`

	// PriorityClassName to apply to workflow pods.
	PriorityClassName string `json:"priorityClassName,omitempty" protobuf:"bytes,26,opt,name=priorityClassName"`

	// Priority to apply to workflow pods.
	Priority *int32 `json:"priority,omitempty" protobuf:"bytes,27,opt,name=priority"`

	// ServiceAccountName to apply to workflow pods
	ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,28,opt,name=serviceAccountName"`

	// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in pods.
	// ServiceAccountName of ExecutorConfig must be specified if this value is false.
	AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,32,opt,name=automountServiceAccountToken"`

	// Executor holds configurations of the executor container.
	Executor *ExecutorConfig `json:"executor,omitempty" protobuf:"bytes,33,opt,name=executor"`

	// HostAliases is an optional list of hosts and IPs that will be injected into the pod spec
	// +patchStrategy=merge
	// +patchMergeKey=ip
	HostAliases []apiv1.HostAlias `json:"hostAliases,omitempty" patchStrategy:"merge" patchMergeKey:"ip" protobuf:"bytes,29,opt,name=hostAliases"`

	// SecurityContext holds pod-level security attributes and common container settings.
	// Optional: Defaults to empty. See type description for default values of each field.
	// +optional
	SecurityContext *apiv1.PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,30,opt,name=securityContext"`

	// PodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of
	// container fields which are not strings (e.g. resource limits).
	PodSpecPatch string `json:"podSpecPatch,omitempty" protobuf:"bytes,31,opt,name=podSpecPatch"`

	// Metrics are a list of metrics emitted from this template
	Metrics *Metrics `json:"metrics,omitempty" protobuf:"bytes,35,opt,name=metrics"`

	// Synchronization holds synchronization lock configuration for this template
	Synchronization *Synchronization `json:"synchronization,omitempty" protobuf:"bytes,36,opt,name=synchronization,casttype=Synchronization"`

	// Memoize allows templates to use outputs generated from already executed templates
	Memoize *Memoize `json:"memoize,omitempty" protobuf:"bytes,37,opt,name=memoize"`

	// Timeout allows to set the total node execution timeout duration counting from the node's start time.
	// This duration also includes time in which the node spends in Pending state. This duration may not be applied to Step or DAG templates.
	Timeout string `json:"timeout,omitempty" protobuf:"bytes,38,opt,name=timeout"`
}

// SetType will set the template object based on template type.
-func (tmpl *Template) SetType(tmplType TemplateType) { - switch tmplType { - case TemplateTypeSteps: - tmpl.setTemplateObjs(tmpl.Steps, nil, nil, nil, nil, nil, nil) - case TemplateTypeDAG: - tmpl.setTemplateObjs(nil, tmpl.DAG, nil, nil, nil, nil, nil) - case TemplateTypeContainer: - tmpl.setTemplateObjs(nil, nil, tmpl.Container, nil, nil, nil, nil) - case TemplateTypeScript: - tmpl.setTemplateObjs(nil, nil, nil, tmpl.Script, nil, nil, nil) - case TemplateTypeResource: - tmpl.setTemplateObjs(nil, nil, nil, nil, tmpl.Resource, nil, nil) - case TemplateTypeData: - tmpl.setTemplateObjs(nil, nil, nil, nil, nil, tmpl.Data, nil) - case TemplateTypeSuspend: - tmpl.setTemplateObjs(nil, nil, nil, nil, nil, nil, tmpl.Suspend) - } -} - -func (tmpl *Template) setTemplateObjs(steps []ParallelSteps, dag *DAGTemplate, container *apiv1.Container, script *ScriptTemplate, resource *ResourceTemplate, data *Data, suspend *SuspendTemplate) { - tmpl.Steps = steps - tmpl.DAG = dag - tmpl.Container = container - tmpl.Script = script - tmpl.Resource = resource - tmpl.Data = data - tmpl.Suspend = suspend -} - -// GetBaseTemplate returns a base template content. 
func (tmpl *Template) GetBaseTemplate() *Template {
	// Deep-copy so the caller's template is never mutated; inputs are stripped
	// because the "base" form excludes per-invocation inputs.
	baseTemplate := tmpl.DeepCopy()
	baseTemplate.Inputs = Inputs{}
	return baseTemplate
}

// HasPodSpecPatch reports whether a template-level pod spec patch is configured.
func (tmpl *Template) HasPodSpecPatch() bool {
	return tmpl.PodSpecPatch != ""
}

// GetSidecarNames returns the names of all sidecar containers declared by this template.
func (tmpl *Template) GetSidecarNames() []string {
	var containerNames []string
	for _, s := range tmpl.Sidecars {
		containerNames = append(containerNames, s.Name)
	}
	return containerNames
}

// IsFailFast reports whether FailFast is set and true.
func (tmpl *Template) IsFailFast() bool {
	return tmpl.FailFast != nil && *tmpl.FailFast
}

// HasParallelism reports whether a positive parallelism limit is configured.
func (tmpl *Template) HasParallelism() bool {
	return tmpl.Parallelism != nil && *tmpl.Parallelism > 0
}

// GetOutputs returns the template's outputs, or nil for a nil receiver.
func (tmpl *Template) GetOutputs() *Outputs {
	if tmpl != nil {
		return &tmpl.Outputs
	}
	return nil
}

type Artifacts []Artifact

// GetArtifactByName returns the first artifact with the given name, or nil if none matches.
// Note: the returned pointer addresses a copy of the slice element (range copies),
// so mutations through it do not modify the slice.
func (a Artifacts) GetArtifactByName(name string) *Artifact {
	for _, art := range a {
		if art.Name == name {
			return &art
		}
	}
	return nil
}

// Inputs are the mechanism for passing parameters, artifacts, volumes from one template to another
type Inputs struct {
	// Parameters are a list of parameters passed as inputs
	// +patchStrategy=merge
	// +patchMergeKey=name
	Parameters []Parameter `json:"parameters,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,opt,name=parameters"`

	// Artifact are a list of artifacts passed as inputs
	// +patchStrategy=merge
	// +patchMergeKey=name
	Artifacts Artifacts `json:"artifacts,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,opt,name=artifacts"`
}

// IsEmpty reports whether no parameters and no artifacts are declared.
func (in Inputs) IsEmpty() bool {
	return len(in.Parameters) == 0 && len(in.Artifacts) == 0
}

// Pod metadata
type Metadata struct {
	Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,1,opt,name=annotations"`
	Labels      map[string]string `json:"labels,omitempty" protobuf:"bytes,2,opt,name=labels"`
}

// Parameter indicates a passed string parameter to a service template with an optional default value
type Parameter struct {
	// Name is the parameter name
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`

	// Default is the default value to use for an input parameter if a value was not supplied
	Default *AnyString `json:"default,omitempty" protobuf:"bytes,2,opt,name=default"`

	// Value is the literal value to use for the parameter.
	// If specified in the context of an input parameter, the value takes precedence over any passed values
	Value *AnyString `json:"value,omitempty" protobuf:"bytes,3,opt,name=value"`

	// ValueFrom is the source for the output parameter's value
	ValueFrom *ValueFrom `json:"valueFrom,omitempty" protobuf:"bytes,4,opt,name=valueFrom"`

	// GlobalName exports an output parameter to the global scope, making it available as
	// '{{workflow.outputs.parameters.XXXX}} and in workflow.status.outputs.parameters
	GlobalName string `json:"globalName,omitempty" protobuf:"bytes,5,opt,name=globalName"`

	// Enum holds a list of string values to choose from, for the actual value of the parameter
	Enum []AnyString `json:"enum,omitempty" protobuf:"bytes,6,rep,name=enum"`

	// Description is the parameter description
	Description *AnyString `json:"description,omitempty" protobuf:"bytes,7,opt,name=description"`
}

// ValueFrom describes a location in which to obtain the value to a parameter
type ValueFrom struct {
	// Path in the container to retrieve an output parameter value from in container templates
	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`

	// JSONPath of a resource to retrieve an output parameter value from in resource templates
	JSONPath string `json:"jsonPath,omitempty" protobuf:"bytes,2,opt,name=jsonPath"`

	// JQFilter expression against the resource object in resource templates
	JQFilter string `json:"jqFilter,omitempty" protobuf:"bytes,3,opt,name=jqFilter"`

	// Selector (https://github.com/expr-lang/expr) that is evaluated against the event to get the value of the parameter. E.g. `payload.message`
	Event string `json:"event,omitempty" protobuf:"bytes,7,opt,name=event"`

	// Parameter reference to a step or dag task in which to retrieve an output parameter value from
	// (e.g. '{{steps.mystep.outputs.myparam}}')
	Parameter string `json:"parameter,omitempty" protobuf:"bytes,4,opt,name=parameter"`

	// Supplied value to be filled in directly, either through the CLI, API, etc.
	Supplied *SuppliedValueFrom `json:"supplied,omitempty" protobuf:"bytes,6,opt,name=supplied"`

	// ConfigMapKeyRef is configmap selector for input parameter configuration
	ConfigMapKeyRef *apiv1.ConfigMapKeySelector `json:"configMapKeyRef,omitempty" protobuf:"bytes,9,opt,name=configMapKeyRef"`

	// Default specifies a value to be used if retrieving the value from the specified source fails
	Default *AnyString `json:"default,omitempty" protobuf:"bytes,5,opt,name=default"`

	// Expression, if defined, is evaluated to specify the value for the parameter
	Expression string `json:"expression,omitempty" protobuf:"bytes,8,rep,name=expression"`
}

// HasValue reports whether the parameter has any value source (literal, default, or valueFrom).
func (p *Parameter) HasValue() bool {
	return p.Value != nil || p.Default != nil || p.ValueFrom != nil
}

// GetValue returns the literal value if set, otherwise the default, otherwise "".
// Note: a ValueFrom source is NOT resolved here.
func (p *Parameter) GetValue() string {
	if p.Value != nil {
		return p.Value.String()
	}
	if p.Default != nil {
		return p.Default.String()
	}
	return ""
}

// SuppliedValueFrom is a placeholder for a value to be filled in directly, either through the CLI, API, etc.
type SuppliedValueFrom struct{}

// Artifact indicates an artifact to place at a specified path
type Artifact struct {
	// name of the artifact. must be unique within a template's inputs/outputs.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`

	// Path is the container path to the artifact
	Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"`

	// mode bits to use on this file, must be a value between 0 and 0777
	// set when loading input artifacts.
	Mode *int32 `json:"mode,omitempty" protobuf:"varint,3,opt,name=mode"`

	// From allows an artifact to reference an artifact from a previous step
	From string `json:"from,omitempty" protobuf:"bytes,4,opt,name=from"`

	// ArtifactLocation contains the location of the artifact
	ArtifactLocation `json:",inline" protobuf:"bytes,5,opt,name=artifactLocation"`

	// GlobalName exports an output artifact to the global scope, making it available as
	// '{{workflow.outputs.artifacts.XXXX}} and in workflow.status.outputs.artifacts
	GlobalName string `json:"globalName,omitempty" protobuf:"bytes,6,opt,name=globalName"`

	// Archive controls how the artifact will be saved to the artifact repository.
	Archive *ArchiveStrategy `json:"archive,omitempty" protobuf:"bytes,7,opt,name=archive"`

	// Make Artifacts optional, if Artifacts doesn't generate or exist
	Optional bool `json:"optional,omitempty" protobuf:"varint,8,opt,name=optional"`

	// SubPath allows an artifact to be sourced from a subpath within the specified source
	SubPath string `json:"subPath,omitempty" protobuf:"bytes,9,opt,name=subPath"`

	// If mode is set, apply the permission recursively into the artifact if it is a folder
	RecurseMode bool `json:"recurseMode,omitempty" protobuf:"varint,10,opt,name=recurseMode"`

	// FromExpression, if defined, is evaluated to specify the value for the artifact
	FromExpression string `json:"fromExpression,omitempty" protobuf:"bytes,11,opt,name=fromExpression"`

	// ArtifactGC describes the strategy to use when to deleting an artifact from completed or deleted workflows
	ArtifactGC *ArtifactGC `json:"artifactGC,omitempty" protobuf:"bytes,12,opt,name=artifactGC"`

	// Has this been deleted?
	Deleted bool `json:"deleted,omitempty" protobuf:"varint,13,opt,name=deleted"`
}

// GetArtifactGC returns the ArtifactGC that was defined by the artifact. If none was provided, a default value is returned.
func (a *Artifact) GetArtifactGC() *ArtifactGC {
	if a.ArtifactGC == nil {
		// No per-artifact strategy configured: fall back to the "undefined" sentinel.
		return &ArtifactGC{Strategy: ArtifactGCStrategyUndefined}
	}

	return a.ArtifactGC
}

// CleanPath validates and cleans the artifact path.
func (a *Artifact) CleanPath() error {
	if a.Path == "" {
		return argoerrs.InternalErrorf("Artifact '%s' did not specify a path", a.Name)
	}

	// Ensure that the artifact path does not use directory traversal to escape a
	// "safe" sub-directory, assuming malicious user input is present. For example:
	// inputs:
	//   artifacts:
	//     - name: a1
	//       path: /tmp/safe/{{ inputs.parameters.user-input }}
	//
	// Any resolved path should always be within the /tmp/safe/ directory.
	safeDir := ""
	// Matches a path ending in "<sep>.." (the ".." component is the last element).
	slashDotDotRe := regexp.MustCompile(fmt.Sprintf(`%c..$`, os.PathSeparator))
	if runtime.GOOS == "windows" {
		// windows PathSeparator is \ and needs escaping
		slashDotDotRe = regexp.MustCompile(fmt.Sprintf(`\%c..$`, os.PathSeparator))
	}

	slashDotDotSlash := fmt.Sprintf(`%c..%c`, os.PathSeparator, os.PathSeparator)
	if strings.Contains(a.Path, slashDotDotSlash) {
		// Everything before the first "<sep>..<sep>" is the prefix traversal must not escape.
		safeDir = a.Path[:strings.Index(a.Path, slashDotDotSlash)]
	} else if slashDotDotRe.FindStringIndex(a.Path) != nil {
		safeDir = a.Path[:len(a.Path)-3]
	}
	cleaned := filepath.Clean(a.Path)
	safeDirWithSlash := fmt.Sprintf(`%s%c`, safeDir, os.PathSeparator)
	if len(safeDir) > 0 && (!strings.HasPrefix(cleaned, safeDirWithSlash) || len(cleaned) <= len(safeDirWithSlash)) {
		return argoerrs.InternalErrorf("Artifact '%s' attempted to use a path containing '..'. Directory traversal is not permitted", a.Name)
	}
	a.Path = cleaned
	return nil
}

// PodGC describes how to delete completed pods as they complete
type PodGC struct {
	// Strategy is the strategy to use. One of "OnPodCompletion", "OnPodSuccess", "OnWorkflowCompletion", "OnWorkflowSuccess". If unset, does not delete Pods
	Strategy PodGCStrategy `json:"strategy,omitempty" protobuf:"bytes,1,opt,name=strategy,casttype=PodGCStrategy"`
	// LabelSelector is the label selector to check if the pods match the labels before being added to the pod GC queue.
	LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,2,opt,name=labelSelector"`
	// DeleteDelayDuration specifies the duration before pods in the GC queue get deleted.
	DeleteDelayDuration string `json:"deleteDelayDuration,omitempty" protobuf:"bytes,3,opt,name=deleteDelayDuration"`
}

// GetLabelSelector gets the label selector from podGC.
// A nil receiver selects nothing; a nil LabelSelector selects everything.
func (podGC *PodGC) GetLabelSelector() (labels.Selector, error) {
	if podGC == nil {
		return labels.Nothing(), nil
	}
	if podGC.LabelSelector == nil {
		return labels.Everything(), nil
	}
	return metav1.LabelSelectorAsSelector(podGC.LabelSelector)
}

// GetStrategy returns the configured pod GC strategy, or PodGCOnPodNone for a nil receiver.
func (podGC *PodGC) GetStrategy() PodGCStrategy {
	if podGC != nil {
		return podGC.Strategy
	}
	return PodGCOnPodNone
}

// GetDeleteDelayDuration parses DeleteDelayDuration into a time.Duration.
func (podGC *PodGC) GetDeleteDelayDuration() (time.Duration, error) {
	if podGC == nil || podGC.DeleteDelayDuration == "" {
		return -1, nil // negative return means the field was omitted
	}
	return ParseStringToDuration(podGC.DeleteDelayDuration)
}

// WorkflowLevelArtifactGC describes how to delete artifacts from completed Workflows - this spec is used on the Workflow level
type WorkflowLevelArtifactGC struct {
	// ArtifactGC is an embedded struct
	ArtifactGC `json:",inline" protobuf:"bytes,1,opt,name=artifactGC"`

	// ForceFinalizerRemoval: if set to true, the finalizer will be removed in the case that Artifact GC fails
	ForceFinalizerRemoval bool `json:"forceFinalizerRemoval,omitempty" protobuf:"bytes,2,opt,name=forceFinalizerRemoval"`

	// PodSpecPatch holds strategic merge patch to apply against the artgc pod spec.
	PodSpecPatch string `json:"podSpecPatch,omitempty" protobuf:"bytes,3,opt,name=podSpecPatch"`
}

// ArtifactGC describes how to delete artifacts from completed Workflows - this is embedded into the WorkflowLevelArtifactGC, and also used for individual Artifacts to override that as needed
type ArtifactGC struct {
	// Strategy is the strategy to use.
	// +kubebuilder:validation:Enum="";OnWorkflowCompletion;OnWorkflowDeletion;Never
	// NOTE(review): "ArtifactGCStategy" in the casttype below is misspelled, but the tag is part of
	// the generated protobuf contract — do not change it without regenerating the proto code.
	Strategy ArtifactGCStrategy `json:"strategy,omitempty" protobuf:"bytes,1,opt,name=strategy,casttype=ArtifactGCStategy"`

	// PodMetadata is an optional field for specifying the Labels and Annotations that should be assigned to the Pod doing the deletion
	PodMetadata *Metadata `json:"podMetadata,omitempty" protobuf:"bytes,2,opt,name=podMetadata"`

	// ServiceAccountName is an optional field for specifying the Service Account that should be assigned to the Pod doing the deletion
	ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,3,opt,name=serviceAccountName"`
}

// GetStrategy returns the ArtifactGCStrategy to use, or ArtifactGCStrategyUndefined for a nil receiver.
func (agc *ArtifactGC) GetStrategy() ArtifactGCStrategy {
	if agc != nil {
		return agc.Strategy
	}
	return ArtifactGCStrategyUndefined
}

// VolumeClaimGC describes how to delete volumes from completed Workflows
type VolumeClaimGC struct {
	// Strategy is the strategy to use. One of "OnWorkflowCompletion", "OnWorkflowSuccess". Defaults to "OnWorkflowSuccess"
	Strategy VolumeClaimGCStrategy `json:"strategy,omitempty" protobuf:"bytes,1,opt,name=strategy,casttype=VolumeClaimGCStrategy"`
}

// GetStrategy returns the VolumeClaimGCStrategy to use for the workflow
func (vgc VolumeClaimGC) GetStrategy() VolumeClaimGCStrategy {
	if vgc.Strategy == "" {
		return VolumeClaimGCOnSuccess
	}

	return vgc.Strategy
}

// ArchiveStrategy describes how to archive files/directory when saving artifacts
type ArchiveStrategy struct {
	Tar  *TarStrategy  `json:"tar,omitempty" protobuf:"bytes,1,opt,name=tar"`
	None *NoneStrategy `json:"none,omitempty" protobuf:"bytes,2,opt,name=none"`
	Zip  *ZipStrategy  `json:"zip,omitempty" protobuf:"bytes,3,opt,name=zip"`
}

// TarStrategy will tar and gzip the file or directory when saving
type TarStrategy struct {
	// CompressionLevel specifies the gzip compression level to use for the artifact.
	// Defaults to gzip.DefaultCompression.
	CompressionLevel *int32 `json:"compressionLevel,omitempty" protobuf:"varint,1,opt,name=compressionLevel"`
}

// ZipStrategy will unzip zipped input artifacts
type ZipStrategy struct{}

// NoneStrategy indicates to skip tar process and upload the files or directory tree as independent
// files. Note that if the artifact is a directory, the artifact driver must support the ability to
// save/load the directory appropriately.
type NoneStrategy struct{}

// ArtifactLocationType is the interface satisfied by every concrete artifact location
// (S3, Git, HTTP, Artifactory, HDFS, Raw, OSS, GCS, Azure).
type ArtifactLocationType interface {
	HasLocation() bool
	GetKey() (string, error)
	SetKey(key string) error
}

// ArtifactLocation describes a location for a single or multiple artifacts.
// It is used as single artifact in the context of inputs/outputs (e.g. outputs.artifacts.artname).
// It is also used to describe the location of multiple artifacts such as the archive location
// of a single workflow step, which the executor will use as a default location to store its files.
type ArtifactLocation struct {
	// ArchiveLogs indicates if the container logs should be archived
	ArchiveLogs *bool `json:"archiveLogs,omitempty" protobuf:"varint,1,opt,name=archiveLogs"`

	// S3 contains S3 artifact location details
	S3 *S3Artifact `json:"s3,omitempty" protobuf:"bytes,2,opt,name=s3"`

	// Git contains git artifact location details
	Git *GitArtifact `json:"git,omitempty" protobuf:"bytes,3,opt,name=git"`

	// HTTP contains HTTP artifact location details
	HTTP *HTTPArtifact `json:"http,omitempty" protobuf:"bytes,4,opt,name=http"`

	// Artifactory contains artifactory artifact location details
	Artifactory *ArtifactoryArtifact `json:"artifactory,omitempty" protobuf:"bytes,5,opt,name=artifactory"`

	// HDFS contains HDFS artifact location details
	HDFS *HDFSArtifact `json:"hdfs,omitempty" protobuf:"bytes,6,opt,name=hdfs"`

	// Raw contains raw artifact location details
	Raw *RawArtifact `json:"raw,omitempty" protobuf:"bytes,7,opt,name=raw"`

	// OSS contains OSS artifact location details
	OSS *OSSArtifact `json:"oss,omitempty" protobuf:"bytes,8,opt,name=oss"`

	// GCS contains GCS artifact location details
	GCS *GCSArtifact `json:"gcs,omitempty" protobuf:"bytes,9,opt,name=gcs"`

	// Azure contains Azure Storage artifact location details
	Azure *AzureArtifact `json:"azure,omitempty" protobuf:"bytes,10,opt,name=azure"`
}

// Get returns the first non-nil concrete location, probing the fields in
// alphabetical order (Artifactory, Azure, Git, GCS, HDFS, HTTP, OSS, Raw, S3).
// A nil receiver, or a receiver with no location configured, returns an error.
func (a *ArtifactLocation) Get() (ArtifactLocationType, error) {
	if a == nil {
		return nil, fmt.Errorf("key unsupported: cannot get key for artifact location, because it is invalid")
	} else if a.Artifactory != nil {
		return a.Artifactory, nil
	} else if a.Azure != nil {
		return a.Azure, nil
	} else if a.Git != nil {
		return a.Git, nil
	} else if a.GCS != nil {
		return a.GCS, nil
	} else if a.HDFS != nil {
		return a.HDFS, nil
	} else if a.HTTP != nil {
		return a.HTTP, nil
	} else if a.OSS != nil {
		return a.OSS, nil
	} else if a.Raw != nil {
		return a.Raw, nil
	} else if a.S3 != nil {
		return a.S3, nil
	}
	return nil, fmt.Errorf("You need to configure artifact storage. More information on how to do this can be found in the docs: https://argo-workflows.readthedocs.io/en/release-3.5/configure-artifact-repository/")
}

// SetType sets the type of the artifact to type the argument.
// Any existing value is deleted.
// Note: the argument is used only for type dispatch; a fresh empty struct of
// that type is assigned, and other location fields are NOT cleared here.
func (a *ArtifactLocation) SetType(x ArtifactLocationType) error {
	switch v := x.(type) {
	case *ArtifactoryArtifact:
		a.Artifactory = &ArtifactoryArtifact{}
	case *AzureArtifact:
		a.Azure = &AzureArtifact{}
	case *GCSArtifact:
		a.GCS = &GCSArtifact{}
	case *HDFSArtifact:
		a.HDFS = &HDFSArtifact{}
	case *HTTPArtifact:
		a.HTTP = &HTTPArtifact{}
	case *OSSArtifact:
		a.OSS = &OSSArtifact{}
	case *RawArtifact:
		a.Raw = &RawArtifact{}
	case *S3Artifact:
		a.S3 = &S3Artifact{}
	default:
		return fmt.Errorf("set type not supported for type: %v", reflect.TypeOf(v))
	}
	return nil
}

// HasLocationOrKey reports whether the artifact has either a full location or a key.
func (a *ArtifactLocation) HasLocationOrKey() bool {
	return a.HasLocation() || a.HasKey()
}

// HasKey returns whether or not an artifact has a key. They may or may not also HasLocation.
func (a *ArtifactLocation) HasKey() bool {
	key, _ := a.GetKey()
	return key != ""
}

// set the key to a new value, use path.Join to combine items
func (a *ArtifactLocation) SetKey(key string) error {
	v, err := a.Get()
	if err != nil {
		return err
	}
	return v.SetKey(key)
}

// AppendToKey joins x onto the existing key with path.Join.
func (a *ArtifactLocation) AppendToKey(x string) error {
	key, err := a.GetKey()
	if err != nil {
		return err
	}
	return a.SetKey(path.Join(key, x))
}

// Relocate copies all location info from the parameter, except the key.
// But only if it does not have a location already.
func (a *ArtifactLocation) Relocate(l *ArtifactLocation) error {
	if a.HasLocation() {
		return nil
	}
	if l == nil {
		return fmt.Errorf("template artifact location not set")
	}
	// preserve the key across the copy: everything else comes from l
	key, err := a.GetKey()
	if err != nil {
		return err
	}
	*a = *l.DeepCopy()
	return a.SetKey(key)
}

// HasLocation whether or not an artifact has a *full* location defined
// An artifact that has a location implicitly has a key (i.e. HasKey() == true).
func (a *ArtifactLocation) HasLocation() bool {
	v, err := a.Get()
	return err == nil && v.HasLocation()
}

// IsArchiveLogs reports whether container logs should be archived (nil-safe).
func (a *ArtifactLocation) IsArchiveLogs() bool {
	return a != nil && a.ArchiveLogs != nil && *a.ArchiveLogs
}

// GetKey returns the key of the configured concrete location.
func (a *ArtifactLocation) GetKey() (string, error) {
	v, err := a.Get()
	if err != nil {
		return "", err
	}
	return v.GetKey()
}

// +protobuf.options.(gogoproto.goproto_stringer)=false
type ArtifactRepositoryRef struct {
	// The name of the config map. Defaults to "artifact-repositories".
	ConfigMap string `json:"configMap,omitempty" protobuf:"bytes,1,opt,name=configMap"`
	// The config map key. Defaults to the value of the "workflows.argoproj.io/default-artifact-repository" annotation.
	Key string `json:"key,omitempty" protobuf:"bytes,2,opt,name=key"`
}

// GetConfigMapOr returns the ref's config map name, or the given default (nil-safe).
func (r *ArtifactRepositoryRef) GetConfigMapOr(configMap string) string {
	if r == nil || r.ConfigMap == "" {
		return configMap
	}
	return r.ConfigMap
}

// GetKeyOr returns the ref's key, or the given default (nil-safe).
func (r *ArtifactRepositoryRef) GetKeyOr(key string) string {
	if r == nil || r.Key == "" {
		return key
	}
	return r.Key
}

// String renders the ref as "configMap#key"; a nil receiver renders as "nil".
func (r *ArtifactRepositoryRef) String() string {
	if r == nil {
		return "nil"
	}
	return fmt.Sprintf("%s#%s", r.ConfigMap, r.Key)
}

// +protobuf.options.(gogoproto.goproto_stringer)=false
type ArtifactRepositoryRefStatus struct {
	ArtifactRepositoryRef `json:",inline" protobuf:"bytes,1,opt,name=artifactRepositoryRef"`
	// The namespace of the config map. Defaults to the workflow's namespace, or the controller's namespace (if found).
	Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
	// If this ref represents the default artifact repository, rather than a config map.
	Default bool `json:"default,omitempty" protobuf:"varint,3,opt,name=default"`
	// The repository the workflow will use. This maybe empty before v3.1.
	ArtifactRepository *ArtifactRepository `json:"artifactRepository,omitempty" protobuf:"bytes,4,opt,name=artifactRepository"`
}

// String renders the status as "namespace/configMap#key", or a fixed string for the default repo.
func (r *ArtifactRepositoryRefStatus) String() string {
	if r == nil {
		return "nil"
	}
	if r.Default {
		return "default-artifact-repository"
	}
	return fmt.Sprintf("%s/%s", r.Namespace, r.ArtifactRepositoryRef.String())
}

// ArtifactSearchQuery filters artifacts by GC strategy, name, template, node and deletion state.
type ArtifactSearchQuery struct {
	ArtifactGCStrategies map[ArtifactGCStrategy]bool `json:"artifactGCStrategies,omitempty" protobuf:"bytes,1,rep,name=artifactGCStrategies,castkey=ArtifactGCStrategy"`
	ArtifactName         string                      `json:"artifactName,omitempty" protobuf:"bytes,2,rep,name=artifactName"`
	TemplateName         string                      `json:"templateName,omitempty" protobuf:"bytes,3,rep,name=templateName"`
	NodeId               string                      `json:"nodeId,omitempty" protobuf:"bytes,4,rep,name=nodeId"`
	Deleted              *bool                       `json:"deleted,omitempty" protobuf:"varint,5,opt,name=deleted"`
	NodeTypes            map[NodeType]bool           `json:"nodeTypes,omitempty" protobuf:"bytes,6,opt,name=nodeTypes"`
}

// ArtGCStatus maintains state related to ArtifactGC
type ArtGCStatus struct {

	// have Pods been started to perform this strategy? (enables us not to re-process what we've already done)
	StrategiesProcessed map[ArtifactGCStrategy]bool `json:"strategiesProcessed,omitempty" protobuf:"bytes,1,opt,name=strategiesProcessed"`

	// have completed Pods been processed? (mapped by Pod name)
	// used to prevent re-processing the Status of a Pod more than once
	PodsRecouped map[string]bool `json:"podsRecouped,omitempty" protobuf:"bytes,2,opt,name=podsRecouped"`

	// if this is true, we already checked to see if we need to do it and we don't
	NotSpecified bool `json:"notSpecified,omitempty" protobuf:"varint,3,opt,name=notSpecified"`
}

// SetArtifactGCStrategyProcessed records whether a strategy has been processed, lazily allocating the map.
func (gcStatus *ArtGCStatus) SetArtifactGCStrategyProcessed(strategy ArtifactGCStrategy, processed bool) {
	if gcStatus.StrategiesProcessed == nil {
		gcStatus.StrategiesProcessed = make(map[ArtifactGCStrategy]bool)
	}
	gcStatus.StrategiesProcessed[strategy] = processed
}

// IsArtifactGCStrategyProcessed reports whether the strategy was marked processed.
func (gcStatus *ArtGCStatus) IsArtifactGCStrategyProcessed(strategy ArtifactGCStrategy) bool {
	if gcStatus.StrategiesProcessed != nil {
		processed := gcStatus.StrategiesProcessed[strategy]
		return processed
	}
	return false
}

// SetArtifactGCPodRecouped records whether a GC pod has been recouped, lazily allocating the map.
func (gcStatus *ArtGCStatus) SetArtifactGCPodRecouped(podName string, recouped bool) {
	if gcStatus.PodsRecouped == nil {
		gcStatus.PodsRecouped = make(map[string]bool)
	}
	gcStatus.PodsRecouped[podName] = recouped
}

// IsArtifactGCPodRecouped reports whether the named GC pod was marked recouped.
func (gcStatus *ArtGCStatus) IsArtifactGCPodRecouped(podName string) bool {
	if gcStatus.PodsRecouped != nil {
		recouped := gcStatus.PodsRecouped[podName]
		return recouped
	}
	return false
}

// AllArtifactGCPodsRecouped reports whether at least one GC pod exists and all are recouped.
func (gcStatus *ArtGCStatus) AllArtifactGCPodsRecouped() bool {
	if gcStatus.PodsRecouped == nil {
		return false
	}
	for _, recouped := range gcStatus.PodsRecouped {
		if !recouped {
			return false
		}
	}
	return true
}

type ArtifactSearchResult struct {
	Artifact `protobuf:"bytes,1,opt,name=artifact"`
	NodeID   string `protobuf:"bytes,2,opt,name=nodeID"`
}

type ArtifactSearchResults []ArtifactSearchResult

// GetArtifacts returns just the artifacts of the results, in order.
func (asr ArtifactSearchResults) GetArtifacts() []Artifact {
	artifacts := make([]Artifact, len(asr))
	for i, result := range asr {
		artifacts[i] = result.Artifact
	}
	return artifacts
}

// NewArtifactSearchQuery returns an empty query with an allocated strategy map.
func NewArtifactSearchQuery() *ArtifactSearchQuery {
	var q ArtifactSearchQuery
	q.ArtifactGCStrategies = make(map[ArtifactGCStrategy]bool)
	return &q
}

// anyArtifactGCStrategy reports whether any strategy in the query is enabled.
func (q *ArtifactSearchQuery) anyArtifactGCStrategy() bool {
	for _, val := range q.ArtifactGCStrategies {
		if val {
			return val
		}
	}
	return false
}

// SearchArtifacts returns all output artifacts of the workflow's nodes that match q.
func (w *Workflow) SearchArtifacts(q *ArtifactSearchQuery) ArtifactSearchResults {

	var results ArtifactSearchResults

	for _, n := range w.Status.Nodes {
		if q.TemplateName != "" && n.TemplateName != q.TemplateName {
			continue
		}
		if q.NodeId != "" && n.ID != q.NodeId {
			continue
		}
		if q.NodeTypes != nil && !q.NodeTypes[n.Type] {
			continue
		}
		for _, a := range n.GetOutputs().GetArtifacts() {
			match := true
			if q.anyArtifactGCStrategy() {
				// artifact strategy is either based on overall Workflow ArtifactGC Strategy, or
				// if it's specified on the individual artifact level that takes priority
				artifactStrategy := w.GetArtifactGCStrategy(&a)
				if !q.ArtifactGCStrategies[artifactStrategy] {
					match = false
				}
			}
			if q.ArtifactName != "" && a.Name != q.ArtifactName {
				match = false
			}
			if q.Deleted != nil && a.Deleted != *q.Deleted {
				match = false
			}
			if match {
				results = append(results, ArtifactSearchResult{Artifact: a, NodeID: n.ID})
			}
		}
	}
	return results
}

// Outputs hold parameters, artifacts, and results from a step
type Outputs struct {
	// Parameters holds the list of output parameters produced by a step
	// +patchStrategy=merge
	// +patchMergeKey=name
	Parameters []Parameter `json:"parameters,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=parameters"`

	// Artifacts holds the list of output artifacts produced by a step
	// +patchStrategy=merge
	// +patchMergeKey=name
	Artifacts Artifacts `json:"artifacts,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=artifacts"`

	// Result holds the result (stdout) of a script template
	Result *string `json:"result,omitempty" protobuf:"bytes,3,opt,name=result"`

	// ExitCode holds the exit code of a script template
	ExitCode *string `json:"exitCode,omitempty" protobuf:"bytes,4,opt,name=exitCode"`
}

// GetArtifacts returns the output artifacts (nil-safe).
func (o *Outputs) GetArtifacts() Artifacts {
	if o == nil {
		return nil
	}
	return o.Artifacts
}

// WorkflowStep is a reference to a template to execute in a series of step
type WorkflowStep struct {
	// Name of the step
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`

	// Template is the name of the template to execute as the step
	Template string `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"`

	// Inline is the template. Template must be empty if this is declared (and vice-versa).
	Inline *Template `json:"inline,omitempty" protobuf:"bytes,13,opt,name=inline"`

	// Arguments hold arguments to the template
	Arguments Arguments `json:"arguments,omitempty" protobuf:"bytes,3,opt,name=arguments"`

	// TemplateRef is the reference to the template resource to execute as the step.
	TemplateRef *TemplateRef `json:"templateRef,omitempty" protobuf:"bytes,4,opt,name=templateRef"`

	// WithItems expands a step into multiple parallel steps from the items in the list
	WithItems []Item `json:"withItems,omitempty" protobuf:"bytes,5,rep,name=withItems"`

	// WithParam expands a step into multiple parallel steps from the value in the parameter,
	// which is expected to be a JSON list.
	WithParam string `json:"withParam,omitempty" protobuf:"bytes,6,opt,name=withParam"`

	// WithSequence expands a step into a numeric sequence
	WithSequence *Sequence `json:"withSequence,omitempty" protobuf:"bytes,7,opt,name=withSequence"`

	// When is an expression in which the step should conditionally execute
	When string `json:"when,omitempty" protobuf:"bytes,8,opt,name=when"`

	// ContinueOn makes argo to proceed with the following step even if this step fails.
	// Errors and Failed states can be specified
	ContinueOn *ContinueOn `json:"continueOn,omitempty" protobuf:"bytes,9,opt,name=continueOn"`

	// OnExit is a template reference which is invoked at the end of the
	// template, irrespective of the success, failure, or error of the
	// primary template.
	// DEPRECATED: Use Hooks[exit].Template instead.
	OnExit string `json:"onExit,omitempty" protobuf:"bytes,11,opt,name=onExit"`

	// Hooks holds the lifecycle hook which is invoked at lifecycle of
	// step, irrespective of the success, failure, or error status of the primary step
	Hooks LifecycleHooks `json:"hooks,omitempty" protobuf:"bytes,12,opt,name=hooks"`
}

// GetName returns the step's name.
func (step *WorkflowStep) GetName() string {
	return step.Name
}

// IsDAGTask always returns false for a WorkflowStep.
func (step *WorkflowStep) IsDAGTask() bool {
	return false
}

// IsWorkflowStep always returns true for a WorkflowStep.
func (step *WorkflowStep) IsWorkflowStep() bool {
	return true
}

type LifecycleEvent string

const (
	ExitLifecycleEvent = "exit"
)

type LifecycleHooks map[LifecycleEvent]LifecycleHook

// GetExitHook returns a pointer to a copy of the exit hook, or nil if none is set.
func (lchs LifecycleHooks) GetExitHook() *LifecycleHook {
	hook, ok := lchs[ExitLifecycleEvent]
	if ok {
		return &hook
	}
	return nil
}

// HasExitHook reports whether an exit hook is configured.
func (lchs LifecycleHooks) HasExitHook() bool {
	return lchs.GetExitHook() != nil
}

type LifecycleHook struct {
	// Template is the name of the template to execute by the hook
	Template string `json:"template,omitempty" protobuf:"bytes,1,opt,name=template"`
	// Arguments hold arguments to the template
	Arguments Arguments `json:"arguments,omitempty" protobuf:"bytes,2,opt,name=arguments"`
	// TemplateRef is the reference to the template resource to execute by the hook
	TemplateRef *TemplateRef `json:"templateRef,omitempty" protobuf:"bytes,3,opt,name=templateRef"`
	// Expression is a condition expression for when a node will be retried. If it evaluates to false, the node will not
	// be retried and the retry strategy will be ignored
	// NOTE(review): the comment above appears copy-pasted from RetryStrategy.Expression;
	// for a hook this presumably gates when the hook fires — verify against the controller.
	Expression string `json:"expression,omitempty" protobuf:"bytes,4,opt,name=expression"`
}

// WithArgs returns a deep copy of the hook with args filled in, unless the hook already has arguments.
func (lch *LifecycleHook) WithArgs(args Arguments) *LifecycleHook {
	lch1 := lch.DeepCopy()
	if lch1.Arguments.IsEmpty() {
		lch1.Arguments = args
	}
	return lch1
}

var _ TemplateReferenceHolder = &WorkflowStep{}

// HasExitHook reports whether the step has an exit hook via Hooks or the deprecated OnExit field.
func (step *WorkflowStep) HasExitHook() bool {
	return (step.Hooks != nil && step.Hooks.HasExitHook()) || step.OnExit != ""
}

// GetExitHook returns the exit hook with args applied; the deprecated OnExit field takes priority.
func (step *WorkflowStep) GetExitHook(args Arguments) *LifecycleHook {
	if !step.HasExitHook() {
		return nil
	}
	if step.OnExit != "" {
		return &LifecycleHook{Template: step.OnExit, Arguments: args}
	}
	return step.Hooks.GetExitHook().WithArgs(args)
}

// GetTemplate returns the inline template, if any.
func (step *WorkflowStep) GetTemplate() *Template {
	return step.Inline
}

// GetTemplateName returns the name of the referenced template.
func (step *WorkflowStep) GetTemplateName() string {
	return step.Template
}

// GetTemplateRef returns the template resource reference, if any.
func (step *WorkflowStep) GetTemplateRef() *TemplateRef {
	return step.TemplateRef
}

// ShouldExpand reports whether the step fans out via withItems, withParam or withSequence.
func (step *WorkflowStep) ShouldExpand() bool {
	return len(step.WithItems) != 0 || step.WithParam != "" || step.WithSequence != nil
}

// Sequence expands a workflow step into numeric range
type Sequence struct {
	// Count is number of elements in the sequence (default: 0). Not to be used with end
	Count *intstr.IntOrString `json:"count,omitempty" protobuf:"bytes,1,opt,name=count"`

	// Number at which to start the sequence (default: 0)
	Start *intstr.IntOrString `json:"start,omitempty" protobuf:"bytes,2,opt,name=start"`

	// Number at which to end the sequence (default: 0). Not to be used with Count
	End *intstr.IntOrString `json:"end,omitempty" protobuf:"bytes,3,opt,name=end"`

	// Format is a printf format string to format the value in the sequence
	Format string `json:"format,omitempty" protobuf:"bytes,4,opt,name=format"`
}

// TemplateRef is a reference of template resource.
type TemplateRef struct {
	// Name is the resource name of the template.
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
	// Template is the name of referred template in the resource.
	Template string `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"`
	// ClusterScope indicates the referred template is cluster scoped (i.e. a ClusterWorkflowTemplate).
	ClusterScope bool `json:"clusterScope,omitempty" protobuf:"varint,4,opt,name=clusterScope"`
}

// Synchronization holds synchronization lock configuration
type Synchronization struct {
	// Semaphore holds the Semaphore configuration
	Semaphore *SemaphoreRef `json:"semaphore,omitempty" protobuf:"bytes,1,opt,name=semaphore"`
	// Mutex holds the Mutex lock details
	Mutex *Mutex `json:"mutex,omitempty" protobuf:"bytes,2,opt,name=mutex"`
}

// getSemaphoreConfigMapRef returns the semaphore's config map selector, or nil if not configured.
func (s *Synchronization) getSemaphoreConfigMapRef() *apiv1.ConfigMapKeySelector {
	if s.Semaphore != nil && s.Semaphore.ConfigMapKeyRef != nil {
		return s.Semaphore.ConfigMapKeyRef
	}
	return nil
}

type SynchronizationType string

const (
	SynchronizationTypeSemaphore SynchronizationType = "Semaphore"
	SynchronizationTypeMutex     SynchronizationType = "Mutex"
	SynchronizationTypeUnknown   SynchronizationType = "Unknown"
)

// GetType returns which kind of lock is configured; Semaphore takes priority over Mutex.
func (s *Synchronization) GetType() SynchronizationType {
	if s.Semaphore != nil {
		return SynchronizationTypeSemaphore
	} else if s.Mutex != nil {
		return SynchronizationTypeMutex
	}
	return SynchronizationTypeUnknown
}

// SemaphoreRef is a reference of Semaphore
type SemaphoreRef struct {
	// ConfigMapKeyRef is configmap selector for Semaphore configuration
	ConfigMapKeyRef *apiv1.ConfigMapKeySelector `json:"configMapKeyRef,omitempty" protobuf:"bytes,1,opt,name=configMapKeyRef"`
	// Namespace is the namespace of the configmap, default: [namespace of workflow]
	Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
}

// Mutex holds Mutex configuration
type Mutex struct {
	// name of the mutex
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
	// Namespace is the namespace of the mutex, default: [namespace of workflow]
	Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
}

// WorkflowTemplateRef is a reference to a WorkflowTemplate resource.
type WorkflowTemplateRef struct {
	// Name is the resource name of the workflow template.
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
	// ClusterScope indicates the referred template is cluster scoped (i.e. a ClusterWorkflowTemplate).
	ClusterScope bool `json:"clusterScope,omitempty" protobuf:"varint,2,opt,name=clusterScope"`
}

// ToTemplateRef builds a TemplateRef pointing at the given template within this workflow template.
func (ref *WorkflowTemplateRef) ToTemplateRef(template string) *TemplateRef {
	return &TemplateRef{
		Name:         ref.Name,
		ClusterScope: ref.ClusterScope,
		Template:     template,
	}
}

// ArgumentsProvider looks up parameters and artifacts by name.
type ArgumentsProvider interface {
	GetParameterByName(name string) *Parameter
	GetArtifactByName(name string) *Artifact
}

// Arguments to a template
type Arguments struct {
	// Parameters is the list of parameters to pass to the template or workflow
	// +patchStrategy=merge
	// +patchMergeKey=name
	Parameters []Parameter `json:"parameters,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=parameters"`

	// Artifacts is the list of artifacts to pass to the template or workflow
	// +patchStrategy=merge
	// +patchMergeKey=name
	Artifacts Artifacts `json:"artifacts,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=artifacts"`
}

// IsEmpty reports whether the arguments carry neither parameters nor artifacts.
func (a Arguments) IsEmpty() bool {
	return len(a.Parameters) == 0 && len(a.Artifacts) == 0
}

var _ ArgumentsProvider = &Arguments{}

// Nodes is a map of node ID to node status.
type Nodes map[string]NodeStatus

// FindByDisplayName returns the first node with the given display name, or nil.
func (n Nodes) FindByDisplayName(name string) *NodeStatus {
	return n.Find(NodeWithDisplayName(name))
}

// FindByName returns the first node with the given name, or nil.
func (n Nodes) FindByName(name string) *NodeStatus {
	return n.Find(NodeWithName(name))
}

-func (in Nodes) Any(f func(NodeStatus) bool) bool { - return in.Find(f) != nil -} - -func (n Nodes) Find(f func(NodeStatus) bool) *NodeStatus { - for _, i := range n { - if f(i) { - return &i - } - } - return nil -} - -// Get a NodeStatus from the hashmap of Nodes. -// Return a nil along with an error if non existent. -func (n Nodes) Get(key string) (*NodeStatus, error) { - val, ok := n[key] - if !ok { - return nil, fmt.Errorf("key was not found for %s", key) - } - return &val, nil -} - -// Check if the Nodes map has a key entry -func (n Nodes) Has(key string) bool { - _, err := n.Get(key) - return err == nil -} - -// Get the Phase of a Node -func (n Nodes) GetPhase(key string) (*NodePhase, error) { - val, err := n.Get(key) - if err != nil { - return nil, err - } - return &val.Phase, nil -} - -// Set the status of a node by key -func (n Nodes) Set(key string, status NodeStatus) { - if status.Name == "" { - log.Warnf("Name was not set for key %s", key) - } - if status.ID == "" { - log.Warnf("ID was not set for key %s", key) - } - _, ok := n[key] - if ok { - log.Tracef("Changing NodeStatus for %s to %+v", key, status) - } - n[key] = status -} - -// Delete a node from the Nodes by key -func (n Nodes) Delete(key string) { - has := n.Has(key) - if !has { - log.Warnf("Trying to delete non existent key %s", key) - return - } - delete(n, key) -} - -// Get the name of a node by key -func (n Nodes) GetName(key string) (string, error) { - val, err := n.Get(key) - if err != nil { - return "", err - } - return val.Name, nil -} -func NodeWithName(name string) func(n NodeStatus) bool { - return func(n NodeStatus) bool { return n.Name == name } -} - -func NodeWithDisplayName(name string) func(n NodeStatus) bool { - return func(n NodeStatus) bool { return n.DisplayName == name } -} - -func FailedPodNode(n NodeStatus) bool { - return n.Type == NodeTypePod && n.Phase == NodeFailed -} - -func SucceededPodNode(n NodeStatus) bool { - return n.Type == NodeTypePod && n.Phase == 
NodeSucceeded -} - -// Children returns the children of the parent. -func (s Nodes) Children(parentNodeId string) Nodes { - childNodes := make(Nodes) - parentNode, ok := s[parentNodeId] - if !ok { - return childNodes - } - for _, childID := range parentNode.Children { - if childNode, ok := s[childID]; ok { - childNodes[childID] = childNode - } - } - return childNodes -} - -// NestedChildrenStatus takes in a nodeID and returns all its children, this involves a tree search using DFS. -// This is needed to mark all children nodes as failed for example. -func (s Nodes) NestedChildrenStatus(parentNodeId string) ([]NodeStatus, error) { - parentNode, ok := s[parentNodeId] - if !ok { - return nil, fmt.Errorf("could not find %s in nodes when searching for nested children", parentNodeId) - } - - children := []NodeStatus{} - toexplore := []NodeStatus{parentNode} - - for len(toexplore) > 0 { - childNode := toexplore[0] - toexplore = toexplore[1:] - for _, nodeID := range childNode.Children { - toexplore = append(toexplore, s[nodeID]) - } - - if childNode.Name == parentNode.Name { - continue - } - children = append(children, childNode) - } - - return children, nil -} - -// Filter returns the subset of the nodes that match the predicate, e.g. only failed nodes -func (s Nodes) Filter(predicate func(NodeStatus) bool) Nodes { - filteredNodes := make(Nodes) - for _, node := range s { - if predicate(node) { - filteredNodes[node.ID] = node - } - } - return filteredNodes -} - -// Map maps the nodes to new values, e.g. `x.Hostname` -func (s Nodes) Map(f func(x NodeStatus) interface{}) map[string]interface{} { - values := make(map[string]interface{}) - for _, node := range s { - values[node.ID] = f(node) - } - return values -} - -// UserContainer is a container specified by a user. 
type UserContainer struct {
	apiv1.Container `json:",inline" protobuf:"bytes,1,opt,name=container"`

	// MirrorVolumeMounts will mount the same volumes specified in the main container
	// to the container (including artifacts), at the same mountPaths. This enables
	// dind daemon to partially see the same filesystem as the main container in
	// order to use features such as docker volume binding
	MirrorVolumeMounts *bool `json:"mirrorVolumeMounts,omitempty" protobuf:"varint,2,opt,name=mirrorVolumeMounts"`
}

// WorkflowStatus contains overall status information about a workflow
type WorkflowStatus struct {
	// Phase a simple, high-level summary of where the workflow is in its lifecycle.
	// Will be "" (Unknown), "Pending", or "Running" before the workflow is completed, and "Succeeded",
	// "Failed" or "Error" once the workflow has completed.
	Phase WorkflowPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=WorkflowPhase"`

	// Time at which this workflow started
	StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,2,opt,name=startedAt"`

	// Time at which this workflow completed
	FinishedAt metav1.Time `json:"finishedAt,omitempty" protobuf:"bytes,3,opt,name=finishedAt"`

	// EstimatedDuration in seconds.
	EstimatedDuration EstimatedDuration `json:"estimatedDuration,omitempty" protobuf:"varint,16,opt,name=estimatedDuration,casttype=EstimatedDuration"`

	// Progress to completion
	Progress Progress `json:"progress,omitempty" protobuf:"bytes,17,opt,name=progress,casttype=Progress"`

	// A human readable message indicating details about why the workflow is in this condition.
	Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`

	// Compressed and base64 encoded Nodes map
	CompressedNodes string `json:"compressedNodes,omitempty" protobuf:"bytes,5,opt,name=compressedNodes"`

	// Nodes is a mapping between a node ID and the node's status.
	Nodes Nodes `json:"nodes,omitempty" protobuf:"bytes,6,rep,name=nodes"`

	// Whether or not node status has been offloaded to a database. If exists, then Nodes and CompressedNodes will be empty.
	// This will actually be populated with a hash of the offloaded data.
	OffloadNodeStatusVersion string `json:"offloadNodeStatusVersion,omitempty" protobuf:"bytes,10,rep,name=offloadNodeStatusVersion"`

	// StoredTemplates is a mapping between a template ref and the node's status.
	StoredTemplates map[string]Template `json:"storedTemplates,omitempty" protobuf:"bytes,9,rep,name=storedTemplates"`

	// PersistentVolumeClaims tracks all PVCs that were created as part of the workflow.
	// The contents of this list are drained at the end of the workflow.
	PersistentVolumeClaims []apiv1.Volume `json:"persistentVolumeClaims,omitempty" protobuf:"bytes,7,rep,name=persistentVolumeClaims"`

	// Outputs captures output values and artifact locations produced by the workflow via global outputs
	Outputs *Outputs `json:"outputs,omitempty" protobuf:"bytes,8,opt,name=outputs"`

	// Conditions is a list of conditions the Workflow may have
	Conditions Conditions `json:"conditions,omitempty" protobuf:"bytes,13,rep,name=conditions"`

	// ResourcesDuration is the total for the workflow
	ResourcesDuration ResourcesDuration `json:"resourcesDuration,omitempty" protobuf:"bytes,12,opt,name=resourcesDuration"`

	// StoredWorkflowSpec stores the WorkflowTemplate spec for future execution.
	StoredWorkflowSpec *WorkflowSpec `json:"storedWorkflowTemplateSpec,omitempty" protobuf:"bytes,14,opt,name=storedWorkflowTemplateSpec"`

	// Synchronization stores the status of synchronization locks
	Synchronization *SynchronizationStatus `json:"synchronization,omitempty" protobuf:"bytes,15,opt,name=synchronization"`

	// ArtifactRepositoryRef is used to cache the repository to use so we do not need to determine it everytime we reconcile.
	ArtifactRepositoryRef *ArtifactRepositoryRefStatus `json:"artifactRepositoryRef,omitempty" protobuf:"bytes,18,opt,name=artifactRepositoryRef"`

	// ArtifactGCStatus maintains the status of Artifact Garbage Collection
	ArtifactGCStatus *ArtGCStatus `json:"artifactGCStatus,omitempty" protobuf:"bytes,19,opt,name=artifactGCStatus"`

	// TaskResultsCompletionStatus tracks task result completion status (mapped by node ID). Used to prevent premature archiving and garbage collection.
	TaskResultsCompletionStatus map[string]bool `json:"taskResultsCompletionStatus,omitempty" protobuf:"bytes,20,opt,name=taskResultsCompletionStatus"`
}

// MarkTaskResultIncomplete records that the named node's task result is not yet complete.
func (ws *WorkflowStatus) MarkTaskResultIncomplete(name string) {
	if ws.TaskResultsCompletionStatus == nil {
		ws.TaskResultsCompletionStatus = make(map[string]bool)
	}
	ws.TaskResultsCompletionStatus[name] = false
}

// MarkTaskResultComplete records that the named node's task result is complete.
func (ws *WorkflowStatus) MarkTaskResultComplete(name string) {
	if ws.TaskResultsCompletionStatus == nil {
		ws.TaskResultsCompletionStatus = make(map[string]bool)
	}
	ws.TaskResultsCompletionStatus[name] = true
}

// TaskResultsInProgress reports whether any tracked task result is still incomplete.
func (ws *WorkflowStatus) TaskResultsInProgress() bool {
	for _, value := range ws.TaskResultsCompletionStatus {
		if !value {
			return true
		}
	}
	return false
}

// IsTaskResultIncomplete reports whether the named node's task result is tracked and incomplete.
func (ws *WorkflowStatus) IsTaskResultIncomplete(name string) bool {
	value, found := ws.TaskResultsCompletionStatus[name]
	if found {
		return !value
	}
	return false // workflows from older versions do not have this status, so assume completed if this is missing
}

// IsOffloadNodeStatus reports whether node status has been offloaded to a database.
func (ws *WorkflowStatus) IsOffloadNodeStatus() bool {
	return ws.OffloadNodeStatusVersion != ""
}

// GetOffloadNodeStatusVersion returns the offloaded node status version (hash).
func (ws *WorkflowStatus) GetOffloadNodeStatusVersion() string {
	return ws.OffloadNodeStatusVersion
}

// GetStoredTemplates returns the stored templates as a slice (map iteration order).
func (ws *WorkflowStatus) GetStoredTemplates() []Template {
	var out []Template
	for _, t := range ws.StoredTemplates {
		out = append(out, t)
	}
	return out
}

// GetOffloadNodeStatusVersion returns the workflow's offloaded node status version.
func (wf *Workflow) GetOffloadNodeStatusVersion() string {
	return wf.Status.GetOffloadNodeStatusVersion()
}

type RetryPolicy string

const (
	RetryPolicyAlways           RetryPolicy = "Always"
	RetryPolicyOnFailure        RetryPolicy = "OnFailure"
	RetryPolicyOnError          RetryPolicy = "OnError"
	RetryPolicyOnTransientError RetryPolicy = "OnTransientError"
)

// Backoff is a backoff strategy to use within retryStrategy
type Backoff struct {
	// Duration is the amount to back off. Default unit is seconds, but could also be a duration (e.g. "2m", "1h")
	Duration string `json:"duration,omitempty" protobuf:"varint,1,opt,name=duration"`
	// Factor is a factor to multiply the base duration after each failed retry
	Factor *intstr.IntOrString `json:"factor,omitempty" protobuf:"varint,2,opt,name=factor"`
	// MaxDuration is the maximum amount of time allowed for a workflow in the backoff strategy.
	// It is important to note that if the workflow template includes activeDeadlineSeconds, the pod's deadline is initially set with activeDeadlineSeconds.
	// However, when the workflow fails, the pod's deadline is then overridden by maxDuration.
	// This ensures that the workflow does not exceed the specified maximum duration when retries are involved.
	MaxDuration string `json:"maxDuration,omitempty" protobuf:"varint,3,opt,name=maxDuration"`
}

// RetryNodeAntiAffinity is a placeholder for future expansion, only empty nodeAntiAffinity is allowed.
// In order to prevent running steps on the same host, it uses "kubernetes.io/hostname".
type RetryNodeAntiAffinity struct{}

// RetryAffinity prevents running steps on the same host.
type RetryAffinity struct {
	NodeAntiAffinity *RetryNodeAntiAffinity `json:"nodeAntiAffinity,omitempty" protobuf:"bytes,1,opt,name=nodeAntiAffinity"`
}

// RetryStrategy provides controls on how to retry a workflow step
type RetryStrategy struct {
	// Limit is the maximum number of retry attempts when retrying a container. It does not include the original
	// container; the maximum number of total attempts will be `limit + 1`.
	Limit *intstr.IntOrString `json:"limit,omitempty" protobuf:"varint,1,opt,name=limit"`

	// RetryPolicy is a policy of NodePhase statuses that will be retried
	RetryPolicy RetryPolicy `json:"retryPolicy,omitempty" protobuf:"bytes,2,opt,name=retryPolicy,casttype=RetryPolicy"`

	// Backoff is a backoff strategy
	Backoff *Backoff `json:"backoff,omitempty" protobuf:"bytes,3,opt,name=backoff,casttype=Backoff"`

	// Affinity prevents running workflow's step on the same host
	Affinity *RetryAffinity `json:"affinity,omitempty" protobuf:"bytes,4,opt,name=affinity"`

	// Expression is a condition expression for when a node will be retried. If it evaluates to false, the node will not
	// be retried and the retry strategy will be ignored
	Expression string `json:"expression,omitempty" protobuf:"bytes,5,opt,name=expression"`
}

// RetryPolicyActual gets the active retry policy for a strategy.
// If the policy is explicit, use that.
// If an expression is given, use a policy of Always so the
// expression is all that controls the retry for 'least surprise'.
// Otherwise, if neither is given, default to retry OnFailure.
func (s RetryStrategy) RetryPolicyActual() RetryPolicy {
	if s.RetryPolicy != "" {
		return s.RetryPolicy
	}
	if s.Expression == "" {
		return RetryPolicyOnFailure
	} else {
		return RetryPolicyAlways
	}
}

// The amount of requested resource * the duration that request was used.
// This is represented as duration in seconds, so can be converted to and from
// duration (with loss of precision).
type ResourceDuration int64

// NewResourceDuration converts a time.Duration to a ResourceDuration,
// truncating to whole seconds.
func NewResourceDuration(d time.Duration) ResourceDuration {
	return ResourceDuration(d.Seconds())
}

// Duration converts back to a time.Duration (seconds granularity).
func (in ResourceDuration) Duration() time.Duration {
	return time.Duration(in) * time.Second
}

func (in ResourceDuration) String() string {
	return in.Duration().String()
}

// This contains each duration per resource requested.
// e.g. 100m CPU * 1h, 1Gi memory * 1h
type ResourcesDuration map[apiv1.ResourceName]ResourceDuration

// Add returns a new ResourcesDuration that is the element-wise sum of in and o.
// Neither receiver nor argument is modified.
func (in ResourcesDuration) Add(o ResourcesDuration) ResourcesDuration {
	res := ResourcesDuration{}
	for n, d := range in {
		res[n] += d
	}
	for n, d := range o {
		res[n] += d
	}
	return res
}

// String renders each entry as "<duration>*(<denominator> <resource>)",
// comma-separated, in map-iteration (unspecified) order.
func (in ResourcesDuration) String() string {
	var parts []string
	for n, d := range in {
		parts = append(parts, fmt.Sprintf("%v*(%s %s)", d, ResourceQuantityDenominator(n).String(), n))
	}
	return strings.Join(parts, ",")
}

func (in ResourcesDuration) IsZero() bool {
	return len(in) == 0
}

// ResourceQuantityDenominator returns the per-resource unit that durations are
// accounted against (e.g. 100Mi for memory); defaults to "1" for other resources.
func ResourceQuantityDenominator(r apiv1.ResourceName) *resource.Quantity {
	q, ok := map[apiv1.ResourceName]resource.Quantity{
		apiv1.ResourceMemory:           resource.MustParse("100Mi"),
		apiv1.ResourceStorage:          resource.MustParse("10Gi"),
		apiv1.ResourceEphemeralStorage: resource.MustParse("10Gi"),
	}[r]
	if !ok {
		q = resource.MustParse("1")
	}
	return &q
}

type Conditions []Condition

// UpsertCondition replaces the condition with the same type, or appends it if
// no condition of that type exists.
func (cs *Conditions) UpsertCondition(condition Condition) {
	for index, wfCondition := range *cs {
		if wfCondition.Type == condition.Type {
			(*cs)[index] = condition
			return
		}
	}
	*cs = append(*cs, condition)
}

// UpsertConditionMessage appends the message to an existing condition of the
// same type, or appends the whole condition if none exists.
func (cs *Conditions) UpsertConditionMessage(condition Condition) {
	for index, wfCondition := range *cs {
		if wfCondition.Type == condition.Type {
			(*cs)[index].Message += ", " + condition.Message
			return
		}
	}
	*cs = append(*cs, condition)
}

// JoinConditions upserts every condition from the given list into cs.
func (cs *Conditions) JoinConditions(conditions *Conditions) {
	for _, condition := range *conditions {
		cs.UpsertCondition(condition)
	}
}

// RemoveCondition deletes the first condition with the given type, if present.
func (cs *Conditions) RemoveCondition(conditionType ConditionType) {
	for index, wfCondition := range *cs {
		if wfCondition.Type == conditionType {
			*cs = append((*cs)[:index], (*cs)[index+1:]...)
			return
		}
	}
}

// DisplayString renders the conditions for human consumption using fmtStr
// (a two-%s format) and per-type icons from iconMap.
func (cs *Conditions) DisplayString(fmtStr string, iconMap map[ConditionType]string) string {
	if len(*cs) == 0 {
		return fmt.Sprintf(fmtStr, "Conditions:", "None")
	}
	out := fmt.Sprintf(fmtStr, "Conditions:", "")
	for _, condition := range *cs {
		conditionMessage := condition.Message
		if conditionMessage == "" {
			conditionMessage = string(condition.Status)
		}
		conditionPrefix := fmt.Sprintf("%s %s", iconMap[condition.Type], string(condition.Type))
		out += fmt.Sprintf(fmtStr, conditionPrefix, conditionMessage)
	}
	return out
}

type ConditionType string

const (
	// ConditionTypeCompleted signifies the workflow has completed
	ConditionTypeCompleted ConditionType = "Completed"
	// ConditionTypePodRunning any workflow pods are currently running
	ConditionTypePodRunning ConditionType = "PodRunning"
	// ConditionTypeSpecWarning is a warning on the current application spec
	ConditionTypeSpecWarning ConditionType = "SpecWarning"
	// ConditionTypeSpecError is an error on the current application spec
	ConditionTypeSpecError ConditionType = "SpecError"
	// ConditionTypeMetricsError is an error during metric emission
	ConditionTypeMetricsError ConditionType = "MetricsError"
	// ConditionTypeArtifactGCError is an error on artifact garbage collection
	ConditionTypeArtifactGCError ConditionType = "ArtifactGCError"
)

type Condition struct {
	// Type is the type of condition
	Type ConditionType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=ConditionType"`

	// Status is the status of the condition
	Status metav1.ConditionStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/apimachinery/pkg/apis/meta/v1.ConditionStatus"`

	// Message is the condition message
	Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
}
// NodeStatus contains status information about an individual node in the workflow
type NodeStatus struct {
	// ID is a unique identifier of a node within the workflow
	// It is implemented as a hash of the node name, which makes the ID deterministic
	ID string `json:"id" protobuf:"bytes,1,opt,name=id"`

	// Name is unique name in the node tree used to generate the node ID
	Name string `json:"name" protobuf:"bytes,2,opt,name=name"`

	// DisplayName is a human readable representation of the node. Unique within a template boundary
	DisplayName string `json:"displayName,omitempty" protobuf:"bytes,3,opt,name=displayName"`

	// Type indicates type of node
	Type NodeType `json:"type" protobuf:"bytes,4,opt,name=type,casttype=NodeType"`

	// TemplateName is the template name which this node corresponds to.
	// Not applicable to virtual nodes (e.g. Retry, StepGroup)
	TemplateName string `json:"templateName,omitempty" protobuf:"bytes,5,opt,name=templateName"`

	// TemplateRef is the reference to the template resource which this node corresponds to.
	// Not applicable to virtual nodes (e.g. Retry, StepGroup)
	TemplateRef *TemplateRef `json:"templateRef,omitempty" protobuf:"bytes,6,opt,name=templateRef"`

	// TemplateScope is the template scope in which the template of this node was retrieved.
	TemplateScope string `json:"templateScope,omitempty" protobuf:"bytes,20,opt,name=templateScope"`

	// Phase a simple, high-level summary of where the node is in its lifecycle.
	// Can be used as a state machine.
	// Will be one of these values "Pending", "Running" before the node is completed, or "Succeeded",
	// "Skipped", "Failed", "Error", or "Omitted" as a final state.
	Phase NodePhase `json:"phase,omitempty" protobuf:"bytes,7,opt,name=phase,casttype=NodePhase"`

	// BoundaryID indicates the node ID of the associated template root node in which this node belongs to
	BoundaryID string `json:"boundaryID,omitempty" protobuf:"bytes,8,opt,name=boundaryID"`

	// A human readable message indicating details about why the node is in this condition.
	Message string `json:"message,omitempty" protobuf:"bytes,9,opt,name=message"`

	// Time at which this node started
	StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,10,opt,name=startedAt"`

	// Time at which this node completed
	FinishedAt metav1.Time `json:"finishedAt,omitempty" protobuf:"bytes,11,opt,name=finishedAt"`

	// EstimatedDuration in seconds.
	EstimatedDuration EstimatedDuration `json:"estimatedDuration,omitempty" protobuf:"varint,24,opt,name=estimatedDuration,casttype=EstimatedDuration"`

	// Progress to completion
	Progress Progress `json:"progress,omitempty" protobuf:"bytes,26,opt,name=progress,casttype=Progress"`

	// ResourcesDuration is indicative, but not accurate, resource duration. This is populated when the nodes completes.
	ResourcesDuration ResourcesDuration `json:"resourcesDuration,omitempty" protobuf:"bytes,21,opt,name=resourcesDuration"`

	// PodIP captures the IP of the pod for daemoned steps
	PodIP string `json:"podIP,omitempty" protobuf:"bytes,12,opt,name=podIP"`

	// Daemoned tracks whether or not this node was daemoned and need to be terminated
	Daemoned *bool `json:"daemoned,omitempty" protobuf:"varint,13,opt,name=daemoned"`

	// NodeFlag tracks some history of node. e.g.) hooked, retried, etc.
	NodeFlag *NodeFlag `json:"nodeFlag,omitempty" protobuf:"bytes,27,opt,name=nodeFlag"`

	// Inputs captures input parameter values and artifact locations supplied to this template invocation
	Inputs *Inputs `json:"inputs,omitempty" protobuf:"bytes,14,opt,name=inputs"`

	// Outputs captures output parameter values and artifact locations produced by this template invocation
	Outputs *Outputs `json:"outputs,omitempty" protobuf:"bytes,15,opt,name=outputs"`

	// Children is a list of child node IDs
	Children []string `json:"children,omitempty" protobuf:"bytes,16,rep,name=children"`

	// OutboundNodes tracks the node IDs which are considered "outbound" nodes to a template invocation.
	// For every invocation of a template, there are nodes which we considered as "outbound". Essentially,
	// these are last nodes in the execution sequence to run, before the template is considered completed.
	// These nodes are then connected as parents to a following step.
	//
	// In the case of single pod steps (i.e. container, script, resource templates), this list will be nil
	// since the pod itself is already considered the "outbound" node.
	// In the case of DAGs, outbound nodes are the "target" tasks (tasks with no children).
	// In the case of steps, outbound nodes are all the containers involved in the last step group.
	// NOTE: since templates are composable, the list of outbound nodes are carried upwards when
	// a DAG/steps template invokes another DAG/steps template. In other words, the outbound nodes of
	// a template, will be a superset of the outbound nodes of its last children.
	OutboundNodes []string `json:"outboundNodes,omitempty" protobuf:"bytes,17,rep,name=outboundNodes"`

	// HostNodeName name of the Kubernetes node on which the Pod is running, if applicable
	HostNodeName string `json:"hostNodeName,omitempty" protobuf:"bytes,22,rep,name=hostNodeName"`

	// MemoizationStatus holds information about cached nodes
	MemoizationStatus *MemoizationStatus `json:"memoizationStatus,omitempty" protobuf:"varint,23,opt,name=memoizationStatus"`

	// SynchronizationStatus is the synchronization status of the node
	SynchronizationStatus *NodeSynchronizationStatus `json:"synchronizationStatus,omitempty" protobuf:"bytes,25,opt,name=synchronizationStatus"`
}

func (n *NodeStatus) GetName() string {
	return n.Name
}

// IsDAGTask reports whether this holder is a DAG task; always false for NodeStatus.
func (n *NodeStatus) IsDAGTask() bool {
	return false
}

// IsWorkflowStep reports whether this holder is a workflow step; always false for NodeStatus.
func (n *NodeStatus) IsWorkflowStep() bool {
	return false
}

// Fulfilled returns whether a phase is fulfilled, i.e. it completed execution or was skipped or omitted
func (phase NodePhase) Fulfilled() bool {
	return phase.Completed() || phase == NodeSkipped || phase == NodeOmitted
}
Notably, a skipped phase is not considered as having completed -func (phase NodePhase) Completed() bool { - return phase.FailedOrError() || phase == NodeSucceeded -} - -func (phase NodePhase) FailedOrError() bool { - return phase == NodeFailed || phase == NodeError -} - -// Fulfilled returns whether or not the workflow has fulfilled its execution -func (ws WorkflowStatus) Fulfilled() bool { - return ws.Phase.Completed() -} - -// Successful return whether or not the workflow has succeeded -func (ws WorkflowStatus) Successful() bool { - return ws.Phase == WorkflowSucceeded -} - -// Failed return whether or not the workflow has failed -func (ws WorkflowStatus) Failed() bool { - return ws.Phase == WorkflowFailed -} - -func (ws WorkflowStatus) StartTime() *metav1.Time { - return &ws.StartedAt -} - -func (ws WorkflowStatus) FinishTime() *metav1.Time { - return &ws.FinishedAt -} - -// Fulfilled returns whether a node is fulfilled, i.e. it finished execution, was skipped, or was dameoned successfully -func (n NodeStatus) Fulfilled() bool { - return n.Phase.Fulfilled() || n.IsDaemoned() && n.Phase != NodePending -} - -// Completed returns whether a node completed. 
Notably, a skipped node is not considered as having completed -func (n NodeStatus) Completed() bool { - return n.Phase.Completed() -} - -func (in *WorkflowStatus) AnyActiveSuspendNode() bool { - return in.Nodes.Any(func(node NodeStatus) bool { return node.IsActiveSuspendNode() }) -} - -func (ws *WorkflowStatus) GetDuration() time.Duration { - if ws.FinishedAt.IsZero() { - return 0 - } - return ws.FinishedAt.Time.Sub(ws.StartedAt.Time) -} - -// Pending returns whether or not the node is in pending state -func (n NodeStatus) Pending() bool { - return n.Phase == NodePending -} - -// IsDaemoned returns whether or not the node is daemoned -func (n NodeStatus) IsDaemoned() bool { - if n.Daemoned == nil || !*n.Daemoned { - return false - } - return true -} - -// IsPartOfExitHandler returns whether node is part of exit handler. -func (n *NodeStatus) IsPartOfExitHandler(nodes Nodes) bool { - currentNode := n - for !currentNode.IsExitNode() { - if currentNode.BoundaryID == "" { - return false - } - boundaryNode, err := nodes.Get(currentNode.BoundaryID) - if err != nil { - log.Panicf("was unable to obtain node for %s", currentNode.BoundaryID) - } - currentNode = boundaryNode - } - return true -} - -// IsExitNode returns whether or not node run as exit handler. -func (n NodeStatus) IsExitNode() bool { - return strings.HasSuffix(n.DisplayName, ".onExit") -} - -func (n NodeStatus) Succeeded() bool { - return n.Phase == NodeSucceeded -} - -func (n NodeStatus) FailedOrError() bool { - return n.Phase.FailedOrError() -} - -func (n NodeStatus) Omitted() bool { - return n.Type == NodeTypeSkipped && n.Phase == NodeOmitted -} - -func (n NodeStatus) StartTime() *metav1.Time { - return &n.StartedAt -} - -func (n NodeStatus) FinishTime() *metav1.Time { - return &n.FinishedAt -} - -// CanRetry returns whether the node should be retried or not. -func (n NodeStatus) CanRetry() bool { - // TODO(shri): Check if there are some 'unretryable' errors. 
// GetTemplateScope splits TemplateScope into its resource scope and resource name.
func (n NodeStatus) GetTemplateScope() (ResourceScope, string) {
	// For compatibility: an empty TemplateScope is a local scope
	if n.TemplateScope == "" {
		return ResourceScopeLocal, ""
	}
	split := strings.Split(n.TemplateScope, "/")
	// For compatibility: an unspecified ResourceScope in a TemplateScope is a namespaced scope
	if len(split) == 1 {
		return ResourceScopeNamespaced, split[0]
	}
	resourceScope, resourceName := split[0], split[1]
	return ResourceScope(resourceScope), resourceName
}

// Compile-time check that *NodeStatus satisfies TemplateReferenceHolder.
var _ TemplateReferenceHolder = &NodeStatus{}

// GetTemplate always returns nil: a node references a template by name/ref, it does not embed one.
func (n *NodeStatus) GetTemplate() *Template {
	return nil
}

func (n *NodeStatus) GetTemplateName() string {
	return n.TemplateName
}

func (n *NodeStatus) GetTemplateRef() *TemplateRef {
	return n.TemplateRef
}

// GetOutputs returns the node's outputs; safe to call on a nil receiver.
func (n *NodeStatus) GetOutputs() *Outputs {
	if n == nil {
		return nil
	}
	return n.Outputs
}

// IsActiveSuspendNode returns whether this node is an active suspend node
func (n *NodeStatus) IsActiveSuspendNode() bool {
	return n.Type == NodeTypeSuspend && n.Phase == NodeRunning
}

// IsTaskSetNode returns whether this node uses the taskset
func (n *NodeStatus) IsTaskSetNode() bool {
	return n.Type == NodeTypeHTTP || n.Type == NodeTypePlugin
}

// GetDuration returns the wall-clock duration of the node, or 0 while it is still running.
func (n NodeStatus) GetDuration() time.Duration {
	if n.FinishedAt.IsZero() {
		return 0
	}
	return n.FinishedAt.Sub(n.StartedAt.Time)
}

// HasChild reports whether childID is among this node's children.
func (n NodeStatus) HasChild(childID string) bool {
	for _, nodeID := range n.Children {
		if childID == nodeID {
			return true
		}
	}
	return false
}

// S3Bucket contains the access information required for interfacing with an S3 bucket
type S3Bucket struct {
	// Endpoint is the hostname of the bucket endpoint
	Endpoint string `json:"endpoint,omitempty" protobuf:"bytes,1,opt,name=endpoint"`

	// Bucket is the name of the bucket
	Bucket string `json:"bucket,omitempty" protobuf:"bytes,2,opt,name=bucket"`

	// Region contains the optional bucket region
	Region string `json:"region,omitempty" protobuf:"bytes,3,opt,name=region"`

	// Insecure, when true, connects to the service without TLS
	// (original comment said "with TLS", contradicting the field name).
	Insecure *bool `json:"insecure,omitempty" protobuf:"varint,4,opt,name=insecure"`

	// AccessKeySecret is the secret selector to the bucket's access key
	AccessKeySecret *apiv1.SecretKeySelector `json:"accessKeySecret,omitempty" protobuf:"bytes,5,opt,name=accessKeySecret"`

	// SecretKeySecret is the secret selector to the bucket's secret key
	SecretKeySecret *apiv1.SecretKeySelector `json:"secretKeySecret,omitempty" protobuf:"bytes,6,opt,name=secretKeySecret"`

	// RoleARN is the Amazon Resource Name (ARN) of the role to assume.
	RoleARN string `json:"roleARN,omitempty" protobuf:"bytes,7,opt,name=roleARN"`

	// UseSDKCreds tells the driver to figure out credentials based on sdk defaults.
	UseSDKCreds bool `json:"useSDKCreds,omitempty" protobuf:"varint,8,opt,name=useSDKCreds"`

	// CreateBucketIfNotPresent tells the driver to attempt to create the S3 bucket for output artifacts, if it doesn't exist. Setting Enabled Encryption will apply either SSE-S3 to the bucket if KmsKeyId is not set or SSE-KMS if it is.
	CreateBucketIfNotPresent *CreateS3BucketOptions `json:"createBucketIfNotPresent,omitempty" protobuf:"bytes,9,opt,name=createBucketIfNotPresent"`

	EncryptionOptions *S3EncryptionOptions `json:"encryptionOptions,omitempty" protobuf:"bytes,10,opt,name=encryptionOptions"`

	// CASecret specifies the secret that contains the CA, used to verify the TLS connection
	CASecret *apiv1.SecretKeySelector `json:"caSecret,omitempty" protobuf:"bytes,11,opt,name=caSecret"`
}

// S3EncryptionOptions used to determine encryption options during s3 operations
type S3EncryptionOptions struct {
	// KmsKeyId tells the driver to encrypt the object using the specified KMS Key.
	KmsKeyId string `json:"kmsKeyId,omitempty" protobuf:"bytes,1,opt,name=kmsKeyId"`

	// KmsEncryptionContext is a json blob that contains an encryption context. See https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context for more information
	KmsEncryptionContext string `json:"kmsEncryptionContext,omitempty" protobuf:"bytes,2,opt,name=kmsEncryptionContext"`

	// EnableEncryption tells the driver to encrypt objects if set to true. If kmsKeyId and serverSideCustomerKeySecret are not set, SSE-S3 will be used
	EnableEncryption bool `json:"enableEncryption,omitempty" protobuf:"varint,3,opt,name=enableEncryption"`

	// ServerSideCustomerKeySecret tells the driver to encrypt the output artifacts using SSE-C with the specified secret.
	ServerSideCustomerKeySecret *apiv1.SecretKeySelector `json:"serverSideCustomerKeySecret,omitempty" protobuf:"bytes,4,opt,name=serverSideCustomerKeySecret"`
}

// CreateS3BucketOptions options used to determine the automatic bucket-creation process
type CreateS3BucketOptions struct {
	// ObjectLocking Enable object locking
	ObjectLocking bool `json:"objectLocking,omitempty" protobuf:"varint,3,opt,name=objectLocking"`
}

// S3Artifact is the location of an S3 artifact
type S3Artifact struct {
	S3Bucket `json:",inline" protobuf:"bytes,1,opt,name=s3Bucket"`

	// Key is the key in the bucket where the artifact resides
	Key string `json:"key,omitempty" protobuf:"bytes,2,opt,name=key"`
}

func (s *S3Artifact) GetKey() (string, error) {
	return s.Key, nil
}

func (s *S3Artifact) SetKey(key string) error {
	s.Key = key
	return nil
}

// HasLocation reports whether the artifact fully identifies an S3 object; nil-safe.
func (s *S3Artifact) HasLocation() bool {
	return s != nil && s.Endpoint != "" && s.Bucket != "" && s.Key != ""
}

// GitArtifact is the location of an git artifact
type GitArtifact struct {
	// Repo is the git repository
	Repo string `json:"repo" protobuf:"bytes,1,opt,name=repo"`

	// Revision is the git commit, tag, branch to checkout
	Revision string `json:"revision,omitempty" protobuf:"bytes,2,opt,name=revision"`

	// Depth specifies clones/fetches should be shallow and include the given
	// number of commits from the branch tip
	Depth *uint64 `json:"depth,omitempty" protobuf:"bytes,3,opt,name=depth"`

	// Fetch specifies a number of refs that should be fetched before checkout
	Fetch []string `json:"fetch,omitempty" protobuf:"bytes,4,rep,name=fetch"`

	// UsernameSecret is the secret selector to the repository username
	UsernameSecret *apiv1.SecretKeySelector `json:"usernameSecret,omitempty" protobuf:"bytes,5,opt,name=usernameSecret"`

	// PasswordSecret is the secret selector to the repository password
	PasswordSecret *apiv1.SecretKeySelector `json:"passwordSecret,omitempty" protobuf:"bytes,6,opt,name=passwordSecret"`

	// SSHPrivateKeySecret is the secret selector to the repository ssh private key
	SSHPrivateKeySecret *apiv1.SecretKeySelector `json:"sshPrivateKeySecret,omitempty" protobuf:"bytes,7,opt,name=sshPrivateKeySecret"`

	// InsecureIgnoreHostKey disables SSH strict host key checking during git clone
	InsecureIgnoreHostKey bool `json:"insecureIgnoreHostKey,omitempty" protobuf:"varint,8,opt,name=insecureIgnoreHostKey"`

	// DisableSubmodules disables submodules during git clone
	DisableSubmodules bool `json:"disableSubmodules,omitempty" protobuf:"varint,9,opt,name=disableSubmodules"`

	// SingleBranch enables single branch clone, using the `branch` parameter
	SingleBranch bool `json:"singleBranch,omitempty" protobuf:"varint,10,opt,name=singleBranch"`

	// Branch is the branch to fetch when `SingleBranch` is enabled
	Branch string `json:"branch,omitempty" protobuf:"bytes,11,opt,name=branch"`
}
// HasLocation reports whether the artifact identifies a repository; nil-safe.
func (g *GitArtifact) HasLocation() bool {
	return g != nil && g.Repo != ""
}

// GetKey always fails: git artifacts are addressed by repo/revision, not a key.
func (g *GitArtifact) GetKey() (string, error) {
	return "", fmt.Errorf("key unsupported: git artifact does not have a key")
}

// SetKey always fails: git artifacts are addressed by repo/revision, not a key.
func (g *GitArtifact) SetKey(string) error {
	return fmt.Errorf("key unsupported: cannot set key on git artifact")
}

// GetDepth returns the configured shallow-clone depth, or 0 when unset; nil-safe.
func (g *GitArtifact) GetDepth() int {
	if g == nil || g.Depth == nil {
		return 0
	}
	return int(*g.Depth)
}

// ArtifactoryAuth describes the secret selectors required for authenticating to artifactory
type ArtifactoryAuth struct {
	// UsernameSecret is the secret selector to the repository username
	UsernameSecret *apiv1.SecretKeySelector `json:"usernameSecret,omitempty" protobuf:"bytes,1,opt,name=usernameSecret"`

	// PasswordSecret is the secret selector to the repository password
	PasswordSecret *apiv1.SecretKeySelector `json:"passwordSecret,omitempty" protobuf:"bytes,2,opt,name=passwordSecret"`
}

// ArtifactoryArtifact is the location of an artifactory artifact
type ArtifactoryArtifact struct {
	// URL of the artifact
	URL             string `json:"url" protobuf:"bytes,1,opt,name=url"`
	ArtifactoryAuth `json:",inline" protobuf:"bytes,2,opt,name=artifactoryAuth"`
}

// func (a *ArtifactoryArtifact) String() string {
//	return a.URL
// }

// GetKey returns the URL path component as the artifact key.
func (a *ArtifactoryArtifact) GetKey() (string, error) {
	u, err := url.Parse(a.URL)
	if err != nil {
		return "", err
	}
	return u.Path, nil
}

// SetKey replaces the URL path component with key, keeping scheme/host intact.
func (a *ArtifactoryArtifact) SetKey(key string) error {
	u, err := url.Parse(a.URL)
	if err != nil {
		return err
	}
	u.Path = key
	a.URL = u.String()
	return nil
}

// HasLocation reports whether the artifact has both a URL and credentials; nil-safe.
func (a *ArtifactoryArtifact) HasLocation() bool {
	return a != nil && a.URL != "" && a.UsernameSecret != nil
}

// AzureBlobContainer contains the access information for interfacing with an Azure Blob Storage container
type AzureBlobContainer struct {
	// Endpoint is the service url associated with an account. It is most likely "https://<ACCOUNT_NAME>.blob.core.windows.net"
	Endpoint string `json:"endpoint" protobuf:"bytes,1,opt,name=endpoint"`

	// Container is the container where resources will be stored
	Container string `json:"container" protobuf:"bytes,2,opt,name=container"`

	// AccountKeySecret is the secret selector to the Azure Blob Storage account access key
	AccountKeySecret *apiv1.SecretKeySelector `json:"accountKeySecret,omitempty" protobuf:"bytes,3,opt,name=accountKeySecret"`

	// UseSDKCreds tells the driver to figure out credentials based on sdk defaults.
	UseSDKCreds bool `json:"useSDKCreds,omitempty" protobuf:"varint,4,opt,name=useSDKCreds"`
}

// AzureArtifact is the location of an Azure Storage artifact
type AzureArtifact struct {
	AzureBlobContainer `json:",inline" protobuf:"bytes,1,opt,name=azureBlobContainer"`

	// Blob is the blob name (i.e., path) in the container where the artifact resides
	Blob string `json:"blob" protobuf:"bytes,2,opt,name=blob"`
}

func (a *AzureArtifact) GetKey() (string, error) {
	return a.Blob, nil
}

func (a *AzureArtifact) SetKey(key string) error {
	a.Blob = key
	return nil
}

// HasLocation reports whether the artifact fully identifies a blob; nil-safe.
func (a *AzureArtifact) HasLocation() bool {
	return a != nil && a.Endpoint != "" && a.Container != "" && a.Blob != ""
}

// HDFSArtifact is the location of an HDFS artifact
type HDFSArtifact struct {
	HDFSConfig `json:",inline" protobuf:"bytes,1,opt,name=hDFSConfig"`

	// Path is a file path in HDFS
	Path string `json:"path" protobuf:"bytes,2,opt,name=path"`

	// Force copies a file forcibly even if it exists
	Force bool `json:"force,omitempty" protobuf:"varint,3,opt,name=force"`
}

func (h *HDFSArtifact) GetKey() (string, error) {
	return h.Path, nil
}

// NOTE(review): receiver named "g" while the sibling methods use "h"; kept as-is.
func (g *HDFSArtifact) SetKey(key string) error {
	g.Path = key
	return nil
}

// HasLocation reports whether at least one name-node address is configured; nil-safe.
func (h *HDFSArtifact) HasLocation() bool {
	return h != nil && len(h.Addresses) > 0
}

// HDFSConfig is configurations for HDFS
type HDFSConfig struct {
	HDFSKrbConfig `json:",inline" protobuf:"bytes,1,opt,name=hDFSKrbConfig"`

	// Addresses is accessible addresses of HDFS name nodes
	Addresses []string `json:"addresses,omitempty" protobuf:"bytes,2,rep,name=addresses"`

	// HDFSUser is the user to access HDFS file system.
	// It is ignored if either ccache or keytab is used.
	HDFSUser string `json:"hdfsUser,omitempty" protobuf:"bytes,3,opt,name=hdfsUser"`
}
HDFSKrbConfig `json:",inline" protobuf:"bytes,1,opt,name=hDFSKrbConfig"` - - // Addresses is accessible addresses of HDFS name nodes - Addresses []string `json:"addresses,omitempty" protobuf:"bytes,2,rep,name=addresses"` - - // HDFSUser is the user to access HDFS file system. - // It is ignored if either ccache or keytab is used. - HDFSUser string `json:"hdfsUser,omitempty" protobuf:"bytes,3,opt,name=hdfsUser"` -} - -// HDFSKrbConfig is auth configurations for Kerberos -type HDFSKrbConfig struct { - // KrbCCacheSecret is the secret selector for Kerberos ccache - // Either ccache or keytab can be set to use Kerberos. - KrbCCacheSecret *apiv1.SecretKeySelector `json:"krbCCacheSecret,omitempty" protobuf:"bytes,1,opt,name=krbCCacheSecret"` - - // KrbKeytabSecret is the secret selector for Kerberos keytab - // Either ccache or keytab can be set to use Kerberos. - KrbKeytabSecret *apiv1.SecretKeySelector `json:"krbKeytabSecret,omitempty" protobuf:"bytes,2,opt,name=krbKeytabSecret"` - - // KrbUsername is the Kerberos username used with Kerberos keytab - // It must be set if keytab is used. - KrbUsername string `json:"krbUsername,omitempty" protobuf:"bytes,3,opt,name=krbUsername"` - - // KrbRealm is the Kerberos realm used with Kerberos keytab - // It must be set if keytab is used. - KrbRealm string `json:"krbRealm,omitempty" protobuf:"bytes,4,opt,name=krbRealm"` - - // KrbConfig is the configmap selector for Kerberos config as string - // It must be set if either ccache or keytab is used. - KrbConfigConfigMap *apiv1.ConfigMapKeySelector `json:"krbConfigConfigMap,omitempty" protobuf:"bytes,5,opt,name=krbConfigConfigMap"` - - // KrbServicePrincipalName is the principal name of Kerberos service - // It must be set if either ccache or keytab is used. 
- KrbServicePrincipalName string `json:"krbServicePrincipalName,omitempty" protobuf:"bytes,6,opt,name=krbServicePrincipalName"` -} - -// RawArtifact allows raw string content to be placed as an artifact in a container -type RawArtifact struct { - // Data is the string contents of the artifact - Data string `json:"data" protobuf:"bytes,1,opt,name=data"` -} - -func (r *RawArtifact) GetKey() (string, error) { - return "", fmt.Errorf("key unsupported: raw artifat does not have key") -} - -func (r *RawArtifact) SetKey(string) error { - return fmt.Errorf("key unsupported: cannot set key for raw artifact") -} - -func (r *RawArtifact) HasLocation() bool { - return r != nil -} - -// Header indicate a key-value request header to be used when fetching artifacts over HTTP -type Header struct { - // Name is the header name - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - - // Value is the literal value to use for the header - Value string `json:"value" protobuf:"bytes,2,opt,name=value"` -} - -// BasicAuth describes the secret selectors required for basic authentication -type BasicAuth struct { - // UsernameSecret is the secret selector to the repository username - UsernameSecret *apiv1.SecretKeySelector `json:"usernameSecret,omitempty" protobuf:"bytes,1,opt,name=usernameSecret"` - - // PasswordSecret is the secret selector to the repository password - PasswordSecret *apiv1.SecretKeySelector `json:"passwordSecret,omitempty" protobuf:"bytes,2,opt,name=passwordSecret"` -} - -// ClientCertAuth holds necessary information for client authentication via certificates -type ClientCertAuth struct { - ClientCertSecret *apiv1.SecretKeySelector `json:"clientCertSecret,omitempty" protobuf:"bytes,1,opt,name=clientCertSecret"` - ClientKeySecret *apiv1.SecretKeySelector `json:"clientKeySecret,omitempty" protobuf:"bytes,2,opt,name=clientKeySecret"` -} - -// OAuth2Auth holds all information for client authentication via OAuth2 tokens -type OAuth2Auth struct { - ClientIDSecret 
*apiv1.SecretKeySelector `json:"clientIDSecret,omitempty" protobuf:"bytes,1,opt,name=clientIDSecret"` - ClientSecretSecret *apiv1.SecretKeySelector `json:"clientSecretSecret,omitempty" protobuf:"bytes,2,opt,name=clientSecretSecret"` - TokenURLSecret *apiv1.SecretKeySelector `json:"tokenURLSecret,omitempty" protobuf:"bytes,3,opt,name=tokenURLSecret"` - Scopes []string `json:"scopes,omitempty" protobuf:"bytes,5,rep,name=scopes"` - EndpointParams []OAuth2EndpointParam `json:"endpointParams,omitempty" protobuf:"bytes,6,rep,name=endpointParams"` -} - -// EndpointParam is for requesting optional fields that should be sent in the oauth request -type OAuth2EndpointParam struct { - // Name is the header name - Key string `json:"key" protobuf:"bytes,1,opt,name=key"` - - // Value is the literal value to use for the header - Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` -} - -type HTTPAuth struct { - ClientCert ClientCertAuth `json:"clientCert,omitempty" protobuf:"bytes,1,opt,name=clientCert"` - OAuth2 OAuth2Auth `json:"oauth2,omitempty" protobuf:"bytes,2,opt,name=oauth2"` - BasicAuth BasicAuth `json:"basicAuth,omitempty" protobuf:"bytes,3,opt,name=basicAuth"` -} - -// HTTPArtifact allows a file served on HTTP to be placed as an input artifact in a container -type HTTPArtifact struct { - // URL of the artifact - URL string `json:"url" protobuf:"bytes,1,opt,name=url"` - - // Headers are an optional list of headers to send with HTTP requests for artifacts - Headers []Header `json:"headers,omitempty" protobuf:"bytes,2,rep,name=headers"` - - // Auth contains information for client authentication - Auth *HTTPAuth `json:"auth,omitempty" protobuf:"bytes,3,opt,name=auth"` -} - -func (h *HTTPArtifact) GetKey() (string, error) { - u, err := url.Parse(h.URL) - if err != nil { - return "", err - } - return u.Path, nil -} - -func (g *HTTPArtifact) SetKey(key string) error { - u, err := url.Parse(g.URL) - if err != nil { - return err - } - u.Path = key - g.URL = 
u.String() - return nil -} - -func (h *HTTPArtifact) HasLocation() bool { - return h != nil && h.URL != "" -} - -// GCSBucket contains the access information for interfacring with a GCS bucket -type GCSBucket struct { - // Bucket is the name of the bucket - Bucket string `json:"bucket,omitempty" protobuf:"bytes,1,opt,name=bucket"` - - // ServiceAccountKeySecret is the secret selector to the bucket's service account key - ServiceAccountKeySecret *apiv1.SecretKeySelector `json:"serviceAccountKeySecret,omitempty" protobuf:"bytes,2,opt,name=serviceAccountKeySecret"` -} - -// GCSArtifact is the location of a GCS artifact -type GCSArtifact struct { - GCSBucket `json:",inline" protobuf:"bytes,1,opt,name=gCSBucket"` - - // Key is the path in the bucket where the artifact resides - Key string `json:"key" protobuf:"bytes,2,opt,name=key"` -} - -func (g *GCSArtifact) GetKey() (string, error) { - return g.Key, nil -} - -func (g *GCSArtifact) SetKey(key string) error { - g.Key = key - return nil -} - -func (g *GCSArtifact) HasLocation() bool { - return g != nil && g.Bucket != "" && g.Key != "" -} - -// OSSBucket contains the access information required for interfacing with an Alibaba Cloud OSS bucket -type OSSBucket struct { - // Endpoint is the hostname of the bucket endpoint - Endpoint string `json:"endpoint,omitempty" protobuf:"bytes,1,opt,name=endpoint"` - - // Bucket is the name of the bucket - Bucket string `json:"bucket,omitempty" protobuf:"bytes,2,opt,name=bucket"` - - // AccessKeySecret is the secret selector to the bucket's access key - AccessKeySecret *apiv1.SecretKeySelector `json:"accessKeySecret,omitempty" protobuf:"bytes,3,opt,name=accessKeySecret"` - - // SecretKeySecret is the secret selector to the bucket's secret key - SecretKeySecret *apiv1.SecretKeySelector `json:"secretKeySecret,omitempty" protobuf:"bytes,4,opt,name=secretKeySecret"` - - // CreateBucketIfNotPresent tells the driver to attempt to create the OSS bucket for output artifacts, if it doesn't 
exist - CreateBucketIfNotPresent bool `json:"createBucketIfNotPresent,omitempty" protobuf:"varint,5,opt,name=createBucketIfNotPresent"` - - // SecurityToken is the user's temporary security token. For more details, check out: https://www.alibabacloud.com/help/doc-detail/100624.htm - SecurityToken string `json:"securityToken,omitempty" protobuf:"bytes,6,opt,name=securityToken"` - - // LifecycleRule specifies how to manage bucket's lifecycle - LifecycleRule *OSSLifecycleRule `json:"lifecycleRule,omitempty" protobuf:"bytes,7,opt,name=lifecycleRule"` - - // UseSDKCreds tells the driver to figure out credentials based on sdk defaults. - UseSDKCreds bool `json:"useSDKCreds,omitempty" protobuf:"varint,8,opt,name=useSDKCreds"` -} - -// OSSArtifact is the location of an Alibaba Cloud OSS artifact -type OSSArtifact struct { - OSSBucket `json:",inline" protobuf:"bytes,1,opt,name=oSSBucket"` - - // Key is the path in the bucket where the artifact resides - Key string `json:"key" protobuf:"bytes,2,opt,name=key"` -} - -// OSSLifecycleRule specifies how to manage bucket's lifecycle -type OSSLifecycleRule struct { - // MarkInfrequentAccessAfterDays is the number of days before we convert the objects in the bucket to Infrequent Access (IA) storage type - MarkInfrequentAccessAfterDays int32 `json:"markInfrequentAccessAfterDays,omitempty" protobuf:"varint,1,opt,name=markInfrequentAccessAfterDays"` - - // MarkDeletionAfterDays is the number of days before we delete objects in the bucket - MarkDeletionAfterDays int32 `json:"markDeletionAfterDays,omitempty" protobuf:"varint,2,opt,name=markDeletionAfterDays"` -} - -func (o *OSSArtifact) GetKey() (string, error) { - return o.Key, nil -} - -func (o *OSSArtifact) SetKey(key string) error { - o.Key = key - return nil -} - -func (o *OSSArtifact) HasLocation() bool { - return o != nil && o.Bucket != "" && o.Endpoint != "" && o.Key != "" -} - -// ExecutorConfig holds configurations of an executor container. 
-type ExecutorConfig struct { - // ServiceAccountName specifies the service account name of the executor container. - ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,1,opt,name=serviceAccountName"` -} - -// ScriptTemplate is a template subtype to enable scripting through code steps -type ScriptTemplate struct { - apiv1.Container `json:",inline" protobuf:"bytes,1,opt,name=container"` - - // Source contains the source code of the script to execute - Source string `json:"source" protobuf:"bytes,2,opt,name=source"` -} - -// ResourceTemplate is a template subtype to manipulate kubernetes resources -type ResourceTemplate struct { - // Action is the action to perform to the resource. - // Must be one of: get, create, apply, delete, replace, patch - Action string `json:"action" protobuf:"bytes,1,opt,name=action"` - - // MergeStrategy is the strategy used to merge a patch. It defaults to "strategic" - // Must be one of: strategic, merge, json - MergeStrategy string `json:"mergeStrategy,omitempty" protobuf:"bytes,2,opt,name=mergeStrategy"` - - // Manifest contains the kubernetes manifest - Manifest string `json:"manifest,omitempty" protobuf:"bytes,3,opt,name=manifest"` - - // ManifestFrom is the source for a single kubernetes manifest - ManifestFrom *ManifestFrom `json:"manifestFrom,omitempty" protobuf:"bytes,8,opt,name=manifestFrom"` - - // SetOwnerReference sets the reference to the workflow on the OwnerReference of generated resource. 
- SetOwnerReference bool `json:"setOwnerReference,omitempty" protobuf:"varint,4,opt,name=setOwnerReference"` - - // SuccessCondition is a label selector expression which describes the conditions - // of the k8s resource in which it is acceptable to proceed to the following step - SuccessCondition string `json:"successCondition,omitempty" protobuf:"bytes,5,opt,name=successCondition"` - - // FailureCondition is a label selector expression which describes the conditions - // of the k8s resource in which the step was considered failed - FailureCondition string `json:"failureCondition,omitempty" protobuf:"bytes,6,opt,name=failureCondition"` - - // Flags is a set of additional options passed to kubectl before submitting a resource - // I.e. to disable resource validation: - // flags: [ - // "--validate=false" # disable resource validation - // ] - Flags []string `json:"flags,omitempty" protobuf:"varint,7,opt,name=flags"` -} - -type ManifestFrom struct { - // Artifact contains the artifact to use - Artifact *Artifact `json:"artifact" protobuf:"bytes,1,opt,name=artifact"` -} - -// GetType returns the type of this template -func (tmpl *Template) GetType() TemplateType { - if tmpl.Container != nil { - return TemplateTypeContainer - } - if tmpl.ContainerSet != nil { - return TemplateTypeContainerSet - } - if tmpl.Steps != nil { - return TemplateTypeSteps - } - if tmpl.DAG != nil { - return TemplateTypeDAG - } - if tmpl.Script != nil { - return TemplateTypeScript - } - if tmpl.Resource != nil { - return TemplateTypeResource - } - if tmpl.Data != nil { - return TemplateTypeData - } - if tmpl.Suspend != nil { - return TemplateTypeSuspend - } - if tmpl.HTTP != nil { - return TemplateTypeHTTP - } - if tmpl.Plugin != nil { - return TemplateTypePlugin - } - return TemplateTypeUnknown -} - -func (tmpl *Template) GetNodeType() NodeType { - if tmpl.RetryStrategy != nil { - return NodeTypeRetry - } - switch tmpl.GetType() { - case TemplateTypeContainer, TemplateTypeContainerSet, 
TemplateTypeScript, TemplateTypeResource, TemplateTypeData: - return NodeTypePod - case TemplateTypeDAG: - return NodeTypeDAG - case TemplateTypeSteps: - return NodeTypeSteps - case TemplateTypeSuspend: - return NodeTypeSuspend - case TemplateTypeHTTP: - return NodeTypeHTTP - case TemplateTypePlugin: - return NodeTypePlugin - } - return "" -} - -// IsPodType returns whether or not the template is a pod type -func (tmpl *Template) IsPodType() bool { - switch tmpl.GetType() { - case TemplateTypeContainer, TemplateTypeContainerSet, TemplateTypeScript, TemplateTypeResource, TemplateTypeData: - return true - } - return false -} - -// IsLeaf returns whether or not the template is a leaf -func (tmpl *Template) IsLeaf() bool { - switch tmpl.GetType() { - case TemplateTypeContainer, TemplateTypeContainerSet, TemplateTypeScript, TemplateTypeResource, TemplateTypeData, TemplateTypeHTTP, TemplateTypePlugin: - return true - } - return false -} - -func (tmpl *Template) IsMainContainerName(containerName string) bool { - for _, c := range tmpl.GetMainContainerNames() { - if c == containerName { - return true - } - } - return false -} - -func (tmpl *Template) GetMainContainerNames() []string { - if tmpl != nil && tmpl.ContainerSet != nil { - out := make([]string, 0) - for _, c := range tmpl.ContainerSet.GetContainers() { - out = append(out, c.Name) - } - return out - } else { - return []string{"main"} - } -} - -func (tmpl *Template) HasSequencedContainers() bool { - return tmpl != nil && tmpl.ContainerSet.HasSequencedContainers() -} - -func (tmpl *Template) GetVolumeMounts() []apiv1.VolumeMount { - if tmpl.Container != nil { - return tmpl.Container.VolumeMounts - } else if tmpl.Script != nil { - return tmpl.Script.VolumeMounts - } else if tmpl.ContainerSet != nil { - return tmpl.ContainerSet.VolumeMounts - } - return nil -} - -// HasOutput returns true if the template can and will have outputs (i.e. exit code and result). 
-// In the case of a plugin, we assume it will have outputs because we cannot know at runtime. -func (tmpl *Template) HasOutput() bool { - return tmpl.Container != nil || tmpl.ContainerSet.HasContainerNamed("main") || tmpl.Script != nil || tmpl.Data != nil || tmpl.HTTP != nil || tmpl.Plugin != nil -} - -func (t *Template) IsDaemon() bool { - return t != nil && t.Daemon != nil && *t.Daemon -} - -// if logs should be saved as an artifact -func (tmpl *Template) SaveLogsAsArtifact() bool { - return tmpl != nil && tmpl.ArchiveLocation.IsArchiveLogs() -} - -func (t *Template) GetRetryStrategy() (wait.Backoff, error) { - return t.ContainerSet.GetRetryStrategy() -} - -func (t *Template) HasOutputs() bool { - return t != nil && t.Outputs.HasOutputs() -} - -// DAGTemplate is a template subtype for directed acyclic graph templates -type DAGTemplate struct { - // Target are one or more names of targets to execute in a DAG - Target string `json:"target,omitempty" protobuf:"bytes,1,opt,name=target"` - - // Tasks are a list of DAG tasks - // +patchStrategy=merge - // +patchMergeKey=name - Tasks []DAGTask `json:"tasks" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=tasks"` - - // This flag is for DAG logic. The DAG logic has a built-in "fail fast" feature to stop scheduling new steps, - // as soon as it detects that one of the DAG nodes is failed. Then it waits until all DAG nodes are completed - // before failing the DAG itself. - // The FailFast flag default is true, if set to false, it will allow a DAG to run all branches of the DAG to - // completion (either success or failure), regardless of the failed outcomes of branches in the DAG. 
- // More info and example about this feature at https://github.com/argoproj/argo-workflows/issues/1442 - FailFast *bool `json:"failFast,omitempty" protobuf:"varint,3,opt,name=failFast"` -} - -// DAGTask represents a node in the graph during DAG execution -type DAGTask struct { - // Name is the name of the target - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - - // Name of template to execute - Template string `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` - - // Inline is the template. Template must be empty if this is declared (and vice-versa). - Inline *Template `json:"inline,omitempty" protobuf:"bytes,14,opt,name=inline"` - - // Arguments are the parameter and artifact arguments to the template - Arguments Arguments `json:"arguments,omitempty" protobuf:"bytes,3,opt,name=arguments"` - - // TemplateRef is the reference to the template resource to execute. - TemplateRef *TemplateRef `json:"templateRef,omitempty" protobuf:"bytes,4,opt,name=templateRef"` - - // Dependencies are name of other targets which this depends on - Dependencies []string `json:"dependencies,omitempty" protobuf:"bytes,5,rep,name=dependencies"` - - // WithItems expands a task into multiple parallel tasks from the items in the list - WithItems []Item `json:"withItems,omitempty" protobuf:"bytes,6,rep,name=withItems"` - - // WithParam expands a task into multiple parallel tasks from the value in the parameter, - // which is expected to be a JSON list. - WithParam string `json:"withParam,omitempty" protobuf:"bytes,7,opt,name=withParam"` - - // WithSequence expands a task into a numeric sequence - WithSequence *Sequence `json:"withSequence,omitempty" protobuf:"bytes,8,opt,name=withSequence"` - - // When is an expression in which the task should conditionally execute - When string `json:"when,omitempty" protobuf:"bytes,9,opt,name=when"` - - // ContinueOn makes argo to proceed with the following step even if this step fails. 
- // Errors and Failed states can be specified - ContinueOn *ContinueOn `json:"continueOn,omitempty" protobuf:"bytes,10,opt,name=continueOn"` - - // OnExit is a template reference which is invoked at the end of the - // template, irrespective of the success, failure, or error of the - // primary template. - // DEPRECATED: Use Hooks[exit].Template instead. - OnExit string `json:"onExit,omitempty" protobuf:"bytes,11,opt,name=onExit"` - - // Depends are name of other targets which this depends on - Depends string `json:"depends,omitempty" protobuf:"bytes,12,opt,name=depends"` - - // Hooks hold the lifecycle hook which is invoked at lifecycle of - // task, irrespective of the success, failure, or error status of the primary task - Hooks LifecycleHooks `json:"hooks,omitempty" protobuf:"bytes,13,opt,name=hooks"` -} - -func (t *DAGTask) GetName() string { - return t.Name -} - -func (t *DAGTask) IsDAGTask() bool { - return true -} - -func (t *DAGTask) IsWorkflowStep() bool { - return false -} - -var _ TemplateReferenceHolder = &DAGTask{} - -func (t *DAGTask) GetExitHook(args Arguments) *LifecycleHook { - if !t.HasExitHook() { - return nil - } - if t.OnExit != "" { - return &LifecycleHook{Template: t.OnExit, Arguments: args} - } - return t.Hooks.GetExitHook().WithArgs(args) -} - -func (t *DAGTask) HasExitHook() bool { - return (t.Hooks != nil && t.Hooks.HasExitHook()) || t.OnExit != "" -} - -func (t *DAGTask) GetTemplate() *Template { - return t.Inline -} - -func (t *DAGTask) GetTemplateName() string { - return t.Template -} - -func (t *DAGTask) GetTemplateRef() *TemplateRef { - return t.TemplateRef -} - -func (t *DAGTask) ShouldExpand() bool { - return len(t.WithItems) != 0 || t.WithParam != "" || t.WithSequence != nil -} - -// SuspendTemplate is a template subtype to suspend a workflow at a predetermined point in time -type SuspendTemplate struct { - // Duration is the seconds to wait before automatically resuming a template. Must be a string. Default unit is seconds. 
- // Could also be a Duration, e.g.: "2m", "6h" - Duration string `json:"duration,omitempty" protobuf:"bytes,1,opt,name=duration"` -} - -// GetArtifactByName returns an input artifact by its name -func (in *Inputs) GetArtifactByName(name string) *Artifact { - if in == nil { - return nil - } - return in.Artifacts.GetArtifactByName(name) -} - -// GetParameterByName returns an input parameter by its name -func (in *Inputs) GetParameterByName(name string) *Parameter { - for _, param := range in.Parameters { - if param.Name == name { - return ¶m - } - } - return nil -} - -// HasInputs returns whether or not there are any inputs -func (in *Inputs) HasInputs() bool { - if len(in.Artifacts) > 0 { - return true - } - if len(in.Parameters) > 0 { - return true - } - return false -} - -// HasOutputs returns whether or not there are any outputs -func (out *Outputs) HasOutputs() bool { - if out == nil { - return false - } - if out.Result != nil { - return true - } - if out.ExitCode != nil { - return true - } - if len(out.Artifacts) > 0 { - return true - } - if len(out.Parameters) > 0 { - return true - } - return false -} - -func (out *Outputs) GetArtifactByName(name string) *Artifact { - if out == nil { - return nil - } - return out.Artifacts.GetArtifactByName(name) -} - -func (out *Outputs) HasResult() bool { - return out != nil && out.Result != nil -} - -func (out *Outputs) HasArtifacts() bool { - return out != nil && len(out.Artifacts) > 0 -} - -func (out *Outputs) HasParameters() bool { - return out != nil && len(out.Parameters) > 0 -} - -const LogsSuffix = "-logs" - -func (out *Outputs) HasLogs() bool { - if out == nil { - return false - } - for _, a := range out.Artifacts { - if strings.HasSuffix(a.Name, LogsSuffix) { - return true - } - } - return false -} - -// GetArtifactByName retrieves an artifact by its name -func (args *Arguments) GetArtifactByName(name string) *Artifact { - return args.Artifacts.GetArtifactByName(name) -} - -// GetParameterByName retrieves a 
parameter by its name -func (args *Arguments) GetParameterByName(name string) *Parameter { - for _, param := range args.Parameters { - if param.Name == name { - return ¶m - } - } - return nil -} - -func (a *Artifact) GetArchive() *ArchiveStrategy { - if a == nil || a.Archive == nil { - return &ArchiveStrategy{} - } - return a.Archive -} - -// GetTemplateByName retrieves a defined template by its name -func (wf *Workflow) GetTemplateByName(name string) *Template { - for _, t := range wf.Spec.Templates { - if t.Name == name { - return &t - } - } - if wf.Status.StoredWorkflowSpec != nil { - for _, t := range wf.Status.StoredWorkflowSpec.Templates { - if t.Name == name { - return &t - } - } - } - for _, t := range wf.Status.StoredTemplates { - if t.Name == name { - return &t - } - } - return nil -} - -func (wf *Workflow) GetNodeByName(nodeName string) (*NodeStatus, error) { - nodeID := wf.NodeID(nodeName) - return wf.Status.Nodes.Get(nodeID) -} - -// GetResourceScope returns the template scope of workflow. -func (wf *Workflow) GetResourceScope() ResourceScope { - return ResourceScopeLocal -} - -// GetWorkflowSpec returns the Spec of a workflow. -func (wf *Workflow) GetWorkflowSpec() WorkflowSpec { - return wf.Spec -} - -// NodeID creates a deterministic node ID based on a node name -func (wf *Workflow) NodeID(name string) string { - if name == wf.ObjectMeta.Name { - return wf.ObjectMeta.Name - } - h := fnv.New32a() - _, _ = h.Write([]byte(name)) - return fmt.Sprintf("%s-%v", wf.ObjectMeta.Name, h.Sum32()) -} - -// GetStoredTemplate retrieves a template from stored templates of the workflow. 
-func (wf *Workflow) GetStoredTemplate(scope ResourceScope, resourceName string, caller TemplateReferenceHolder) *Template { - tmplID, storageNeeded := resolveTemplateReference(scope, resourceName, caller) - if !storageNeeded { - // Local templates aren't stored - return nil - } - if tmpl, ok := wf.Status.StoredTemplates[tmplID]; ok { - return tmpl.DeepCopy() - } - return nil -} - -// SetStoredTemplate stores a new template in stored templates of the workflow. -func (wf *Workflow) SetStoredTemplate(scope ResourceScope, resourceName string, caller TemplateReferenceHolder, tmpl *Template) (bool, error) { - tmplID, storageNeeded := resolveTemplateReference(scope, resourceName, caller) - if !storageNeeded { - // Don't need to store local templates - return false, nil - } - if _, ok := wf.Status.StoredTemplates[tmplID]; !ok { - if wf.Status.StoredTemplates == nil { - wf.Status.StoredTemplates = map[string]Template{} - } - wf.Status.StoredTemplates[tmplID] = *tmpl - return true, nil - } - return false, nil -} - -// SetStoredInlineTemplate stores a inline template in stored templates of the workflow. -func (wf *Workflow) SetStoredInlineTemplate(scope ResourceScope, resourceName string, tmpl *Template) error { - // Store inline templates in steps. - for _, steps := range tmpl.Steps { - for _, step := range steps.Steps { - if step.GetTemplate() != nil { - _, err := wf.SetStoredTemplate(scope, resourceName, &step, step.GetTemplate()) - if err != nil { - return err - } - } - } - } - // Store inline templates in DAG tasks. 
- if tmpl.DAG != nil { - for _, task := range tmpl.DAG.Tasks { - if task.GetTemplate() != nil { - _, err := wf.SetStoredTemplate(scope, resourceName, &task, task.GetTemplate()) - if err != nil { - return err - } - } - } - } - - return nil -} - -// resolveTemplateReference resolves the stored template name of a given template holder on the template scope and determines -// if it should be stored -func resolveTemplateReference(callerScope ResourceScope, resourceName string, caller TemplateReferenceHolder) (string, bool) { - tmplRef := caller.GetTemplateRef() - if tmplRef != nil { - // We are calling an external WorkflowTemplate or ClusterWorkflowTemplate. Template storage is needed - // We need to determine if we're calling a WorkflowTemplate or a ClusterWorkflowTemplate - referenceScope := ResourceScopeNamespaced - if tmplRef.ClusterScope { - referenceScope = ResourceScopeCluster - } - return fmt.Sprintf("%s/%s/%s", referenceScope, tmplRef.Name, tmplRef.Template), true - } else if callerScope != ResourceScopeLocal { - // Either a WorkflowTemplate or a ClusterWorkflowTemplate is calling a template inside itself. Template storage is needed - if caller.GetTemplate() != nil { - // If we have an inlined template here, use the inlined name - return fmt.Sprintf("%s/%s/inline/%s", callerScope, resourceName, caller.GetName()), true - } - return fmt.Sprintf("%s/%s/%s", callerScope, resourceName, caller.GetTemplateName()), true - } else { - // A Workflow is calling a template inside itself. Template storage is not needed - return "", false - } -} - -// ContinueOn defines if a workflow should continue even if a task or step fails/errors. -// It can be specified if the workflow should continue when the pod errors, fails or both. 
-type ContinueOn struct { - // +optional - Error bool `json:"error,omitempty" protobuf:"varint,1,opt,name=error"` - // +optional - Failed bool `json:"failed,omitempty" protobuf:"varint,2,opt,name=failed"` -} - -func continues(c *ContinueOn, phase NodePhase) bool { - if c == nil { - return false - } - if c.Error && phase == NodeError { - return true - } - if c.Failed && phase == NodeFailed { - return true - } - return false -} - -// ContinuesOn returns whether the DAG should be proceeded if the task fails or errors. -func (t *DAGTask) ContinuesOn(phase NodePhase) bool { - return continues(t.ContinueOn, phase) -} - -// ContinuesOn returns whether the StepGroup should be proceeded if the task fails or errors. -func (s *WorkflowStep) ContinuesOn(phase NodePhase) bool { - return continues(s.ContinueOn, phase) -} - -type MetricType string - -const ( - MetricTypeGauge MetricType = "Gauge" - MetricTypeHistogram MetricType = "Histogram" - MetricTypeCounter MetricType = "Counter" - MetricTypeUnknown MetricType = "Unknown" -) - -// Metrics are a list of metrics emitted from a Workflow/Template -type Metrics struct { - // Prometheus is a list of prometheus metrics to be emitted - Prometheus []*Prometheus `json:"prometheus" protobuf:"bytes,1,rep,name=prometheus"` -} - -// Prometheus is a prometheus metric to be emitted -type Prometheus struct { - // Name is the name of the metric - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Labels is a list of metric labels - Labels []*MetricLabel `json:"labels,omitempty" protobuf:"bytes,2,rep,name=labels"` - // Help is a string that describes the metric - Help string `json:"help" protobuf:"bytes,3,opt,name=help"` - // When is a conditional statement that decides when to emit the metric - When string `json:"when,omitempty" protobuf:"bytes,4,opt,name=when"` - // Gauge is a gauge metric - Gauge *Gauge `json:"gauge,omitempty" protobuf:"bytes,5,opt,name=gauge"` - // Histogram is a histogram metric - Histogram *Histogram 
`json:"histogram,omitempty" protobuf:"bytes,6,opt,name=histogram"` - // Counter is a counter metric - Counter *Counter `json:"counter,omitempty" protobuf:"bytes,7,opt,name=counter"` -} - -func (p *Prometheus) GetMetricLabels() map[string]string { - labels := make(map[string]string) - for _, label := range p.Labels { - labels[label.Key] = label.Value - } - return labels -} - -func (p *Prometheus) GetMetricType() MetricType { - if p.Gauge != nil { - return MetricTypeGauge - } - if p.Histogram != nil { - return MetricTypeHistogram - } - if p.Counter != nil { - return MetricTypeCounter - } - return MetricTypeUnknown -} - -func (p *Prometheus) GetValueString() string { - switch p.GetMetricType() { - case MetricTypeGauge: - return p.Gauge.Value - case MetricTypeCounter: - return p.Counter.Value - case MetricTypeHistogram: - return p.Histogram.Value - default: - return "" - } -} - -func (p *Prometheus) SetValueString(val string) { - switch p.GetMetricType() { - case MetricTypeGauge: - p.Gauge.Value = val - case MetricTypeCounter: - p.Counter.Value = val - case MetricTypeHistogram: - p.Histogram.Value = val - } -} - -func (p *Prometheus) GetDesc() string { - // This serves as a hash for the metric - // TODO: Make sure this is what we want to use as the hash - labels := p.GetMetricLabels() - desc := p.Name + "{" - for _, key := range sortedMapStringStringKeys(labels) { - desc += key + "=" + labels[key] + "," - } - if p.Histogram != nil { - sortedBuckets := p.Histogram.GetBuckets() - sort.Float64s(sortedBuckets) - for _, bucket := range sortedBuckets { - desc += "bucket=" + fmt.Sprint(bucket) + "," - } - } - desc += "}" - return desc -} - -func sortedMapStringStringKeys(in map[string]string) []string { - var stringList []string - for key := range in { - stringList = append(stringList, key) - } - sort.Strings(stringList) - return stringList -} - -func (p *Prometheus) IsRealtime() bool { - return p.GetMetricType() == MetricTypeGauge && p.Gauge.Realtime != nil && 
*p.Gauge.Realtime -} - -// MetricLabel is a single label for a prometheus metric -type MetricLabel struct { - Key string `json:"key" protobuf:"bytes,1,opt,name=key"` - Value string `json:"value" protobuf:"bytes,2,opt,name=value"` -} - -// Gauge is a Gauge prometheus metric -type Gauge struct { - // Value is the value to be used in the operation with the metric's current value. If no operation is set, - // value is the value of the metric - Value string `json:"value" protobuf:"bytes,1,opt,name=value"` - // Realtime emits this metric in real time if applicable - Realtime *bool `json:"realtime" protobuf:"varint,2,opt,name=realtime"` - // Operation defines the operation to apply with value and the metrics' current value - // +optional - Operation GaugeOperation `json:"operation,omitempty" protobuf:"bytes,3,opt,name=operation"` -} - -// A GaugeOperation is the set of operations that can be used in a gauge metric. -type GaugeOperation string - -const ( - GaugeOperationSet GaugeOperation = "Set" - GaugeOperationAdd GaugeOperation = "Add" - GaugeOperationSub GaugeOperation = "Sub" -) - -// Histogram is a Histogram prometheus metric -type Histogram struct { - // Value is the value of the metric - Value string `json:"value" protobuf:"bytes,3,opt,name=value"` - // Buckets is a list of bucket divisors for the histogram - Buckets []Amount `json:"buckets" protobuf:"bytes,4,rep,name=buckets"` -} - -func (in *Histogram) GetBuckets() []float64 { - buckets := make([]float64, len(in.Buckets)) - for i, bucket := range in.Buckets { - buckets[i], _ = bucket.Float64() - } - return buckets -} - -// Counter is a Counter prometheus metric -type Counter struct { - // Value is the value of the metric - Value string `json:"value" protobuf:"bytes,1,opt,name=value"` -} - -// Memoization enables caching for the Outputs of the template -type Memoize struct { - // Key is the key to use as the caching key - Key string `json:"key" protobuf:"bytes,1,opt,name=key"` - // Cache sets and configures the 
kind of cache - Cache *Cache `json:"cache" protobuf:"bytes,2,opt,name=cache"` - // MaxAge is the maximum age (e.g. "180s", "24h") of an entry that is still considered valid. If an entry is older - // than the MaxAge, it will be ignored. - MaxAge string `json:"maxAge" protobuf:"bytes,3,opt,name=maxAge"` -} - -// MemoizationStatus is the status of this memoized node -type MemoizationStatus struct { - // Hit indicates whether this node was created from a cache entry - Hit bool `json:"hit" protobuf:"bytes,1,opt,name=hit"` - // Key is the name of the key used for this node's cache - Key string `json:"key" protobuf:"bytes,2,opt,name=key"` - // Cache is the name of the cache that was used - CacheName string `json:"cacheName" protobuf:"bytes,3,opt,name=cacheName"` -} - -// Cache is the configuration for the type of cache to be used -type Cache struct { - // ConfigMap sets a ConfigMap-based cache - ConfigMap *apiv1.ConfigMapKeySelector `json:"configMap" protobuf:"bytes,1,opt,name=configMap"` -} - -type SynchronizationAction interface { - LockWaiting(holderKey, lockKey string, currentHolders []string) bool - LockAcquired(holderKey, lockKey string, currentHolders []string) bool - LockReleased(holderKey, lockKey string) bool -} - -type SemaphoreHolding struct { - // Semaphore stores the semaphore name. - Semaphore string `json:"semaphore,omitempty" protobuf:"bytes,1,opt,name=semaphore"` - // Holders stores the list of current holder names in the workflow. - // +listType=atomic - Holders []string `json:"holders,omitempty" protobuf:"bytes,2,opt,name=holders"` -} - -type SemaphoreStatus struct { - // Holding stores the list of resource acquired synchronization lock for workflows. - Holding []SemaphoreHolding `json:"holding,omitempty" protobuf:"bytes,1,opt,name=holding"` - // Waiting indicates the list of current synchronization lock holders. 
- Waiting []SemaphoreHolding `json:"waiting,omitempty" protobuf:"bytes,2,opt,name=waiting"` -} - -var _ SynchronizationAction = &SemaphoreStatus{} - -func (ss *SemaphoreStatus) GetHolding(semaphoreName string) (int, SemaphoreHolding) { - for i, holder := range ss.Holding { - if holder.Semaphore == semaphoreName { - return i, holder - } - } - return -1, SemaphoreHolding{} -} - -func (ss *SemaphoreStatus) GetWaiting(semaphoreName string) (int, SemaphoreHolding) { - for i, holder := range ss.Waiting { - if holder.Semaphore == semaphoreName { - return i, holder - } - } - return -1, SemaphoreHolding{} -} - -func (ss *SemaphoreStatus) LockWaiting(holderKey, lockKey string, currentHolders []string) bool { - i, semaphoreWaiting := ss.GetWaiting(lockKey) - if i < 0 { - ss.Waiting = append(ss.Waiting, SemaphoreHolding{Semaphore: lockKey, Holders: currentHolders}) - } else { - semaphoreWaiting.Holders = currentHolders - ss.Waiting[i] = semaphoreWaiting - } - return true -} - -func (ss *SemaphoreStatus) LockAcquired(holderKey, lockKey string, currentHolders []string) bool { - i, semaphoreHolding := ss.GetHolding(lockKey) - holdingName := holderKey - if i < 0 { - ss.Holding = append(ss.Holding, SemaphoreHolding{Semaphore: lockKey, Holders: []string{holdingName}}) - return true - } else if !slice.ContainsString(semaphoreHolding.Holders, holdingName) { - semaphoreHolding.Holders = append(semaphoreHolding.Holders, holdingName) - ss.Holding[i] = semaphoreHolding - return true - } - return false -} - -func (ss *SemaphoreStatus) LockReleased(holderKey, lockKey string) bool { - i, semaphoreHolding := ss.GetHolding(lockKey) - holdingName := holderKey - - if i >= 0 { - semaphoreHolding.Holders = slice.RemoveString(semaphoreHolding.Holders, holdingName) - ss.Holding[i] = semaphoreHolding - return true - } - return false -} - -// MutexHolding describes the mutex and the object which is holding it. 
// MutexHolding records one mutex reference and the holder reference for it.
type MutexHolding struct {
	// Reference for the mutex
	// e.g: ${namespace}/mutex/${mutexName}
	Mutex string `json:"mutex,omitempty" protobuf:"bytes,1,opt,name=mutex"`
	// Holder is a reference to the object which holds the Mutex.
	// Holding Scenario:
	//   1. Current workflow's NodeID which is holding the lock.
	//      e.g: ${NodeID}
	// Waiting Scenario:
	//   1. Current workflow or other workflow NodeID which is holding the lock.
	//      e.g: ${WorkflowName}/${NodeID}
	Holder string `json:"holder,omitempty" protobuf:"bytes,2,opt,name=holder"`
}

// MutexStatus contains which objects hold mutex locks, and which objects this workflow is waiting on to release locks.
type MutexStatus struct {
	// Holding is a list of mutexes and their respective objects that are held by mutex lock for this workflow.
	// +listType=atomic
	Holding []MutexHolding `json:"holding,omitempty" protobuf:"bytes,1,opt,name=holding"`
	// Waiting is a list of mutexes and their respective objects this workflow is waiting for.
- // +listType=atomic - Waiting []MutexHolding `json:"waiting,omitempty" protobuf:"bytes,2,opt,name=waiting"` -} - -var _ SynchronizationAction = &MutexStatus{} - -func (ms *MutexStatus) GetHolding(mutexName string) (int, MutexHolding) { - for i, holder := range ms.Holding { - if holder.Mutex == mutexName { - return i, holder - } - } - return -1, MutexHolding{} -} - -func (ms *MutexStatus) GetWaiting(mutexName string) (int, MutexHolding) { - for i, holder := range ms.Waiting { - if holder.Mutex == mutexName { - return i, holder - } - } - return -1, MutexHolding{} -} - -func (ms *MutexStatus) LockWaiting(holderKey, lockKey string, currentHolders []string) bool { - if len(currentHolders) == 0 { - return false - } - - i, mutexWaiting := ms.GetWaiting(lockKey) - if i < 0 { - ms.Waiting = append(ms.Waiting, MutexHolding{Mutex: lockKey, Holder: currentHolders[0]}) - return true - } else if mutexWaiting.Holder != currentHolders[0] { - mutexWaiting.Holder = currentHolders[0] - ms.Waiting[i] = mutexWaiting - return true - } - return false -} - -func CheckHolderKeyVersion(holderKey string) HoldingNameVersion { - items := strings.Split(holderKey, "/") - if len(items) == 2 || len(items) == 3 { - return HoldingNameV2 - } - return HoldingNameV1 -} - -func (ms *MutexStatus) LockAcquired(holderKey, lockKey string, currentHolders []string) bool { - i, mutexHolding := ms.GetHolding(lockKey) - holdingName := holderKey - if i < 0 { - ms.Holding = append(ms.Holding, MutexHolding{Mutex: lockKey, Holder: holdingName}) - return true - } else if mutexHolding.Holder != holdingName { - mutexHolding.Holder = holdingName - ms.Holding[i] = mutexHolding - return true - } - return false -} - -func (ms *MutexStatus) LockReleased(holderKey, lockKey string) bool { - i, holder := ms.GetHolding(lockKey) - holdingName := holderKey - if i >= 0 && holder.Holder == holdingName { - ms.Holding = append(ms.Holding[:i], ms.Holding[i+1:]...) 
- return true - } - return false -} - -// SynchronizationStatus stores the status of semaphore and mutex. -type SynchronizationStatus struct { - // Semaphore stores this workflow's Semaphore holder details - Semaphore *SemaphoreStatus `json:"semaphore,omitempty" protobuf:"bytes,1,opt,name=semaphore"` - // Mutex stores this workflow's mutex holder details - Mutex *MutexStatus `json:"mutex,omitempty" protobuf:"bytes,2,opt,name=mutex"` -} - -func (ss *SynchronizationStatus) GetStatus(syncType SynchronizationType) SynchronizationAction { - switch syncType { - case SynchronizationTypeSemaphore: - return ss.Semaphore - case SynchronizationTypeMutex: - return ss.Mutex - default: - panic("invalid syncType in GetStatus") - } -} - -// NodeSynchronizationStatus stores the status of a node -type NodeSynchronizationStatus struct { - // Waiting is the name of the lock that this node is waiting for - Waiting string `json:"waiting,omitempty" protobuf:"bytes,1,opt,name=waiting"` -} - -type NodeFlag struct { - // Hooked tracks whether or not this node was triggered by hook or onExit - Hooked bool `json:"hooked,omitempty" protobuf:"varint,1,opt,name=hooked"` - // Retried tracks whether or not this node was retried by retryStrategy - Retried bool `json:"retried,omitempty" protobuf:"varint,2,opt,name=retried"` -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 1b1a82267..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,4332 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -// Code generated by deepcopy-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - json "encoding/json" - - v1 "k8s.io/api/core/v1" - policyv1 "k8s.io/api/policy/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - intstr "k8s.io/apimachinery/pkg/util/intstr" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Amount) DeepCopyInto(out *Amount) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Amount. -func (in *Amount) DeepCopy() *Amount { - if in == nil { - return nil - } - out := new(Amount) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ArchiveStrategy) DeepCopyInto(out *ArchiveStrategy) { - *out = *in - if in.Tar != nil { - in, out := &in.Tar, &out.Tar - *out = new(TarStrategy) - (*in).DeepCopyInto(*out) - } - if in.None != nil { - in, out := &in.None, &out.None - *out = new(NoneStrategy) - **out = **in - } - if in.Zip != nil { - in, out := &in.Zip, &out.Zip - *out = new(ZipStrategy) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArchiveStrategy. -func (in *ArchiveStrategy) DeepCopy() *ArchiveStrategy { - if in == nil { - return nil - } - out := new(ArchiveStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Arguments) DeepCopyInto(out *Arguments) { - *out = *in - if in.Parameters != nil { - in, out := &in.Parameters, &out.Parameters - *out = make([]Parameter, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Artifacts != nil { - in, out := &in.Artifacts, &out.Artifacts - *out = make(Artifacts, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Arguments. -func (in *Arguments) DeepCopy() *Arguments { - if in == nil { - return nil - } - out := new(Arguments) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ArtGCStatus) DeepCopyInto(out *ArtGCStatus) { - *out = *in - if in.StrategiesProcessed != nil { - in, out := &in.StrategiesProcessed, &out.StrategiesProcessed - *out = make(map[ArtifactGCStrategy]bool, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.PodsRecouped != nil { - in, out := &in.PodsRecouped, &out.PodsRecouped - *out = make(map[string]bool, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtGCStatus. -func (in *ArtGCStatus) DeepCopy() *ArtGCStatus { - if in == nil { - return nil - } - out := new(ArtGCStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Artifact) DeepCopyInto(out *Artifact) { - *out = *in - if in.Mode != nil { - in, out := &in.Mode, &out.Mode - *out = new(int32) - **out = **in - } - in.ArtifactLocation.DeepCopyInto(&out.ArtifactLocation) - if in.Archive != nil { - in, out := &in.Archive, &out.Archive - *out = new(ArchiveStrategy) - (*in).DeepCopyInto(*out) - } - if in.ArtifactGC != nil { - in, out := &in.ArtifactGC, &out.ArtifactGC - *out = new(ArtifactGC) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Artifact. -func (in *Artifact) DeepCopy() *Artifact { - if in == nil { - return nil - } - out := new(Artifact) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ArtifactGC) DeepCopyInto(out *ArtifactGC) { - *out = *in - if in.PodMetadata != nil { - in, out := &in.PodMetadata, &out.PodMetadata - *out = new(Metadata) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactGC. -func (in *ArtifactGC) DeepCopy() *ArtifactGC { - if in == nil { - return nil - } - out := new(ArtifactGC) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ArtifactGCSpec) DeepCopyInto(out *ArtifactGCSpec) { - *out = *in - if in.ArtifactsByNode != nil { - in, out := &in.ArtifactsByNode, &out.ArtifactsByNode - *out = make(map[string]ArtifactNodeSpec, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactGCSpec. 
-func (in *ArtifactGCSpec) DeepCopy() *ArtifactGCSpec { - if in == nil { - return nil - } - out := new(ArtifactGCSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ArtifactGCStatus) DeepCopyInto(out *ArtifactGCStatus) { - *out = *in - if in.ArtifactResultsByNode != nil { - in, out := &in.ArtifactResultsByNode, &out.ArtifactResultsByNode - *out = make(map[string]ArtifactResultNodeStatus, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactGCStatus. -func (in *ArtifactGCStatus) DeepCopy() *ArtifactGCStatus { - if in == nil { - return nil - } - out := new(ArtifactGCStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ArtifactLocation) DeepCopyInto(out *ArtifactLocation) { - *out = *in - if in.ArchiveLogs != nil { - in, out := &in.ArchiveLogs, &out.ArchiveLogs - *out = new(bool) - **out = **in - } - if in.S3 != nil { - in, out := &in.S3, &out.S3 - *out = new(S3Artifact) - (*in).DeepCopyInto(*out) - } - if in.Git != nil { - in, out := &in.Git, &out.Git - *out = new(GitArtifact) - (*in).DeepCopyInto(*out) - } - if in.HTTP != nil { - in, out := &in.HTTP, &out.HTTP - *out = new(HTTPArtifact) - (*in).DeepCopyInto(*out) - } - if in.Artifactory != nil { - in, out := &in.Artifactory, &out.Artifactory - *out = new(ArtifactoryArtifact) - (*in).DeepCopyInto(*out) - } - if in.HDFS != nil { - in, out := &in.HDFS, &out.HDFS - *out = new(HDFSArtifact) - (*in).DeepCopyInto(*out) - } - if in.Raw != nil { - in, out := &in.Raw, &out.Raw - *out = new(RawArtifact) - **out = **in - } - if in.OSS != nil { - in, out := &in.OSS, &out.OSS - *out = new(OSSArtifact) - (*in).DeepCopyInto(*out) - } - if in.GCS != nil { - 
in, out := &in.GCS, &out.GCS - *out = new(GCSArtifact) - (*in).DeepCopyInto(*out) - } - if in.Azure != nil { - in, out := &in.Azure, &out.Azure - *out = new(AzureArtifact) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactLocation. -func (in *ArtifactLocation) DeepCopy() *ArtifactLocation { - if in == nil { - return nil - } - out := new(ArtifactLocation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ArtifactNodeSpec) DeepCopyInto(out *ArtifactNodeSpec) { - *out = *in - if in.ArchiveLocation != nil { - in, out := &in.ArchiveLocation, &out.ArchiveLocation - *out = new(ArtifactLocation) - (*in).DeepCopyInto(*out) - } - if in.Artifacts != nil { - in, out := &in.Artifacts, &out.Artifacts - *out = make(map[string]Artifact, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactNodeSpec. -func (in *ArtifactNodeSpec) DeepCopy() *ArtifactNodeSpec { - if in == nil { - return nil - } - out := new(ArtifactNodeSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ArtifactPaths) DeepCopyInto(out *ArtifactPaths) { - *out = *in - in.Artifact.DeepCopyInto(&out.Artifact) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactPaths. -func (in *ArtifactPaths) DeepCopy() *ArtifactPaths { - if in == nil { - return nil - } - out := new(ArtifactPaths) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ArtifactRepository) DeepCopyInto(out *ArtifactRepository) { - *out = *in - if in.ArchiveLogs != nil { - in, out := &in.ArchiveLogs, &out.ArchiveLogs - *out = new(bool) - **out = **in - } - if in.S3 != nil { - in, out := &in.S3, &out.S3 - *out = new(S3ArtifactRepository) - (*in).DeepCopyInto(*out) - } - if in.Artifactory != nil { - in, out := &in.Artifactory, &out.Artifactory - *out = new(ArtifactoryArtifactRepository) - (*in).DeepCopyInto(*out) - } - if in.HDFS != nil { - in, out := &in.HDFS, &out.HDFS - *out = new(HDFSArtifactRepository) - (*in).DeepCopyInto(*out) - } - if in.OSS != nil { - in, out := &in.OSS, &out.OSS - *out = new(OSSArtifactRepository) - (*in).DeepCopyInto(*out) - } - if in.GCS != nil { - in, out := &in.GCS, &out.GCS - *out = new(GCSArtifactRepository) - (*in).DeepCopyInto(*out) - } - if in.Azure != nil { - in, out := &in.Azure, &out.Azure - *out = new(AzureArtifactRepository) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactRepository. -func (in *ArtifactRepository) DeepCopy() *ArtifactRepository { - if in == nil { - return nil - } - out := new(ArtifactRepository) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ArtifactRepositoryRef) DeepCopyInto(out *ArtifactRepositoryRef) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactRepositoryRef. -func (in *ArtifactRepositoryRef) DeepCopy() *ArtifactRepositoryRef { - if in == nil { - return nil - } - out := new(ArtifactRepositoryRef) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ArtifactRepositoryRefStatus) DeepCopyInto(out *ArtifactRepositoryRefStatus) { - *out = *in - out.ArtifactRepositoryRef = in.ArtifactRepositoryRef - if in.ArtifactRepository != nil { - in, out := &in.ArtifactRepository, &out.ArtifactRepository - *out = new(ArtifactRepository) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactRepositoryRefStatus. -func (in *ArtifactRepositoryRefStatus) DeepCopy() *ArtifactRepositoryRefStatus { - if in == nil { - return nil - } - out := new(ArtifactRepositoryRefStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ArtifactResult) DeepCopyInto(out *ArtifactResult) { - *out = *in - if in.Error != nil { - in, out := &in.Error, &out.Error - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactResult. -func (in *ArtifactResult) DeepCopy() *ArtifactResult { - if in == nil { - return nil - } - out := new(ArtifactResult) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ArtifactResultNodeStatus) DeepCopyInto(out *ArtifactResultNodeStatus) { - *out = *in - if in.ArtifactResults != nil { - in, out := &in.ArtifactResults, &out.ArtifactResults - *out = make(map[string]ArtifactResult, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactResultNodeStatus. 
-func (in *ArtifactResultNodeStatus) DeepCopy() *ArtifactResultNodeStatus { - if in == nil { - return nil - } - out := new(ArtifactResultNodeStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ArtifactSearchQuery) DeepCopyInto(out *ArtifactSearchQuery) { - *out = *in - if in.ArtifactGCStrategies != nil { - in, out := &in.ArtifactGCStrategies, &out.ArtifactGCStrategies - *out = make(map[ArtifactGCStrategy]bool, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Deleted != nil { - in, out := &in.Deleted, &out.Deleted - *out = new(bool) - **out = **in - } - if in.NodeTypes != nil { - in, out := &in.NodeTypes, &out.NodeTypes - *out = make(map[NodeType]bool, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactSearchQuery. -func (in *ArtifactSearchQuery) DeepCopy() *ArtifactSearchQuery { - if in == nil { - return nil - } - out := new(ArtifactSearchQuery) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ArtifactSearchResult) DeepCopyInto(out *ArtifactSearchResult) { - *out = *in - in.Artifact.DeepCopyInto(&out.Artifact) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactSearchResult. -func (in *ArtifactSearchResult) DeepCopy() *ArtifactSearchResult { - if in == nil { - return nil - } - out := new(ArtifactSearchResult) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in ArtifactSearchResults) DeepCopyInto(out *ArtifactSearchResults) { - { - in := &in - *out = make(ArtifactSearchResults, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactSearchResults. -func (in ArtifactSearchResults) DeepCopy() ArtifactSearchResults { - if in == nil { - return nil - } - out := new(ArtifactSearchResults) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ArtifactoryArtifact) DeepCopyInto(out *ArtifactoryArtifact) { - *out = *in - in.ArtifactoryAuth.DeepCopyInto(&out.ArtifactoryAuth) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactoryArtifact. -func (in *ArtifactoryArtifact) DeepCopy() *ArtifactoryArtifact { - if in == nil { - return nil - } - out := new(ArtifactoryArtifact) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ArtifactoryArtifactRepository) DeepCopyInto(out *ArtifactoryArtifactRepository) { - *out = *in - in.ArtifactoryAuth.DeepCopyInto(&out.ArtifactoryAuth) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactoryArtifactRepository. -func (in *ArtifactoryArtifactRepository) DeepCopy() *ArtifactoryArtifactRepository { - if in == nil { - return nil - } - out := new(ArtifactoryArtifactRepository) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ArtifactoryAuth) DeepCopyInto(out *ArtifactoryAuth) { - *out = *in - if in.UsernameSecret != nil { - in, out := &in.UsernameSecret, &out.UsernameSecret - *out = new(v1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - if in.PasswordSecret != nil { - in, out := &in.PasswordSecret, &out.PasswordSecret - *out = new(v1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactoryAuth. -func (in *ArtifactoryAuth) DeepCopy() *ArtifactoryAuth { - if in == nil { - return nil - } - out := new(ArtifactoryAuth) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in Artifacts) DeepCopyInto(out *Artifacts) { - { - in := &in - *out = make(Artifacts, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Artifacts. -func (in Artifacts) DeepCopy() Artifacts { - if in == nil { - return nil - } - out := new(Artifacts) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureArtifact) DeepCopyInto(out *AzureArtifact) { - *out = *in - in.AzureBlobContainer.DeepCopyInto(&out.AzureBlobContainer) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureArtifact. -func (in *AzureArtifact) DeepCopy() *AzureArtifact { - if in == nil { - return nil - } - out := new(AzureArtifact) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AzureArtifactRepository) DeepCopyInto(out *AzureArtifactRepository) { - *out = *in - in.AzureBlobContainer.DeepCopyInto(&out.AzureBlobContainer) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureArtifactRepository. -func (in *AzureArtifactRepository) DeepCopy() *AzureArtifactRepository { - if in == nil { - return nil - } - out := new(AzureArtifactRepository) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureBlobContainer) DeepCopyInto(out *AzureBlobContainer) { - *out = *in - if in.AccountKeySecret != nil { - in, out := &in.AccountKeySecret, &out.AccountKeySecret - *out = new(v1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureBlobContainer. -func (in *AzureBlobContainer) DeepCopy() *AzureBlobContainer { - if in == nil { - return nil - } - out := new(AzureBlobContainer) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Backoff) DeepCopyInto(out *Backoff) { - *out = *in - if in.Factor != nil { - in, out := &in.Factor, &out.Factor - *out = new(intstr.IntOrString) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backoff. -func (in *Backoff) DeepCopy() *Backoff { - if in == nil { - return nil - } - out := new(Backoff) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *BasicAuth) DeepCopyInto(out *BasicAuth) { - *out = *in - if in.UsernameSecret != nil { - in, out := &in.UsernameSecret, &out.UsernameSecret - *out = new(v1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - if in.PasswordSecret != nil { - in, out := &in.PasswordSecret, &out.PasswordSecret - *out = new(v1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuth. -func (in *BasicAuth) DeepCopy() *BasicAuth { - if in == nil { - return nil - } - out := new(BasicAuth) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Cache) DeepCopyInto(out *Cache) { - *out = *in - if in.ConfigMap != nil { - in, out := &in.ConfigMap, &out.ConfigMap - *out = new(v1.ConfigMapKeySelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cache. -func (in *Cache) DeepCopy() *Cache { - if in == nil { - return nil - } - out := new(Cache) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClientCertAuth) DeepCopyInto(out *ClientCertAuth) { - *out = *in - if in.ClientCertSecret != nil { - in, out := &in.ClientCertSecret, &out.ClientCertSecret - *out = new(v1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - if in.ClientKeySecret != nil { - in, out := &in.ClientKeySecret, &out.ClientKeySecret - *out = new(v1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientCertAuth. 
-func (in *ClientCertAuth) DeepCopy() *ClientCertAuth { - if in == nil { - return nil - } - out := new(ClientCertAuth) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterWorkflowTemplate) DeepCopyInto(out *ClusterWorkflowTemplate) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterWorkflowTemplate. -func (in *ClusterWorkflowTemplate) DeepCopy() *ClusterWorkflowTemplate { - if in == nil { - return nil - } - out := new(ClusterWorkflowTemplate) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterWorkflowTemplate) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterWorkflowTemplateList) DeepCopyInto(out *ClusterWorkflowTemplateList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make(ClusterWorkflowTemplates, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterWorkflowTemplateList. -func (in *ClusterWorkflowTemplateList) DeepCopy() *ClusterWorkflowTemplateList { - if in == nil { - return nil - } - out := new(ClusterWorkflowTemplateList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *ClusterWorkflowTemplateList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in ClusterWorkflowTemplates) DeepCopyInto(out *ClusterWorkflowTemplates) { - { - in := &in - *out = make(ClusterWorkflowTemplates, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterWorkflowTemplates. -func (in ClusterWorkflowTemplates) DeepCopy() ClusterWorkflowTemplates { - if in == nil { - return nil - } - out := new(ClusterWorkflowTemplates) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Column) DeepCopyInto(out *Column) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Column. -func (in *Column) DeepCopy() *Column { - if in == nil { - return nil - } - out := new(Column) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Condition) DeepCopyInto(out *Condition) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. -func (in *Condition) DeepCopy() *Condition { - if in == nil { - return nil - } - out := new(Condition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in Conditions) DeepCopyInto(out *Conditions) { - { - in := &in - *out = make(Conditions, len(*in)) - copy(*out, *in) - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Conditions. -func (in Conditions) DeepCopy() Conditions { - if in == nil { - return nil - } - out := new(Conditions) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerNode) DeepCopyInto(out *ContainerNode) { - *out = *in - in.Container.DeepCopyInto(&out.Container) - if in.Dependencies != nil { - in, out := &in.Dependencies, &out.Dependencies - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerNode. -func (in *ContainerNode) DeepCopy() *ContainerNode { - if in == nil { - return nil - } - out := new(ContainerNode) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerSetRetryStrategy) DeepCopyInto(out *ContainerSetRetryStrategy) { - *out = *in - if in.Retries != nil { - in, out := &in.Retries, &out.Retries - *out = new(intstr.IntOrString) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerSetRetryStrategy. -func (in *ContainerSetRetryStrategy) DeepCopy() *ContainerSetRetryStrategy { - if in == nil { - return nil - } - out := new(ContainerSetRetryStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ContainerSetTemplate) DeepCopyInto(out *ContainerSetTemplate) { - *out = *in - if in.Containers != nil { - in, out := &in.Containers, &out.Containers - *out = make([]ContainerNode, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.VolumeMounts != nil { - in, out := &in.VolumeMounts, &out.VolumeMounts - *out = make([]v1.VolumeMount, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.RetryStrategy != nil { - in, out := &in.RetryStrategy, &out.RetryStrategy - *out = new(ContainerSetRetryStrategy) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerSetTemplate. -func (in *ContainerSetTemplate) DeepCopy() *ContainerSetTemplate { - if in == nil { - return nil - } - out := new(ContainerSetTemplate) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContinueOn) DeepCopyInto(out *ContinueOn) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContinueOn. -func (in *ContinueOn) DeepCopy() *ContinueOn { - if in == nil { - return nil - } - out := new(ContinueOn) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Counter) DeepCopyInto(out *Counter) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Counter. -func (in *Counter) DeepCopy() *Counter { - if in == nil { - return nil - } - out := new(Counter) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CreateS3BucketOptions) DeepCopyInto(out *CreateS3BucketOptions) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CreateS3BucketOptions. -func (in *CreateS3BucketOptions) DeepCopy() *CreateS3BucketOptions { - if in == nil { - return nil - } - out := new(CreateS3BucketOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CronWorkflow) DeepCopyInto(out *CronWorkflow) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronWorkflow. -func (in *CronWorkflow) DeepCopy() *CronWorkflow { - if in == nil { - return nil - } - out := new(CronWorkflow) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CronWorkflow) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CronWorkflowList) DeepCopyInto(out *CronWorkflowList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CronWorkflow, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronWorkflowList. 
-func (in *CronWorkflowList) DeepCopy() *CronWorkflowList { - if in == nil { - return nil - } - out := new(CronWorkflowList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CronWorkflowList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CronWorkflowSpec) DeepCopyInto(out *CronWorkflowSpec) { - *out = *in - in.WorkflowSpec.DeepCopyInto(&out.WorkflowSpec) - if in.StartingDeadlineSeconds != nil { - in, out := &in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds - *out = new(int64) - **out = **in - } - if in.SuccessfulJobsHistoryLimit != nil { - in, out := &in.SuccessfulJobsHistoryLimit, &out.SuccessfulJobsHistoryLimit - *out = new(int32) - **out = **in - } - if in.FailedJobsHistoryLimit != nil { - in, out := &in.FailedJobsHistoryLimit, &out.FailedJobsHistoryLimit - *out = new(int32) - **out = **in - } - if in.WorkflowMetadata != nil { - in, out := &in.WorkflowMetadata, &out.WorkflowMetadata - *out = new(metav1.ObjectMeta) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronWorkflowSpec. -func (in *CronWorkflowSpec) DeepCopy() *CronWorkflowSpec { - if in == nil { - return nil - } - out := new(CronWorkflowSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CronWorkflowStatus) DeepCopyInto(out *CronWorkflowStatus) { - *out = *in - if in.Active != nil { - in, out := &in.Active, &out.Active - *out = make([]v1.ObjectReference, len(*in)) - copy(*out, *in) - } - if in.LastScheduledTime != nil { - in, out := &in.LastScheduledTime, &out.LastScheduledTime - *out = (*in).DeepCopy() - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make(Conditions, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronWorkflowStatus. -func (in *CronWorkflowStatus) DeepCopy() *CronWorkflowStatus { - if in == nil { - return nil - } - out := new(CronWorkflowStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DAGTask) DeepCopyInto(out *DAGTask) { - *out = *in - if in.Inline != nil { - in, out := &in.Inline, &out.Inline - *out = new(Template) - (*in).DeepCopyInto(*out) - } - in.Arguments.DeepCopyInto(&out.Arguments) - if in.TemplateRef != nil { - in, out := &in.TemplateRef, &out.TemplateRef - *out = new(TemplateRef) - **out = **in - } - if in.Dependencies != nil { - in, out := &in.Dependencies, &out.Dependencies - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.WithItems != nil { - in, out := &in.WithItems, &out.WithItems - *out = make([]Item, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.WithSequence != nil { - in, out := &in.WithSequence, &out.WithSequence - *out = new(Sequence) - (*in).DeepCopyInto(*out) - } - if in.ContinueOn != nil { - in, out := &in.ContinueOn, &out.ContinueOn - *out = new(ContinueOn) - **out = **in - } - if in.Hooks != nil { - in, out := &in.Hooks, &out.Hooks - *out = make(LifecycleHooks, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy 
function, copying the receiver, creating a new DAGTask. -func (in *DAGTask) DeepCopy() *DAGTask { - if in == nil { - return nil - } - out := new(DAGTask) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DAGTemplate) DeepCopyInto(out *DAGTemplate) { - *out = *in - if in.Tasks != nil { - in, out := &in.Tasks, &out.Tasks - *out = make([]DAGTask, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.FailFast != nil { - in, out := &in.FailFast, &out.FailFast - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DAGTemplate. -func (in *DAGTemplate) DeepCopy() *DAGTemplate { - if in == nil { - return nil - } - out := new(DAGTemplate) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Data) DeepCopyInto(out *Data) { - *out = *in - in.Source.DeepCopyInto(&out.Source) - if in.Transformation != nil { - in, out := &in.Transformation, &out.Transformation - *out = make(Transformation, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Data. -func (in *Data) DeepCopy() *Data { - if in == nil { - return nil - } - out := new(Data) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DataSource) DeepCopyInto(out *DataSource) { - *out = *in - if in.ArtifactPaths != nil { - in, out := &in.ArtifactPaths, &out.ArtifactPaths - *out = new(ArtifactPaths) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSource. 
-func (in *DataSource) DeepCopy() *DataSource { - if in == nil { - return nil - } - out := new(DataSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Event) DeepCopyInto(out *Event) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event. -func (in *Event) DeepCopy() *Event { - if in == nil { - return nil - } - out := new(Event) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExecutorConfig) DeepCopyInto(out *ExecutorConfig) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecutorConfig. -func (in *ExecutorConfig) DeepCopy() *ExecutorConfig { - if in == nil { - return nil - } - out := new(ExecutorConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCSArtifact) DeepCopyInto(out *GCSArtifact) { - *out = *in - in.GCSBucket.DeepCopyInto(&out.GCSBucket) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCSArtifact. -func (in *GCSArtifact) DeepCopy() *GCSArtifact { - if in == nil { - return nil - } - out := new(GCSArtifact) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCSArtifactRepository) DeepCopyInto(out *GCSArtifactRepository) { - *out = *in - in.GCSBucket.DeepCopyInto(&out.GCSBucket) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCSArtifactRepository. 
-func (in *GCSArtifactRepository) DeepCopy() *GCSArtifactRepository { - if in == nil { - return nil - } - out := new(GCSArtifactRepository) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCSBucket) DeepCopyInto(out *GCSBucket) { - *out = *in - if in.ServiceAccountKeySecret != nil { - in, out := &in.ServiceAccountKeySecret, &out.ServiceAccountKeySecret - *out = new(v1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCSBucket. -func (in *GCSBucket) DeepCopy() *GCSBucket { - if in == nil { - return nil - } - out := new(GCSBucket) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Gauge) DeepCopyInto(out *Gauge) { - *out = *in - if in.Realtime != nil { - in, out := &in.Realtime, &out.Realtime - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gauge. -func (in *Gauge) DeepCopy() *Gauge { - if in == nil { - return nil - } - out := new(Gauge) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *GitArtifact) DeepCopyInto(out *GitArtifact) { - *out = *in - if in.Depth != nil { - in, out := &in.Depth, &out.Depth - *out = new(uint64) - **out = **in - } - if in.Fetch != nil { - in, out := &in.Fetch, &out.Fetch - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.UsernameSecret != nil { - in, out := &in.UsernameSecret, &out.UsernameSecret - *out = new(v1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - if in.PasswordSecret != nil { - in, out := &in.PasswordSecret, &out.PasswordSecret - *out = new(v1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - if in.SSHPrivateKeySecret != nil { - in, out := &in.SSHPrivateKeySecret, &out.SSHPrivateKeySecret - *out = new(v1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitArtifact. -func (in *GitArtifact) DeepCopy() *GitArtifact { - if in == nil { - return nil - } - out := new(GitArtifact) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HDFSArtifact) DeepCopyInto(out *HDFSArtifact) { - *out = *in - in.HDFSConfig.DeepCopyInto(&out.HDFSConfig) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HDFSArtifact. -func (in *HDFSArtifact) DeepCopy() *HDFSArtifact { - if in == nil { - return nil - } - out := new(HDFSArtifact) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HDFSArtifactRepository) DeepCopyInto(out *HDFSArtifactRepository) { - *out = *in - in.HDFSConfig.DeepCopyInto(&out.HDFSConfig) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HDFSArtifactRepository. 
-func (in *HDFSArtifactRepository) DeepCopy() *HDFSArtifactRepository { - if in == nil { - return nil - } - out := new(HDFSArtifactRepository) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HDFSConfig) DeepCopyInto(out *HDFSConfig) { - *out = *in - in.HDFSKrbConfig.DeepCopyInto(&out.HDFSKrbConfig) - if in.Addresses != nil { - in, out := &in.Addresses, &out.Addresses - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HDFSConfig. -func (in *HDFSConfig) DeepCopy() *HDFSConfig { - if in == nil { - return nil - } - out := new(HDFSConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HDFSKrbConfig) DeepCopyInto(out *HDFSKrbConfig) { - *out = *in - if in.KrbCCacheSecret != nil { - in, out := &in.KrbCCacheSecret, &out.KrbCCacheSecret - *out = new(v1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - if in.KrbKeytabSecret != nil { - in, out := &in.KrbKeytabSecret, &out.KrbKeytabSecret - *out = new(v1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - if in.KrbConfigConfigMap != nil { - in, out := &in.KrbConfigConfigMap, &out.KrbConfigConfigMap - *out = new(v1.ConfigMapKeySelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HDFSKrbConfig. -func (in *HDFSKrbConfig) DeepCopy() *HDFSKrbConfig { - if in == nil { - return nil - } - out := new(HDFSKrbConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *HTTP) DeepCopyInto(out *HTTP) { - *out = *in - if in.Headers != nil { - in, out := &in.Headers, &out.Headers - *out = make(HTTPHeaders, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.TimeoutSeconds != nil { - in, out := &in.TimeoutSeconds, &out.TimeoutSeconds - *out = new(int64) - **out = **in - } - if in.BodyFrom != nil { - in, out := &in.BodyFrom, &out.BodyFrom - *out = new(HTTPBodySource) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTP. -func (in *HTTP) DeepCopy() *HTTP { - if in == nil { - return nil - } - out := new(HTTP) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HTTPArtifact) DeepCopyInto(out *HTTPArtifact) { - *out = *in - if in.Headers != nil { - in, out := &in.Headers, &out.Headers - *out = make([]Header, len(*in)) - copy(*out, *in) - } - if in.Auth != nil { - in, out := &in.Auth, &out.Auth - *out = new(HTTPAuth) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPArtifact. -func (in *HTTPArtifact) DeepCopy() *HTTPArtifact { - if in == nil { - return nil - } - out := new(HTTPArtifact) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HTTPAuth) DeepCopyInto(out *HTTPAuth) { - *out = *in - in.ClientCert.DeepCopyInto(&out.ClientCert) - in.OAuth2.DeepCopyInto(&out.OAuth2) - in.BasicAuth.DeepCopyInto(&out.BasicAuth) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPAuth. 
-func (in *HTTPAuth) DeepCopy() *HTTPAuth { - if in == nil { - return nil - } - out := new(HTTPAuth) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HTTPBodySource) DeepCopyInto(out *HTTPBodySource) { - *out = *in - if in.Bytes != nil { - in, out := &in.Bytes, &out.Bytes - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPBodySource. -func (in *HTTPBodySource) DeepCopy() *HTTPBodySource { - if in == nil { - return nil - } - out := new(HTTPBodySource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HTTPHeader) DeepCopyInto(out *HTTPHeader) { - *out = *in - if in.ValueFrom != nil { - in, out := &in.ValueFrom, &out.ValueFrom - *out = new(HTTPHeaderSource) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeader. -func (in *HTTPHeader) DeepCopy() *HTTPHeader { - if in == nil { - return nil - } - out := new(HTTPHeader) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HTTPHeaderSource) DeepCopyInto(out *HTTPHeaderSource) { - *out = *in - if in.SecretKeyRef != nil { - in, out := &in.SecretKeyRef, &out.SecretKeyRef - *out = new(v1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeaderSource. 
-func (in *HTTPHeaderSource) DeepCopy() *HTTPHeaderSource { - if in == nil { - return nil - } - out := new(HTTPHeaderSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in HTTPHeaders) DeepCopyInto(out *HTTPHeaders) { - { - in := &in - *out = make(HTTPHeaders, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeaders. -func (in HTTPHeaders) DeepCopy() HTTPHeaders { - if in == nil { - return nil - } - out := new(HTTPHeaders) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Header) DeepCopyInto(out *Header) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Header. -func (in *Header) DeepCopy() *Header { - if in == nil { - return nil - } - out := new(Header) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Histogram) DeepCopyInto(out *Histogram) { - *out = *in - if in.Buckets != nil { - in, out := &in.Buckets, &out.Buckets - *out = make([]Amount, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Histogram. -func (in *Histogram) DeepCopy() *Histogram { - if in == nil { - return nil - } - out := new(Histogram) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Inputs) DeepCopyInto(out *Inputs) { - *out = *in - if in.Parameters != nil { - in, out := &in.Parameters, &out.Parameters - *out = make([]Parameter, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Artifacts != nil { - in, out := &in.Artifacts, &out.Artifacts - *out = make(Artifacts, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Inputs. -func (in *Inputs) DeepCopy() *Inputs { - if in == nil { - return nil - } - out := new(Inputs) - in.DeepCopyInto(out) - return out -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Item. -func (in *Item) DeepCopy() *Item { - if in == nil { - return nil - } - out := new(Item) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LabelKeys) DeepCopyInto(out *LabelKeys) { - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelKeys. -func (in *LabelKeys) DeepCopy() *LabelKeys { - if in == nil { - return nil - } - out := new(LabelKeys) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LabelValueFrom) DeepCopyInto(out *LabelValueFrom) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelValueFrom. 
-func (in *LabelValueFrom) DeepCopy() *LabelValueFrom { - if in == nil { - return nil - } - out := new(LabelValueFrom) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LabelValues) DeepCopyInto(out *LabelValues) { - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelValues. -func (in *LabelValues) DeepCopy() *LabelValues { - if in == nil { - return nil - } - out := new(LabelValues) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LifecycleHook) DeepCopyInto(out *LifecycleHook) { - *out = *in - in.Arguments.DeepCopyInto(&out.Arguments) - if in.TemplateRef != nil { - in, out := &in.TemplateRef, &out.TemplateRef - *out = new(TemplateRef) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleHook. -func (in *LifecycleHook) DeepCopy() *LifecycleHook { - if in == nil { - return nil - } - out := new(LifecycleHook) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in LifecycleHooks) DeepCopyInto(out *LifecycleHooks) { - { - in := &in - *out = make(LifecycleHooks, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleHooks. 
-func (in LifecycleHooks) DeepCopy() LifecycleHooks { - if in == nil { - return nil - } - out := new(LifecycleHooks) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Link) DeepCopyInto(out *Link) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Link. -func (in *Link) DeepCopy() *Link { - if in == nil { - return nil - } - out := new(Link) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ManifestFrom) DeepCopyInto(out *ManifestFrom) { - *out = *in - if in.Artifact != nil { - in, out := &in.Artifact, &out.Artifact - *out = new(Artifact) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManifestFrom. -func (in *ManifestFrom) DeepCopy() *ManifestFrom { - if in == nil { - return nil - } - out := new(ManifestFrom) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MemoizationStatus) DeepCopyInto(out *MemoizationStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoizationStatus. -func (in *MemoizationStatus) DeepCopy() *MemoizationStatus { - if in == nil { - return nil - } - out := new(MemoizationStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Memoize) DeepCopyInto(out *Memoize) { - *out = *in - if in.Cache != nil { - in, out := &in.Cache, &out.Cache - *out = new(Cache) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Memoize. -func (in *Memoize) DeepCopy() *Memoize { - if in == nil { - return nil - } - out := new(Memoize) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Metadata) DeepCopyInto(out *Metadata) { - *out = *in - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metadata. -func (in *Metadata) DeepCopy() *Metadata { - if in == nil { - return nil - } - out := new(Metadata) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MetricLabel) DeepCopyInto(out *MetricLabel) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricLabel. -func (in *MetricLabel) DeepCopy() *MetricLabel { - if in == nil { - return nil - } - out := new(MetricLabel) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Metrics) DeepCopyInto(out *Metrics) { - *out = *in - if in.Prometheus != nil { - in, out := &in.Prometheus, &out.Prometheus - *out = make([]*Prometheus, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(Prometheus) - (*in).DeepCopyInto(*out) - } - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metrics. -func (in *Metrics) DeepCopy() *Metrics { - if in == nil { - return nil - } - out := new(Metrics) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Mutex) DeepCopyInto(out *Mutex) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mutex. -func (in *Mutex) DeepCopy() *Mutex { - if in == nil { - return nil - } - out := new(Mutex) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MutexHolding) DeepCopyInto(out *MutexHolding) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutexHolding. -func (in *MutexHolding) DeepCopy() *MutexHolding { - if in == nil { - return nil - } - out := new(MutexHolding) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MutexStatus) DeepCopyInto(out *MutexStatus) { - *out = *in - if in.Holding != nil { - in, out := &in.Holding, &out.Holding - *out = make([]MutexHolding, len(*in)) - copy(*out, *in) - } - if in.Waiting != nil { - in, out := &in.Waiting, &out.Waiting - *out = make([]MutexHolding, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutexStatus. 
-func (in *MutexStatus) DeepCopy() *MutexStatus { - if in == nil { - return nil - } - out := new(MutexStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeFlag) DeepCopyInto(out *NodeFlag) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeFlag. -func (in *NodeFlag) DeepCopy() *NodeFlag { - if in == nil { - return nil - } - out := new(NodeFlag) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeResult) DeepCopyInto(out *NodeResult) { - *out = *in - if in.Outputs != nil { - in, out := &in.Outputs, &out.Outputs - *out = new(Outputs) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResult. -func (in *NodeResult) DeepCopy() *NodeResult { - if in == nil { - return nil - } - out := new(NodeResult) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NodeStatus) DeepCopyInto(out *NodeStatus) { - *out = *in - if in.TemplateRef != nil { - in, out := &in.TemplateRef, &out.TemplateRef - *out = new(TemplateRef) - **out = **in - } - in.StartedAt.DeepCopyInto(&out.StartedAt) - in.FinishedAt.DeepCopyInto(&out.FinishedAt) - if in.ResourcesDuration != nil { - in, out := &in.ResourcesDuration, &out.ResourcesDuration - *out = make(ResourcesDuration, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Daemoned != nil { - in, out := &in.Daemoned, &out.Daemoned - *out = new(bool) - **out = **in - } - if in.NodeFlag != nil { - in, out := &in.NodeFlag, &out.NodeFlag - *out = new(NodeFlag) - **out = **in - } - if in.Inputs != nil { - in, out := &in.Inputs, &out.Inputs - *out = new(Inputs) - (*in).DeepCopyInto(*out) - } - if in.Outputs != nil { - in, out := &in.Outputs, &out.Outputs - *out = new(Outputs) - (*in).DeepCopyInto(*out) - } - if in.Children != nil { - in, out := &in.Children, &out.Children - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.OutboundNodes != nil { - in, out := &in.OutboundNodes, &out.OutboundNodes - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.MemoizationStatus != nil { - in, out := &in.MemoizationStatus, &out.MemoizationStatus - *out = new(MemoizationStatus) - **out = **in - } - if in.SynchronizationStatus != nil { - in, out := &in.SynchronizationStatus, &out.SynchronizationStatus - *out = new(NodeSynchronizationStatus) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus. -func (in *NodeStatus) DeepCopy() *NodeStatus { - if in == nil { - return nil - } - out := new(NodeStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NodeSynchronizationStatus) DeepCopyInto(out *NodeSynchronizationStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSynchronizationStatus. -func (in *NodeSynchronizationStatus) DeepCopy() *NodeSynchronizationStatus { - if in == nil { - return nil - } - out := new(NodeSynchronizationStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in Nodes) DeepCopyInto(out *Nodes) { - { - in := &in - *out = make(Nodes, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Nodes. -func (in Nodes) DeepCopy() Nodes { - if in == nil { - return nil - } - out := new(Nodes) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NoneStrategy) DeepCopyInto(out *NoneStrategy) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoneStrategy. -func (in *NoneStrategy) DeepCopy() *NoneStrategy { - if in == nil { - return nil - } - out := new(NoneStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *OAuth2Auth) DeepCopyInto(out *OAuth2Auth) { - *out = *in - if in.ClientIDSecret != nil { - in, out := &in.ClientIDSecret, &out.ClientIDSecret - *out = new(v1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - if in.ClientSecretSecret != nil { - in, out := &in.ClientSecretSecret, &out.ClientSecretSecret - *out = new(v1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - if in.TokenURLSecret != nil { - in, out := &in.TokenURLSecret, &out.TokenURLSecret - *out = new(v1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - if in.Scopes != nil { - in, out := &in.Scopes, &out.Scopes - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.EndpointParams != nil { - in, out := &in.EndpointParams, &out.EndpointParams - *out = make([]OAuth2EndpointParam, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuth2Auth. -func (in *OAuth2Auth) DeepCopy() *OAuth2Auth { - if in == nil { - return nil - } - out := new(OAuth2Auth) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OAuth2EndpointParam) DeepCopyInto(out *OAuth2EndpointParam) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuth2EndpointParam. -func (in *OAuth2EndpointParam) DeepCopy() *OAuth2EndpointParam { - if in == nil { - return nil - } - out := new(OAuth2EndpointParam) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OSSArtifact) DeepCopyInto(out *OSSArtifact) { - *out = *in - in.OSSBucket.DeepCopyInto(&out.OSSBucket) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSSArtifact. 
-func (in *OSSArtifact) DeepCopy() *OSSArtifact { - if in == nil { - return nil - } - out := new(OSSArtifact) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OSSArtifactRepository) DeepCopyInto(out *OSSArtifactRepository) { - *out = *in - in.OSSBucket.DeepCopyInto(&out.OSSBucket) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSSArtifactRepository. -func (in *OSSArtifactRepository) DeepCopy() *OSSArtifactRepository { - if in == nil { - return nil - } - out := new(OSSArtifactRepository) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OSSBucket) DeepCopyInto(out *OSSBucket) { - *out = *in - if in.AccessKeySecret != nil { - in, out := &in.AccessKeySecret, &out.AccessKeySecret - *out = new(v1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - if in.SecretKeySecret != nil { - in, out := &in.SecretKeySecret, &out.SecretKeySecret - *out = new(v1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - if in.LifecycleRule != nil { - in, out := &in.LifecycleRule, &out.LifecycleRule - *out = new(OSSLifecycleRule) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSSBucket. -func (in *OSSBucket) DeepCopy() *OSSBucket { - if in == nil { - return nil - } - out := new(OSSBucket) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OSSLifecycleRule) DeepCopyInto(out *OSSLifecycleRule) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSSLifecycleRule. 
-func (in *OSSLifecycleRule) DeepCopy() *OSSLifecycleRule { - if in == nil { - return nil - } - out := new(OSSLifecycleRule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Object) DeepCopyInto(out *Object) { - *out = *in - if in.Value != nil { - in, out := &in.Value, &out.Value - *out = make(json.RawMessage, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Object. -func (in *Object) DeepCopy() *Object { - if in == nil { - return nil - } - out := new(Object) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Outputs) DeepCopyInto(out *Outputs) { - *out = *in - if in.Parameters != nil { - in, out := &in.Parameters, &out.Parameters - *out = make([]Parameter, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Artifacts != nil { - in, out := &in.Artifacts, &out.Artifacts - *out = make(Artifacts, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Result != nil { - in, out := &in.Result, &out.Result - *out = new(string) - **out = **in - } - if in.ExitCode != nil { - in, out := &in.ExitCode, &out.ExitCode - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Outputs. -func (in *Outputs) DeepCopy() *Outputs { - if in == nil { - return nil - } - out := new(Outputs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ParallelSteps) DeepCopyInto(out *ParallelSteps) { - *out = *in - if in.Steps != nil { - in, out := &in.Steps, &out.Steps - *out = make([]WorkflowStep, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParallelSteps. -func (in *ParallelSteps) DeepCopy() *ParallelSteps { - if in == nil { - return nil - } - out := new(ParallelSteps) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Parameter) DeepCopyInto(out *Parameter) { - *out = *in - if in.Default != nil { - in, out := &in.Default, &out.Default - *out = new(AnyString) - **out = **in - } - if in.Value != nil { - in, out := &in.Value, &out.Value - *out = new(AnyString) - **out = **in - } - if in.ValueFrom != nil { - in, out := &in.ValueFrom, &out.ValueFrom - *out = new(ValueFrom) - (*in).DeepCopyInto(*out) - } - if in.Enum != nil { - in, out := &in.Enum, &out.Enum - *out = make([]AnyString, len(*in)) - copy(*out, *in) - } - if in.Description != nil { - in, out := &in.Description, &out.Description - *out = new(AnyString) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Parameter. -func (in *Parameter) DeepCopy() *Parameter { - if in == nil { - return nil - } - out := new(Parameter) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Plugin) DeepCopyInto(out *Plugin) { - *out = *in - in.Object.DeepCopyInto(&out.Object) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugin. 
-func (in *Plugin) DeepCopy() *Plugin { - if in == nil { - return nil - } - out := new(Plugin) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodGC) DeepCopyInto(out *PodGC) { - *out = *in - if in.LabelSelector != nil { - in, out := &in.LabelSelector, &out.LabelSelector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodGC. -func (in *PodGC) DeepCopy() *PodGC { - if in == nil { - return nil - } - out := new(PodGC) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Prometheus) DeepCopyInto(out *Prometheus) { - *out = *in - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make([]*MetricLabel, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(MetricLabel) - **out = **in - } - } - } - if in.Gauge != nil { - in, out := &in.Gauge, &out.Gauge - *out = new(Gauge) - (*in).DeepCopyInto(*out) - } - if in.Histogram != nil { - in, out := &in.Histogram, &out.Histogram - *out = new(Histogram) - (*in).DeepCopyInto(*out) - } - if in.Counter != nil { - in, out := &in.Counter, &out.Counter - *out = new(Counter) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Prometheus. -func (in *Prometheus) DeepCopy() *Prometheus { - if in == nil { - return nil - } - out := new(Prometheus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RawArtifact) DeepCopyInto(out *RawArtifact) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RawArtifact. -func (in *RawArtifact) DeepCopy() *RawArtifact { - if in == nil { - return nil - } - out := new(RawArtifact) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceTemplate) DeepCopyInto(out *ResourceTemplate) { - *out = *in - if in.ManifestFrom != nil { - in, out := &in.ManifestFrom, &out.ManifestFrom - *out = new(ManifestFrom) - (*in).DeepCopyInto(*out) - } - if in.Flags != nil { - in, out := &in.Flags, &out.Flags - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceTemplate. -func (in *ResourceTemplate) DeepCopy() *ResourceTemplate { - if in == nil { - return nil - } - out := new(ResourceTemplate) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in ResourcesDuration) DeepCopyInto(out *ResourcesDuration) { - { - in := &in - *out = make(ResourcesDuration, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesDuration. -func (in ResourcesDuration) DeepCopy() ResourcesDuration { - if in == nil { - return nil - } - out := new(ResourcesDuration) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RetryAffinity) DeepCopyInto(out *RetryAffinity) { - *out = *in - if in.NodeAntiAffinity != nil { - in, out := &in.NodeAntiAffinity, &out.NodeAntiAffinity - *out = new(RetryNodeAntiAffinity) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetryAffinity. -func (in *RetryAffinity) DeepCopy() *RetryAffinity { - if in == nil { - return nil - } - out := new(RetryAffinity) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RetryNodeAntiAffinity) DeepCopyInto(out *RetryNodeAntiAffinity) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetryNodeAntiAffinity. -func (in *RetryNodeAntiAffinity) DeepCopy() *RetryNodeAntiAffinity { - if in == nil { - return nil - } - out := new(RetryNodeAntiAffinity) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RetryStrategy) DeepCopyInto(out *RetryStrategy) { - *out = *in - if in.Limit != nil { - in, out := &in.Limit, &out.Limit - *out = new(intstr.IntOrString) - **out = **in - } - if in.Backoff != nil { - in, out := &in.Backoff, &out.Backoff - *out = new(Backoff) - (*in).DeepCopyInto(*out) - } - if in.Affinity != nil { - in, out := &in.Affinity, &out.Affinity - *out = new(RetryAffinity) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetryStrategy. -func (in *RetryStrategy) DeepCopy() *RetryStrategy { - if in == nil { - return nil - } - out := new(RetryStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *S3Artifact) DeepCopyInto(out *S3Artifact) { - *out = *in - in.S3Bucket.DeepCopyInto(&out.S3Bucket) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Artifact. -func (in *S3Artifact) DeepCopy() *S3Artifact { - if in == nil { - return nil - } - out := new(S3Artifact) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *S3ArtifactRepository) DeepCopyInto(out *S3ArtifactRepository) { - *out = *in - in.S3Bucket.DeepCopyInto(&out.S3Bucket) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3ArtifactRepository. -func (in *S3ArtifactRepository) DeepCopy() *S3ArtifactRepository { - if in == nil { - return nil - } - out := new(S3ArtifactRepository) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *S3Bucket) DeepCopyInto(out *S3Bucket) { - *out = *in - if in.Insecure != nil { - in, out := &in.Insecure, &out.Insecure - *out = new(bool) - **out = **in - } - if in.AccessKeySecret != nil { - in, out := &in.AccessKeySecret, &out.AccessKeySecret - *out = new(v1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - if in.SecretKeySecret != nil { - in, out := &in.SecretKeySecret, &out.SecretKeySecret - *out = new(v1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - if in.CreateBucketIfNotPresent != nil { - in, out := &in.CreateBucketIfNotPresent, &out.CreateBucketIfNotPresent - *out = new(CreateS3BucketOptions) - **out = **in - } - if in.EncryptionOptions != nil { - in, out := &in.EncryptionOptions, &out.EncryptionOptions - *out = new(S3EncryptionOptions) - (*in).DeepCopyInto(*out) - } - if in.CASecret != nil { - in, out := &in.CASecret, &out.CASecret - *out = new(v1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Bucket. -func (in *S3Bucket) DeepCopy() *S3Bucket { - if in == nil { - return nil - } - out := new(S3Bucket) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *S3EncryptionOptions) DeepCopyInto(out *S3EncryptionOptions) { - *out = *in - if in.ServerSideCustomerKeySecret != nil { - in, out := &in.ServerSideCustomerKeySecret, &out.ServerSideCustomerKeySecret - *out = new(v1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3EncryptionOptions. -func (in *S3EncryptionOptions) DeepCopy() *S3EncryptionOptions { - if in == nil { - return nil - } - out := new(S3EncryptionOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *ScriptTemplate) DeepCopyInto(out *ScriptTemplate) { - *out = *in - in.Container.DeepCopyInto(&out.Container) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScriptTemplate. -func (in *ScriptTemplate) DeepCopy() *ScriptTemplate { - if in == nil { - return nil - } - out := new(ScriptTemplate) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SemaphoreHolding) DeepCopyInto(out *SemaphoreHolding) { - *out = *in - if in.Holders != nil { - in, out := &in.Holders, &out.Holders - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SemaphoreHolding. -func (in *SemaphoreHolding) DeepCopy() *SemaphoreHolding { - if in == nil { - return nil - } - out := new(SemaphoreHolding) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SemaphoreRef) DeepCopyInto(out *SemaphoreRef) { - *out = *in - if in.ConfigMapKeyRef != nil { - in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef - *out = new(v1.ConfigMapKeySelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SemaphoreRef. -func (in *SemaphoreRef) DeepCopy() *SemaphoreRef { - if in == nil { - return nil - } - out := new(SemaphoreRef) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *SemaphoreStatus) DeepCopyInto(out *SemaphoreStatus) { - *out = *in - if in.Holding != nil { - in, out := &in.Holding, &out.Holding - *out = make([]SemaphoreHolding, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Waiting != nil { - in, out := &in.Waiting, &out.Waiting - *out = make([]SemaphoreHolding, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SemaphoreStatus. -func (in *SemaphoreStatus) DeepCopy() *SemaphoreStatus { - if in == nil { - return nil - } - out := new(SemaphoreStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Sequence) DeepCopyInto(out *Sequence) { - *out = *in - if in.Count != nil { - in, out := &in.Count, &out.Count - *out = new(intstr.IntOrString) - **out = **in - } - if in.Start != nil { - in, out := &in.Start, &out.Start - *out = new(intstr.IntOrString) - **out = **in - } - if in.End != nil { - in, out := &in.End, &out.End - *out = new(intstr.IntOrString) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sequence. -func (in *Sequence) DeepCopy() *Sequence { - if in == nil { - return nil - } - out := new(Sequence) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Submit) DeepCopyInto(out *Submit) { - *out = *in - out.WorkflowTemplateRef = in.WorkflowTemplateRef - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Arguments != nil { - in, out := &in.Arguments, &out.Arguments - *out = new(Arguments) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Submit. 
-func (in *Submit) DeepCopy() *Submit { - if in == nil { - return nil - } - out := new(Submit) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SubmitOpts) DeepCopyInto(out *SubmitOpts) { - *out = *in - if in.Parameters != nil { - in, out := &in.Parameters, &out.Parameters - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.OwnerReference != nil { - in, out := &in.OwnerReference, &out.OwnerReference - *out = new(metav1.OwnerReference) - (*in).DeepCopyInto(*out) - } - if in.Priority != nil { - in, out := &in.Priority, &out.Priority - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubmitOpts. -func (in *SubmitOpts) DeepCopy() *SubmitOpts { - if in == nil { - return nil - } - out := new(SubmitOpts) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SuppliedValueFrom) DeepCopyInto(out *SuppliedValueFrom) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuppliedValueFrom. -func (in *SuppliedValueFrom) DeepCopy() *SuppliedValueFrom { - if in == nil { - return nil - } - out := new(SuppliedValueFrom) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SuspendTemplate) DeepCopyInto(out *SuspendTemplate) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuspendTemplate. 
-func (in *SuspendTemplate) DeepCopy() *SuspendTemplate { - if in == nil { - return nil - } - out := new(SuspendTemplate) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Synchronization) DeepCopyInto(out *Synchronization) { - *out = *in - if in.Semaphore != nil { - in, out := &in.Semaphore, &out.Semaphore - *out = new(SemaphoreRef) - (*in).DeepCopyInto(*out) - } - if in.Mutex != nil { - in, out := &in.Mutex, &out.Mutex - *out = new(Mutex) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Synchronization. -func (in *Synchronization) DeepCopy() *Synchronization { - if in == nil { - return nil - } - out := new(Synchronization) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SynchronizationStatus) DeepCopyInto(out *SynchronizationStatus) { - *out = *in - if in.Semaphore != nil { - in, out := &in.Semaphore, &out.Semaphore - *out = new(SemaphoreStatus) - (*in).DeepCopyInto(*out) - } - if in.Mutex != nil { - in, out := &in.Mutex, &out.Mutex - *out = new(MutexStatus) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SynchronizationStatus. -func (in *SynchronizationStatus) DeepCopy() *SynchronizationStatus { - if in == nil { - return nil - } - out := new(SynchronizationStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *TTLStrategy) DeepCopyInto(out *TTLStrategy) { - *out = *in - if in.SecondsAfterCompletion != nil { - in, out := &in.SecondsAfterCompletion, &out.SecondsAfterCompletion - *out = new(int32) - **out = **in - } - if in.SecondsAfterSuccess != nil { - in, out := &in.SecondsAfterSuccess, &out.SecondsAfterSuccess - *out = new(int32) - **out = **in - } - if in.SecondsAfterFailure != nil { - in, out := &in.SecondsAfterFailure, &out.SecondsAfterFailure - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TTLStrategy. -func (in *TTLStrategy) DeepCopy() *TTLStrategy { - if in == nil { - return nil - } - out := new(TTLStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TarStrategy) DeepCopyInto(out *TarStrategy) { - *out = *in - if in.CompressionLevel != nil { - in, out := &in.CompressionLevel, &out.CompressionLevel - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TarStrategy. -func (in *TarStrategy) DeepCopy() *TarStrategy { - if in == nil { - return nil - } - out := new(TarStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Template) DeepCopyInto(out *Template) { - *out = *in - in.Inputs.DeepCopyInto(&out.Inputs) - in.Outputs.DeepCopyInto(&out.Outputs) - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Affinity != nil { - in, out := &in.Affinity, &out.Affinity - *out = new(v1.Affinity) - (*in).DeepCopyInto(*out) - } - in.Metadata.DeepCopyInto(&out.Metadata) - if in.Daemon != nil { - in, out := &in.Daemon, &out.Daemon - *out = new(bool) - **out = **in - } - if in.Steps != nil { - in, out := &in.Steps, &out.Steps - *out = make([]ParallelSteps, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Container != nil { - in, out := &in.Container, &out.Container - *out = new(v1.Container) - (*in).DeepCopyInto(*out) - } - if in.ContainerSet != nil { - in, out := &in.ContainerSet, &out.ContainerSet - *out = new(ContainerSetTemplate) - (*in).DeepCopyInto(*out) - } - if in.Script != nil { - in, out := &in.Script, &out.Script - *out = new(ScriptTemplate) - (*in).DeepCopyInto(*out) - } - if in.Resource != nil { - in, out := &in.Resource, &out.Resource - *out = new(ResourceTemplate) - (*in).DeepCopyInto(*out) - } - if in.DAG != nil { - in, out := &in.DAG, &out.DAG - *out = new(DAGTemplate) - (*in).DeepCopyInto(*out) - } - if in.Suspend != nil { - in, out := &in.Suspend, &out.Suspend - *out = new(SuspendTemplate) - **out = **in - } - if in.Data != nil { - in, out := &in.Data, &out.Data - *out = new(Data) - (*in).DeepCopyInto(*out) - } - if in.HTTP != nil { - in, out := &in.HTTP, &out.HTTP - *out = new(HTTP) - (*in).DeepCopyInto(*out) - } - if in.Plugin != nil { - in, out := &in.Plugin, &out.Plugin - *out = new(Plugin) - (*in).DeepCopyInto(*out) - } - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]v1.Volume, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if 
in.InitContainers != nil { - in, out := &in.InitContainers, &out.InitContainers - *out = make([]UserContainer, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Sidecars != nil { - in, out := &in.Sidecars, &out.Sidecars - *out = make([]UserContainer, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.ArchiveLocation != nil { - in, out := &in.ArchiveLocation, &out.ArchiveLocation - *out = new(ArtifactLocation) - (*in).DeepCopyInto(*out) - } - if in.ActiveDeadlineSeconds != nil { - in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds - *out = new(intstr.IntOrString) - **out = **in - } - if in.RetryStrategy != nil { - in, out := &in.RetryStrategy, &out.RetryStrategy - *out = new(RetryStrategy) - (*in).DeepCopyInto(*out) - } - if in.Parallelism != nil { - in, out := &in.Parallelism, &out.Parallelism - *out = new(int64) - **out = **in - } - if in.FailFast != nil { - in, out := &in.FailFast, &out.FailFast - *out = new(bool) - **out = **in - } - if in.Tolerations != nil { - in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Priority != nil { - in, out := &in.Priority, &out.Priority - *out = new(int32) - **out = **in - } - if in.AutomountServiceAccountToken != nil { - in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken - *out = new(bool) - **out = **in - } - if in.Executor != nil { - in, out := &in.Executor, &out.Executor - *out = new(ExecutorConfig) - **out = **in - } - if in.HostAliases != nil { - in, out := &in.HostAliases, &out.HostAliases - *out = make([]v1.HostAlias, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.SecurityContext != nil { - in, out := &in.SecurityContext, &out.SecurityContext - *out = new(v1.PodSecurityContext) - (*in).DeepCopyInto(*out) - } - if in.Metrics != nil { - in, out := &in.Metrics, 
&out.Metrics - *out = new(Metrics) - (*in).DeepCopyInto(*out) - } - if in.Synchronization != nil { - in, out := &in.Synchronization, &out.Synchronization - *out = new(Synchronization) - (*in).DeepCopyInto(*out) - } - if in.Memoize != nil { - in, out := &in.Memoize, &out.Memoize - *out = new(Memoize) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Template. -func (in *Template) DeepCopy() *Template { - if in == nil { - return nil - } - out := new(Template) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TemplateRef) DeepCopyInto(out *TemplateRef) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateRef. -func (in *TemplateRef) DeepCopy() *TemplateRef { - if in == nil { - return nil - } - out := new(TemplateRef) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in Transformation) DeepCopyInto(out *Transformation) { - { - in := &in - *out = make(Transformation, len(*in)) - copy(*out, *in) - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Transformation. -func (in Transformation) DeepCopy() Transformation { - if in == nil { - return nil - } - out := new(Transformation) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TransformationStep) DeepCopyInto(out *TransformationStep) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformationStep. 
-func (in *TransformationStep) DeepCopy() *TransformationStep { - if in == nil { - return nil - } - out := new(TransformationStep) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *UserContainer) DeepCopyInto(out *UserContainer) { - *out = *in - in.Container.DeepCopyInto(&out.Container) - if in.MirrorVolumeMounts != nil { - in, out := &in.MirrorVolumeMounts, &out.MirrorVolumeMounts - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserContainer. -func (in *UserContainer) DeepCopy() *UserContainer { - if in == nil { - return nil - } - out := new(UserContainer) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ValueFrom) DeepCopyInto(out *ValueFrom) { - *out = *in - if in.Supplied != nil { - in, out := &in.Supplied, &out.Supplied - *out = new(SuppliedValueFrom) - **out = **in - } - if in.ConfigMapKeyRef != nil { - in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef - *out = new(v1.ConfigMapKeySelector) - (*in).DeepCopyInto(*out) - } - if in.Default != nil { - in, out := &in.Default, &out.Default - *out = new(AnyString) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueFrom. -func (in *ValueFrom) DeepCopy() *ValueFrom { - if in == nil { - return nil - } - out := new(ValueFrom) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Version) DeepCopyInto(out *Version) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Version. 
-func (in *Version) DeepCopy() *Version { - if in == nil { - return nil - } - out := new(Version) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeClaimGC) DeepCopyInto(out *VolumeClaimGC) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeClaimGC. -func (in *VolumeClaimGC) DeepCopy() *VolumeClaimGC { - if in == nil { - return nil - } - out := new(VolumeClaimGC) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Workflow) DeepCopyInto(out *Workflow) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workflow. -func (in *Workflow) DeepCopy() *Workflow { - if in == nil { - return nil - } - out := new(Workflow) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Workflow) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WorkflowArtifactGCTask) DeepCopyInto(out *WorkflowArtifactGCTask) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowArtifactGCTask. 
-func (in *WorkflowArtifactGCTask) DeepCopy() *WorkflowArtifactGCTask { - if in == nil { - return nil - } - out := new(WorkflowArtifactGCTask) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *WorkflowArtifactGCTask) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WorkflowArtifactGCTaskList) DeepCopyInto(out *WorkflowArtifactGCTaskList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]WorkflowArtifactGCTask, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowArtifactGCTaskList. -func (in *WorkflowArtifactGCTaskList) DeepCopy() *WorkflowArtifactGCTaskList { - if in == nil { - return nil - } - out := new(WorkflowArtifactGCTaskList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *WorkflowArtifactGCTaskList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WorkflowEventBinding) DeepCopyInto(out *WorkflowEventBinding) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowEventBinding. 
-func (in *WorkflowEventBinding) DeepCopy() *WorkflowEventBinding { - if in == nil { - return nil - } - out := new(WorkflowEventBinding) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *WorkflowEventBinding) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WorkflowEventBindingList) DeepCopyInto(out *WorkflowEventBindingList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]WorkflowEventBinding, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowEventBindingList. -func (in *WorkflowEventBindingList) DeepCopy() *WorkflowEventBindingList { - if in == nil { - return nil - } - out := new(WorkflowEventBindingList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *WorkflowEventBindingList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WorkflowEventBindingSpec) DeepCopyInto(out *WorkflowEventBindingSpec) { - *out = *in - out.Event = in.Event - if in.Submit != nil { - in, out := &in.Submit, &out.Submit - *out = new(Submit) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowEventBindingSpec. 
-func (in *WorkflowEventBindingSpec) DeepCopy() *WorkflowEventBindingSpec { - if in == nil { - return nil - } - out := new(WorkflowEventBindingSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WorkflowLevelArtifactGC) DeepCopyInto(out *WorkflowLevelArtifactGC) { - *out = *in - in.ArtifactGC.DeepCopyInto(&out.ArtifactGC) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowLevelArtifactGC. -func (in *WorkflowLevelArtifactGC) DeepCopy() *WorkflowLevelArtifactGC { - if in == nil { - return nil - } - out := new(WorkflowLevelArtifactGC) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WorkflowList) DeepCopyInto(out *WorkflowList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make(Workflows, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowList. -func (in *WorkflowList) DeepCopy() *WorkflowList { - if in == nil { - return nil - } - out := new(WorkflowList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *WorkflowList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *WorkflowMetadata) DeepCopyInto(out *WorkflowMetadata) { - *out = *in - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.LabelsFrom != nil { - in, out := &in.LabelsFrom, &out.LabelsFrom - *out = make(map[string]LabelValueFrom, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowMetadata. -func (in *WorkflowMetadata) DeepCopy() *WorkflowMetadata { - if in == nil { - return nil - } - out := new(WorkflowMetadata) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *WorkflowSpec) DeepCopyInto(out *WorkflowSpec) { - *out = *in - if in.Templates != nil { - in, out := &in.Templates, &out.Templates - *out = make([]Template, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.Arguments.DeepCopyInto(&out.Arguments) - if in.AutomountServiceAccountToken != nil { - in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken - *out = new(bool) - **out = **in - } - if in.Executor != nil { - in, out := &in.Executor, &out.Executor - *out = new(ExecutorConfig) - **out = **in - } - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]v1.Volume, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.VolumeClaimTemplates != nil { - in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates - *out = make([]v1.PersistentVolumeClaim, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Parallelism != nil { - in, out := &in.Parallelism, &out.Parallelism - *out = new(int64) - **out = **in - } - if in.ArtifactRepositoryRef != nil { - in, out := &in.ArtifactRepositoryRef, &out.ArtifactRepositoryRef - *out = new(ArtifactRepositoryRef) - **out = **in - } - if in.Suspend != nil { - in, out := &in.Suspend, &out.Suspend - *out = new(bool) - **out = **in - } - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Affinity != nil { - in, out := &in.Affinity, &out.Affinity - *out = new(v1.Affinity) - (*in).DeepCopyInto(*out) - } - if in.Tolerations != nil { - in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]v1.LocalObjectReference, len(*in)) - copy(*out, *in) - } - if 
in.HostNetwork != nil { - in, out := &in.HostNetwork, &out.HostNetwork - *out = new(bool) - **out = **in - } - if in.DNSPolicy != nil { - in, out := &in.DNSPolicy, &out.DNSPolicy - *out = new(v1.DNSPolicy) - **out = **in - } - if in.DNSConfig != nil { - in, out := &in.DNSConfig, &out.DNSConfig - *out = new(v1.PodDNSConfig) - (*in).DeepCopyInto(*out) - } - if in.TTLStrategy != nil { - in, out := &in.TTLStrategy, &out.TTLStrategy - *out = new(TTLStrategy) - (*in).DeepCopyInto(*out) - } - if in.ActiveDeadlineSeconds != nil { - in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds - *out = new(int64) - **out = **in - } - if in.Priority != nil { - in, out := &in.Priority, &out.Priority - *out = new(int32) - **out = **in - } - if in.PodGC != nil { - in, out := &in.PodGC, &out.PodGC - *out = new(PodGC) - (*in).DeepCopyInto(*out) - } - if in.PodPriority != nil { - in, out := &in.PodPriority, &out.PodPriority - *out = new(int32) - **out = **in - } - if in.HostAliases != nil { - in, out := &in.HostAliases, &out.HostAliases - *out = make([]v1.HostAlias, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.SecurityContext != nil { - in, out := &in.SecurityContext, &out.SecurityContext - *out = new(v1.PodSecurityContext) - (*in).DeepCopyInto(*out) - } - if in.PodDisruptionBudget != nil { - in, out := &in.PodDisruptionBudget, &out.PodDisruptionBudget - *out = new(policyv1.PodDisruptionBudgetSpec) - (*in).DeepCopyInto(*out) - } - if in.Metrics != nil { - in, out := &in.Metrics, &out.Metrics - *out = new(Metrics) - (*in).DeepCopyInto(*out) - } - if in.WorkflowTemplateRef != nil { - in, out := &in.WorkflowTemplateRef, &out.WorkflowTemplateRef - *out = new(WorkflowTemplateRef) - **out = **in - } - if in.Synchronization != nil { - in, out := &in.Synchronization, &out.Synchronization - *out = new(Synchronization) - (*in).DeepCopyInto(*out) - } - if in.VolumeClaimGC != nil { - in, out := &in.VolumeClaimGC, &out.VolumeClaimGC - *out = 
new(VolumeClaimGC) - **out = **in - } - if in.RetryStrategy != nil { - in, out := &in.RetryStrategy, &out.RetryStrategy - *out = new(RetryStrategy) - (*in).DeepCopyInto(*out) - } - if in.PodMetadata != nil { - in, out := &in.PodMetadata, &out.PodMetadata - *out = new(Metadata) - (*in).DeepCopyInto(*out) - } - if in.TemplateDefaults != nil { - in, out := &in.TemplateDefaults, &out.TemplateDefaults - *out = new(Template) - (*in).DeepCopyInto(*out) - } - if in.ArchiveLogs != nil { - in, out := &in.ArchiveLogs, &out.ArchiveLogs - *out = new(bool) - **out = **in - } - if in.Hooks != nil { - in, out := &in.Hooks, &out.Hooks - *out = make(LifecycleHooks, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - if in.WorkflowMetadata != nil { - in, out := &in.WorkflowMetadata, &out.WorkflowMetadata - *out = new(WorkflowMetadata) - (*in).DeepCopyInto(*out) - } - if in.ArtifactGC != nil { - in, out := &in.ArtifactGC, &out.ArtifactGC - *out = new(WorkflowLevelArtifactGC) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowSpec. -func (in *WorkflowSpec) DeepCopy() *WorkflowSpec { - if in == nil { - return nil - } - out := new(WorkflowSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *WorkflowStatus) DeepCopyInto(out *WorkflowStatus) { - *out = *in - in.StartedAt.DeepCopyInto(&out.StartedAt) - in.FinishedAt.DeepCopyInto(&out.FinishedAt) - if in.Nodes != nil { - in, out := &in.Nodes, &out.Nodes - *out = make(Nodes, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - if in.StoredTemplates != nil { - in, out := &in.StoredTemplates, &out.StoredTemplates - *out = make(map[string]Template, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - if in.PersistentVolumeClaims != nil { - in, out := &in.PersistentVolumeClaims, &out.PersistentVolumeClaims - *out = make([]v1.Volume, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Outputs != nil { - in, out := &in.Outputs, &out.Outputs - *out = new(Outputs) - (*in).DeepCopyInto(*out) - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make(Conditions, len(*in)) - copy(*out, *in) - } - if in.ResourcesDuration != nil { - in, out := &in.ResourcesDuration, &out.ResourcesDuration - *out = make(ResourcesDuration, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.StoredWorkflowSpec != nil { - in, out := &in.StoredWorkflowSpec, &out.StoredWorkflowSpec - *out = new(WorkflowSpec) - (*in).DeepCopyInto(*out) - } - if in.Synchronization != nil { - in, out := &in.Synchronization, &out.Synchronization - *out = new(SynchronizationStatus) - (*in).DeepCopyInto(*out) - } - if in.ArtifactRepositoryRef != nil { - in, out := &in.ArtifactRepositoryRef, &out.ArtifactRepositoryRef - *out = new(ArtifactRepositoryRefStatus) - (*in).DeepCopyInto(*out) - } - if in.ArtifactGCStatus != nil { - in, out := &in.ArtifactGCStatus, &out.ArtifactGCStatus - *out = new(ArtGCStatus) - (*in).DeepCopyInto(*out) - } - if in.TaskResultsCompletionStatus != nil { - in, out := &in.TaskResultsCompletionStatus, &out.TaskResultsCompletionStatus - *out = make(map[string]bool, len(*in)) - for key, 
val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowStatus. -func (in *WorkflowStatus) DeepCopy() *WorkflowStatus { - if in == nil { - return nil - } - out := new(WorkflowStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WorkflowStep) DeepCopyInto(out *WorkflowStep) { - *out = *in - if in.Inline != nil { - in, out := &in.Inline, &out.Inline - *out = new(Template) - (*in).DeepCopyInto(*out) - } - in.Arguments.DeepCopyInto(&out.Arguments) - if in.TemplateRef != nil { - in, out := &in.TemplateRef, &out.TemplateRef - *out = new(TemplateRef) - **out = **in - } - if in.WithItems != nil { - in, out := &in.WithItems, &out.WithItems - *out = make([]Item, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.WithSequence != nil { - in, out := &in.WithSequence, &out.WithSequence - *out = new(Sequence) - (*in).DeepCopyInto(*out) - } - if in.ContinueOn != nil { - in, out := &in.ContinueOn, &out.ContinueOn - *out = new(ContinueOn) - **out = **in - } - if in.Hooks != nil { - in, out := &in.Hooks, &out.Hooks - *out = make(LifecycleHooks, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowStep. -func (in *WorkflowStep) DeepCopy() *WorkflowStep { - if in == nil { - return nil - } - out := new(WorkflowStep) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *WorkflowTaskResult) DeepCopyInto(out *WorkflowTaskResult) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.NodeResult.DeepCopyInto(&out.NodeResult) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowTaskResult. -func (in *WorkflowTaskResult) DeepCopy() *WorkflowTaskResult { - if in == nil { - return nil - } - out := new(WorkflowTaskResult) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *WorkflowTaskResult) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WorkflowTaskResultList) DeepCopyInto(out *WorkflowTaskResultList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]WorkflowTaskResult, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowTaskResultList. -func (in *WorkflowTaskResultList) DeepCopy() *WorkflowTaskResultList { - if in == nil { - return nil - } - out := new(WorkflowTaskResultList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *WorkflowTaskResultList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *WorkflowTaskSet) DeepCopyInto(out *WorkflowTaskSet) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowTaskSet. -func (in *WorkflowTaskSet) DeepCopy() *WorkflowTaskSet { - if in == nil { - return nil - } - out := new(WorkflowTaskSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *WorkflowTaskSet) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WorkflowTaskSetList) DeepCopyInto(out *WorkflowTaskSetList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]WorkflowTaskSet, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowTaskSetList. -func (in *WorkflowTaskSetList) DeepCopy() *WorkflowTaskSetList { - if in == nil { - return nil - } - out := new(WorkflowTaskSetList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *WorkflowTaskSetList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *WorkflowTaskSetSpec) DeepCopyInto(out *WorkflowTaskSetSpec) { - *out = *in - if in.Tasks != nil { - in, out := &in.Tasks, &out.Tasks - *out = make(map[string]Template, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowTaskSetSpec. -func (in *WorkflowTaskSetSpec) DeepCopy() *WorkflowTaskSetSpec { - if in == nil { - return nil - } - out := new(WorkflowTaskSetSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WorkflowTaskSetStatus) DeepCopyInto(out *WorkflowTaskSetStatus) { - *out = *in - if in.Nodes != nil { - in, out := &in.Nodes, &out.Nodes - *out = make(map[string]NodeResult, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowTaskSetStatus. -func (in *WorkflowTaskSetStatus) DeepCopy() *WorkflowTaskSetStatus { - if in == nil { - return nil - } - out := new(WorkflowTaskSetStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WorkflowTemplate) DeepCopyInto(out *WorkflowTemplate) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowTemplate. -func (in *WorkflowTemplate) DeepCopy() *WorkflowTemplate { - if in == nil { - return nil - } - out := new(WorkflowTemplate) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *WorkflowTemplate) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WorkflowTemplateList) DeepCopyInto(out *WorkflowTemplateList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make(WorkflowTemplates, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowTemplateList. -func (in *WorkflowTemplateList) DeepCopy() *WorkflowTemplateList { - if in == nil { - return nil - } - out := new(WorkflowTemplateList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *WorkflowTemplateList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WorkflowTemplateRef) DeepCopyInto(out *WorkflowTemplateRef) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowTemplateRef. -func (in *WorkflowTemplateRef) DeepCopy() *WorkflowTemplateRef { - if in == nil { - return nil - } - out := new(WorkflowTemplateRef) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in WorkflowTemplates) DeepCopyInto(out *WorkflowTemplates) { - { - in := &in - *out = make(WorkflowTemplates, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowTemplates. -func (in WorkflowTemplates) DeepCopy() WorkflowTemplates { - if in == nil { - return nil - } - out := new(WorkflowTemplates) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in Workflows) DeepCopyInto(out *Workflows) { - { - in := &in - *out = make(Workflows, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workflows. -func (in Workflows) DeepCopy() Workflows { - if in == nil { - return nil - } - out := new(Workflows) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ZipStrategy) DeepCopyInto(out *ZipStrategy) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZipStrategy. -func (in *ZipStrategy) DeepCopy() *ZipStrategy { - if in == nil { - return nil - } - out := new(ZipStrategy) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/clientset.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/clientset.go deleted file mode 100644 index e1475cc2e..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/clientset.go +++ /dev/null @@ -1,81 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. 
- -package versioned - -import ( - "fmt" - - argoprojv1alpha1 "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1" - discovery "k8s.io/client-go/discovery" - rest "k8s.io/client-go/rest" - flowcontrol "k8s.io/client-go/util/flowcontrol" -) - -type Interface interface { - Discovery() discovery.DiscoveryInterface - ArgoprojV1alpha1() argoprojv1alpha1.ArgoprojV1alpha1Interface -} - -// Clientset contains the clients for groups. Each group has exactly one -// version included in a Clientset. -type Clientset struct { - *discovery.DiscoveryClient - argoprojV1alpha1 *argoprojv1alpha1.ArgoprojV1alpha1Client -} - -// ArgoprojV1alpha1 retrieves the ArgoprojV1alpha1Client -func (c *Clientset) ArgoprojV1alpha1() argoprojv1alpha1.ArgoprojV1alpha1Interface { - return c.argoprojV1alpha1 -} - -// Discovery retrieves the DiscoveryClient -func (c *Clientset) Discovery() discovery.DiscoveryInterface { - if c == nil { - return nil - } - return c.DiscoveryClient -} - -// NewForConfig creates a new Clientset for the given config. -// If config's RateLimiter is not set and QPS and Burst are acceptable, -// NewForConfig will generate a rate-limiter in configShallowCopy. 
-func NewForConfig(c *rest.Config) (*Clientset, error) { - configShallowCopy := *c - if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { - if configShallowCopy.Burst <= 0 { - return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") - } - configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) - } - var cs Clientset - var err error - cs.argoprojV1alpha1, err = argoprojv1alpha1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - - cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - return &cs, nil -} - -// NewForConfigOrDie creates a new Clientset for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *Clientset { - var cs Clientset - cs.argoprojV1alpha1 = argoprojv1alpha1.NewForConfigOrDie(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) - return &cs -} - -// New creates a new Clientset for the given RESTClient. -func New(c rest.Interface) *Clientset { - var cs Clientset - cs.argoprojV1alpha1 = argoprojv1alpha1.New(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClient(c) - return &cs -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/doc.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/doc.go deleted file mode 100644 index 0e0c2a890..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated clientset. 
-package versioned diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme/doc.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme/doc.go deleted file mode 100644 index 14db57a58..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -// This package contains the scheme of the automatically generated clientset. -package scheme diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme/register.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme/register.go deleted file mode 100644 index e97269603..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme/register.go +++ /dev/null @@ -1,40 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package scheme - -import ( - argoprojv1alpha1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" -) - -var Scheme = runtime.NewScheme() -var Codecs = serializer.NewCodecFactory(Scheme) -var ParameterCodec = runtime.NewParameterCodec(Scheme) -var localSchemeBuilder = runtime.SchemeBuilder{ - argoprojv1alpha1.AddToScheme, -} - -// AddToScheme adds all types of this clientset into the given scheme. 
This allows composition -// of clientsets, like in: -// -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) -// -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) -// -// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types -// correctly. -var AddToScheme = localSchemeBuilder.AddToScheme - -func init() { - v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) - utilruntime.Must(AddToScheme(Scheme)) -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/clusterworkflowtemplate.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/clusterworkflowtemplate.go deleted file mode 100644 index 698f3c0f0..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/clusterworkflowtemplate.go +++ /dev/null @@ -1,152 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - "time" - - v1alpha1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - scheme "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// ClusterWorkflowTemplatesGetter has a method to return a ClusterWorkflowTemplateInterface. -// A group's client should implement this interface. -type ClusterWorkflowTemplatesGetter interface { - ClusterWorkflowTemplates() ClusterWorkflowTemplateInterface -} - -// ClusterWorkflowTemplateInterface has methods to work with ClusterWorkflowTemplate resources. 
-type ClusterWorkflowTemplateInterface interface { - Create(ctx context.Context, clusterWorkflowTemplate *v1alpha1.ClusterWorkflowTemplate, opts v1.CreateOptions) (*v1alpha1.ClusterWorkflowTemplate, error) - Update(ctx context.Context, clusterWorkflowTemplate *v1alpha1.ClusterWorkflowTemplate, opts v1.UpdateOptions) (*v1alpha1.ClusterWorkflowTemplate, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterWorkflowTemplate, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterWorkflowTemplateList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterWorkflowTemplate, err error) - ClusterWorkflowTemplateExpansion -} - -// clusterWorkflowTemplates implements ClusterWorkflowTemplateInterface -type clusterWorkflowTemplates struct { - client rest.Interface -} - -// newClusterWorkflowTemplates returns a ClusterWorkflowTemplates -func newClusterWorkflowTemplates(c *ArgoprojV1alpha1Client) *clusterWorkflowTemplates { - return &clusterWorkflowTemplates{ - client: c.RESTClient(), - } -} - -// Get takes name of the clusterWorkflowTemplate, and returns the corresponding clusterWorkflowTemplate object, and an error if there is any. -func (c *clusterWorkflowTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterWorkflowTemplate, err error) { - result = &v1alpha1.ClusterWorkflowTemplate{} - err = c.client.Get(). - Resource("clusterworkflowtemplates"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterWorkflowTemplates that match those selectors. -func (c *clusterWorkflowTemplates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterWorkflowTemplateList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ClusterWorkflowTemplateList{} - err = c.client.Get(). - Resource("clusterworkflowtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterWorkflowTemplates. -func (c *clusterWorkflowTemplates) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clusterworkflowtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterWorkflowTemplate and creates it. Returns the server's representation of the clusterWorkflowTemplate, and an error, if there is any. -func (c *clusterWorkflowTemplates) Create(ctx context.Context, clusterWorkflowTemplate *v1alpha1.ClusterWorkflowTemplate, opts v1.CreateOptions) (result *v1alpha1.ClusterWorkflowTemplate, err error) { - result = &v1alpha1.ClusterWorkflowTemplate{} - err = c.client.Post(). - Resource("clusterworkflowtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterWorkflowTemplate). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterWorkflowTemplate and updates it. Returns the server's representation of the clusterWorkflowTemplate, and an error, if there is any. 
-func (c *clusterWorkflowTemplates) Update(ctx context.Context, clusterWorkflowTemplate *v1alpha1.ClusterWorkflowTemplate, opts v1.UpdateOptions) (result *v1alpha1.ClusterWorkflowTemplate, err error) { - result = &v1alpha1.ClusterWorkflowTemplate{} - err = c.client.Put(). - Resource("clusterworkflowtemplates"). - Name(clusterWorkflowTemplate.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterWorkflowTemplate). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterWorkflowTemplate and deletes it. Returns an error if one occurs. -func (c *clusterWorkflowTemplates) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterworkflowtemplates"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterWorkflowTemplates) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clusterworkflowtemplates"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterWorkflowTemplate. -func (c *clusterWorkflowTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterWorkflowTemplate, err error) { - result = &v1alpha1.ClusterWorkflowTemplate{} - err = c.client.Patch(pt). - Resource("clusterworkflowtemplates"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/cronworkflow.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/cronworkflow.go deleted file mode 100644 index 785680401..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/cronworkflow.go +++ /dev/null @@ -1,162 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - "time" - - v1alpha1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - scheme "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// CronWorkflowsGetter has a method to return a CronWorkflowInterface. -// A group's client should implement this interface. -type CronWorkflowsGetter interface { - CronWorkflows(namespace string) CronWorkflowInterface -} - -// CronWorkflowInterface has methods to work with CronWorkflow resources. 
-type CronWorkflowInterface interface { - Create(ctx context.Context, cronWorkflow *v1alpha1.CronWorkflow, opts v1.CreateOptions) (*v1alpha1.CronWorkflow, error) - Update(ctx context.Context, cronWorkflow *v1alpha1.CronWorkflow, opts v1.UpdateOptions) (*v1alpha1.CronWorkflow, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.CronWorkflow, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.CronWorkflowList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CronWorkflow, err error) - CronWorkflowExpansion -} - -// cronWorkflows implements CronWorkflowInterface -type cronWorkflows struct { - client rest.Interface - ns string -} - -// newCronWorkflows returns a CronWorkflows -func newCronWorkflows(c *ArgoprojV1alpha1Client, namespace string) *cronWorkflows { - return &cronWorkflows{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the cronWorkflow, and returns the corresponding cronWorkflow object, and an error if there is any. -func (c *cronWorkflows) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.CronWorkflow, err error) { - result = &v1alpha1.CronWorkflow{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cronworkflows"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CronWorkflows that match those selectors. 
-func (c *cronWorkflows) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.CronWorkflowList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.CronWorkflowList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cronworkflows"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cronWorkflows. -func (c *cronWorkflows) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("cronworkflows"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cronWorkflow and creates it. Returns the server's representation of the cronWorkflow, and an error, if there is any. -func (c *cronWorkflows) Create(ctx context.Context, cronWorkflow *v1alpha1.CronWorkflow, opts v1.CreateOptions) (result *v1alpha1.CronWorkflow, err error) { - result = &v1alpha1.CronWorkflow{} - err = c.client.Post(). - Namespace(c.ns). - Resource("cronworkflows"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cronWorkflow). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cronWorkflow and updates it. Returns the server's representation of the cronWorkflow, and an error, if there is any. -func (c *cronWorkflows) Update(ctx context.Context, cronWorkflow *v1alpha1.CronWorkflow, opts v1.UpdateOptions) (result *v1alpha1.CronWorkflow, err error) { - result = &v1alpha1.CronWorkflow{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cronworkflows"). - Name(cronWorkflow.Name). - VersionedParams(&opts, scheme.ParameterCodec). 
- Body(cronWorkflow). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cronWorkflow and deletes it. Returns an error if one occurs. -func (c *cronWorkflows) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("cronworkflows"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cronWorkflows) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("cronworkflows"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cronWorkflow. -func (c *cronWorkflows) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CronWorkflow, err error) { - result = &v1alpha1.CronWorkflow{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("cronworkflows"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/doc.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/doc.go deleted file mode 100644 index 93a7ca4e0..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. 
-package v1alpha1 diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/generated_expansion.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/generated_expansion.go deleted file mode 100644 index eb6fc2098..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/generated_expansion.go +++ /dev/null @@ -1,19 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -type ClusterWorkflowTemplateExpansion interface{} - -type CronWorkflowExpansion interface{} - -type WorkflowExpansion interface{} - -type WorkflowArtifactGCTaskExpansion interface{} - -type WorkflowEventBindingExpansion interface{} - -type WorkflowTaskResultExpansion interface{} - -type WorkflowTaskSetExpansion interface{} - -type WorkflowTemplateExpansion interface{} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflow.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflow.go deleted file mode 100644 index 3761f3c33..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflow.go +++ /dev/null @@ -1,162 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - "time" - - v1alpha1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - scheme "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// WorkflowsGetter has a method to return a WorkflowInterface. -// A group's client should implement this interface. 
-type WorkflowsGetter interface { - Workflows(namespace string) WorkflowInterface -} - -// WorkflowInterface has methods to work with Workflow resources. -type WorkflowInterface interface { - Create(ctx context.Context, workflow *v1alpha1.Workflow, opts v1.CreateOptions) (*v1alpha1.Workflow, error) - Update(ctx context.Context, workflow *v1alpha1.Workflow, opts v1.UpdateOptions) (*v1alpha1.Workflow, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Workflow, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.WorkflowList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Workflow, err error) - WorkflowExpansion -} - -// workflows implements WorkflowInterface -type workflows struct { - client rest.Interface - ns string -} - -// newWorkflows returns a Workflows -func newWorkflows(c *ArgoprojV1alpha1Client, namespace string) *workflows { - return &workflows{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the workflow, and returns the corresponding workflow object, and an error if there is any. -func (c *workflows) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Workflow, err error) { - result = &v1alpha1.Workflow{} - err = c.client.Get(). - Namespace(c.ns). - Resource("workflows"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Workflows that match those selectors. 
-func (c *workflows) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.WorkflowList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.WorkflowList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("workflows"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested workflows. -func (c *workflows) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("workflows"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a workflow and creates it. Returns the server's representation of the workflow, and an error, if there is any. -func (c *workflows) Create(ctx context.Context, workflow *v1alpha1.Workflow, opts v1.CreateOptions) (result *v1alpha1.Workflow, err error) { - result = &v1alpha1.Workflow{} - err = c.client.Post(). - Namespace(c.ns). - Resource("workflows"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(workflow). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a workflow and updates it. Returns the server's representation of the workflow, and an error, if there is any. -func (c *workflows) Update(ctx context.Context, workflow *v1alpha1.Workflow, opts v1.UpdateOptions) (result *v1alpha1.Workflow, err error) { - result = &v1alpha1.Workflow{} - err = c.client.Put(). - Namespace(c.ns). - Resource("workflows"). - Name(workflow.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(workflow). - Do(ctx). 
- Into(result) - return -} - -// Delete takes name of the workflow and deletes it. Returns an error if one occurs. -func (c *workflows) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("workflows"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *workflows) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("workflows"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched workflow. -func (c *workflows) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Workflow, err error) { - result = &v1alpha1.Workflow{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("workflows"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflow_client.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflow_client.go deleted file mode 100644 index 49da7948a..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflow_client.go +++ /dev/null @@ -1,108 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - v1alpha1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme" - rest "k8s.io/client-go/rest" -) - -type ArgoprojV1alpha1Interface interface { - RESTClient() rest.Interface - ClusterWorkflowTemplatesGetter - CronWorkflowsGetter - WorkflowsGetter - WorkflowArtifactGCTasksGetter - WorkflowEventBindingsGetter - WorkflowTaskResultsGetter - WorkflowTaskSetsGetter - WorkflowTemplatesGetter -} - -// ArgoprojV1alpha1Client is used to interact with features provided by the argoproj.io group. -type ArgoprojV1alpha1Client struct { - restClient rest.Interface -} - -func (c *ArgoprojV1alpha1Client) ClusterWorkflowTemplates() ClusterWorkflowTemplateInterface { - return newClusterWorkflowTemplates(c) -} - -func (c *ArgoprojV1alpha1Client) CronWorkflows(namespace string) CronWorkflowInterface { - return newCronWorkflows(c, namespace) -} - -func (c *ArgoprojV1alpha1Client) Workflows(namespace string) WorkflowInterface { - return newWorkflows(c, namespace) -} - -func (c *ArgoprojV1alpha1Client) WorkflowArtifactGCTasks(namespace string) WorkflowArtifactGCTaskInterface { - return newWorkflowArtifactGCTasks(c, namespace) -} - -func (c *ArgoprojV1alpha1Client) WorkflowEventBindings(namespace string) WorkflowEventBindingInterface { - return newWorkflowEventBindings(c, namespace) -} - -func (c *ArgoprojV1alpha1Client) WorkflowTaskResults(namespace string) WorkflowTaskResultInterface { - return newWorkflowTaskResults(c, namespace) -} - -func (c *ArgoprojV1alpha1Client) WorkflowTaskSets(namespace string) WorkflowTaskSetInterface { - return newWorkflowTaskSets(c, namespace) -} - -func (c *ArgoprojV1alpha1Client) WorkflowTemplates(namespace string) WorkflowTemplateInterface { - return newWorkflowTemplates(c, namespace) -} - -// NewForConfig creates a new ArgoprojV1alpha1Client for the given config. 
-func NewForConfig(c *rest.Config) (*ArgoprojV1alpha1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &ArgoprojV1alpha1Client{client}, nil -} - -// NewForConfigOrDie creates a new ArgoprojV1alpha1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *ArgoprojV1alpha1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new ArgoprojV1alpha1Client for the given RESTClient. -func New(c rest.Interface) *ArgoprojV1alpha1Client { - return &ArgoprojV1alpha1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *ArgoprojV1alpha1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflowartifactgctask.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflowartifactgctask.go deleted file mode 100644 index 3d8e3c1f1..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflowartifactgctask.go +++ /dev/null @@ -1,179 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - "context" - "time" - - v1alpha1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - scheme "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// WorkflowArtifactGCTasksGetter has a method to return a WorkflowArtifactGCTaskInterface. -// A group's client should implement this interface. -type WorkflowArtifactGCTasksGetter interface { - WorkflowArtifactGCTasks(namespace string) WorkflowArtifactGCTaskInterface -} - -// WorkflowArtifactGCTaskInterface has methods to work with WorkflowArtifactGCTask resources. -type WorkflowArtifactGCTaskInterface interface { - Create(ctx context.Context, workflowArtifactGCTask *v1alpha1.WorkflowArtifactGCTask, opts v1.CreateOptions) (*v1alpha1.WorkflowArtifactGCTask, error) - Update(ctx context.Context, workflowArtifactGCTask *v1alpha1.WorkflowArtifactGCTask, opts v1.UpdateOptions) (*v1alpha1.WorkflowArtifactGCTask, error) - UpdateStatus(ctx context.Context, workflowArtifactGCTask *v1alpha1.WorkflowArtifactGCTask, opts v1.UpdateOptions) (*v1alpha1.WorkflowArtifactGCTask, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.WorkflowArtifactGCTask, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.WorkflowArtifactGCTaskList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.WorkflowArtifactGCTask, err error) - WorkflowArtifactGCTaskExpansion -} - -// workflowArtifactGCTasks implements WorkflowArtifactGCTaskInterface -type 
workflowArtifactGCTasks struct { - client rest.Interface - ns string -} - -// newWorkflowArtifactGCTasks returns a WorkflowArtifactGCTasks -func newWorkflowArtifactGCTasks(c *ArgoprojV1alpha1Client, namespace string) *workflowArtifactGCTasks { - return &workflowArtifactGCTasks{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the workflowArtifactGCTask, and returns the corresponding workflowArtifactGCTask object, and an error if there is any. -func (c *workflowArtifactGCTasks) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.WorkflowArtifactGCTask, err error) { - result = &v1alpha1.WorkflowArtifactGCTask{} - err = c.client.Get(). - Namespace(c.ns). - Resource("workflowartifactgctasks"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of WorkflowArtifactGCTasks that match those selectors. -func (c *workflowArtifactGCTasks) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.WorkflowArtifactGCTaskList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.WorkflowArtifactGCTaskList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("workflowartifactgctasks"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested workflowArtifactGCTasks. -func (c *workflowArtifactGCTasks) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("workflowartifactgctasks"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). 
- Watch(ctx) -} - -// Create takes the representation of a workflowArtifactGCTask and creates it. Returns the server's representation of the workflowArtifactGCTask, and an error, if there is any. -func (c *workflowArtifactGCTasks) Create(ctx context.Context, workflowArtifactGCTask *v1alpha1.WorkflowArtifactGCTask, opts v1.CreateOptions) (result *v1alpha1.WorkflowArtifactGCTask, err error) { - result = &v1alpha1.WorkflowArtifactGCTask{} - err = c.client.Post(). - Namespace(c.ns). - Resource("workflowartifactgctasks"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(workflowArtifactGCTask). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a workflowArtifactGCTask and updates it. Returns the server's representation of the workflowArtifactGCTask, and an error, if there is any. -func (c *workflowArtifactGCTasks) Update(ctx context.Context, workflowArtifactGCTask *v1alpha1.WorkflowArtifactGCTask, opts v1.UpdateOptions) (result *v1alpha1.WorkflowArtifactGCTask, err error) { - result = &v1alpha1.WorkflowArtifactGCTask{} - err = c.client.Put(). - Namespace(c.ns). - Resource("workflowartifactgctasks"). - Name(workflowArtifactGCTask.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(workflowArtifactGCTask). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *workflowArtifactGCTasks) UpdateStatus(ctx context.Context, workflowArtifactGCTask *v1alpha1.WorkflowArtifactGCTask, opts v1.UpdateOptions) (result *v1alpha1.WorkflowArtifactGCTask, err error) { - result = &v1alpha1.WorkflowArtifactGCTask{} - err = c.client.Put(). - Namespace(c.ns). - Resource("workflowartifactgctasks"). - Name(workflowArtifactGCTask.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(workflowArtifactGCTask). - Do(ctx). 
- Into(result) - return -} - -// Delete takes name of the workflowArtifactGCTask and deletes it. Returns an error if one occurs. -func (c *workflowArtifactGCTasks) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("workflowartifactgctasks"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *workflowArtifactGCTasks) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("workflowartifactgctasks"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched workflowArtifactGCTask. -func (c *workflowArtifactGCTasks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.WorkflowArtifactGCTask, err error) { - result = &v1alpha1.WorkflowArtifactGCTask{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("workflowartifactgctasks"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workfloweventbinding.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workfloweventbinding.go deleted file mode 100644 index c1dee2278..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workfloweventbinding.go +++ /dev/null @@ -1,162 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - "context" - "time" - - v1alpha1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - scheme "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// WorkflowEventBindingsGetter has a method to return a WorkflowEventBindingInterface. -// A group's client should implement this interface. -type WorkflowEventBindingsGetter interface { - WorkflowEventBindings(namespace string) WorkflowEventBindingInterface -} - -// WorkflowEventBindingInterface has methods to work with WorkflowEventBinding resources. -type WorkflowEventBindingInterface interface { - Create(ctx context.Context, workflowEventBinding *v1alpha1.WorkflowEventBinding, opts v1.CreateOptions) (*v1alpha1.WorkflowEventBinding, error) - Update(ctx context.Context, workflowEventBinding *v1alpha1.WorkflowEventBinding, opts v1.UpdateOptions) (*v1alpha1.WorkflowEventBinding, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.WorkflowEventBinding, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.WorkflowEventBindingList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.WorkflowEventBinding, err error) - WorkflowEventBindingExpansion -} - -// workflowEventBindings implements WorkflowEventBindingInterface -type workflowEventBindings struct { - client rest.Interface - ns string -} - -// newWorkflowEventBindings returns a WorkflowEventBindings -func newWorkflowEventBindings(c *ArgoprojV1alpha1Client, namespace 
string) *workflowEventBindings { - return &workflowEventBindings{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the workflowEventBinding, and returns the corresponding workflowEventBinding object, and an error if there is any. -func (c *workflowEventBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.WorkflowEventBinding, err error) { - result = &v1alpha1.WorkflowEventBinding{} - err = c.client.Get(). - Namespace(c.ns). - Resource("workfloweventbindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of WorkflowEventBindings that match those selectors. -func (c *workflowEventBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.WorkflowEventBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.WorkflowEventBindingList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("workfloweventbindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested workflowEventBindings. -func (c *workflowEventBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("workfloweventbindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a workflowEventBinding and creates it. Returns the server's representation of the workflowEventBinding, and an error, if there is any. 
-func (c *workflowEventBindings) Create(ctx context.Context, workflowEventBinding *v1alpha1.WorkflowEventBinding, opts v1.CreateOptions) (result *v1alpha1.WorkflowEventBinding, err error) { - result = &v1alpha1.WorkflowEventBinding{} - err = c.client.Post(). - Namespace(c.ns). - Resource("workfloweventbindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(workflowEventBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a workflowEventBinding and updates it. Returns the server's representation of the workflowEventBinding, and an error, if there is any. -func (c *workflowEventBindings) Update(ctx context.Context, workflowEventBinding *v1alpha1.WorkflowEventBinding, opts v1.UpdateOptions) (result *v1alpha1.WorkflowEventBinding, err error) { - result = &v1alpha1.WorkflowEventBinding{} - err = c.client.Put(). - Namespace(c.ns). - Resource("workfloweventbindings"). - Name(workflowEventBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(workflowEventBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the workflowEventBinding and deletes it. Returns an error if one occurs. -func (c *workflowEventBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("workfloweventbindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *workflowEventBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("workfloweventbindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). 
- Error() -} - -// Patch applies the patch and returns the patched workflowEventBinding. -func (c *workflowEventBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.WorkflowEventBinding, err error) { - result = &v1alpha1.WorkflowEventBinding{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("workfloweventbindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflowtaskresult.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflowtaskresult.go deleted file mode 100644 index a833fa6f3..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflowtaskresult.go +++ /dev/null @@ -1,162 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - "time" - - v1alpha1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - scheme "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// WorkflowTaskResultsGetter has a method to return a WorkflowTaskResultInterface. -// A group's client should implement this interface. -type WorkflowTaskResultsGetter interface { - WorkflowTaskResults(namespace string) WorkflowTaskResultInterface -} - -// WorkflowTaskResultInterface has methods to work with WorkflowTaskResult resources. 
-type WorkflowTaskResultInterface interface { - Create(ctx context.Context, workflowTaskResult *v1alpha1.WorkflowTaskResult, opts v1.CreateOptions) (*v1alpha1.WorkflowTaskResult, error) - Update(ctx context.Context, workflowTaskResult *v1alpha1.WorkflowTaskResult, opts v1.UpdateOptions) (*v1alpha1.WorkflowTaskResult, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.WorkflowTaskResult, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.WorkflowTaskResultList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.WorkflowTaskResult, err error) - WorkflowTaskResultExpansion -} - -// workflowTaskResults implements WorkflowTaskResultInterface -type workflowTaskResults struct { - client rest.Interface - ns string -} - -// newWorkflowTaskResults returns a WorkflowTaskResults -func newWorkflowTaskResults(c *ArgoprojV1alpha1Client, namespace string) *workflowTaskResults { - return &workflowTaskResults{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the workflowTaskResult, and returns the corresponding workflowTaskResult object, and an error if there is any. -func (c *workflowTaskResults) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.WorkflowTaskResult, err error) { - result = &v1alpha1.WorkflowTaskResult{} - err = c.client.Get(). - Namespace(c.ns). - Resource("workflowtaskresults"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of WorkflowTaskResults that match those selectors. 
-func (c *workflowTaskResults) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.WorkflowTaskResultList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.WorkflowTaskResultList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("workflowtaskresults"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested workflowTaskResults. -func (c *workflowTaskResults) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("workflowtaskresults"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a workflowTaskResult and creates it. Returns the server's representation of the workflowTaskResult, and an error, if there is any. -func (c *workflowTaskResults) Create(ctx context.Context, workflowTaskResult *v1alpha1.WorkflowTaskResult, opts v1.CreateOptions) (result *v1alpha1.WorkflowTaskResult, err error) { - result = &v1alpha1.WorkflowTaskResult{} - err = c.client.Post(). - Namespace(c.ns). - Resource("workflowtaskresults"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(workflowTaskResult). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a workflowTaskResult and updates it. Returns the server's representation of the workflowTaskResult, and an error, if there is any. 
-func (c *workflowTaskResults) Update(ctx context.Context, workflowTaskResult *v1alpha1.WorkflowTaskResult, opts v1.UpdateOptions) (result *v1alpha1.WorkflowTaskResult, err error) { - result = &v1alpha1.WorkflowTaskResult{} - err = c.client.Put(). - Namespace(c.ns). - Resource("workflowtaskresults"). - Name(workflowTaskResult.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(workflowTaskResult). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the workflowTaskResult and deletes it. Returns an error if one occurs. -func (c *workflowTaskResults) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("workflowtaskresults"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *workflowTaskResults) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("workflowtaskresults"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched workflowTaskResult. -func (c *workflowTaskResults) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.WorkflowTaskResult, err error) { - result = &v1alpha1.WorkflowTaskResult{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("workflowtaskresults"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflowtaskset.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflowtaskset.go deleted file mode 100644 index d0d9e48fa..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflowtaskset.go +++ /dev/null @@ -1,179 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - "time" - - v1alpha1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - scheme "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// WorkflowTaskSetsGetter has a method to return a WorkflowTaskSetInterface. -// A group's client should implement this interface. -type WorkflowTaskSetsGetter interface { - WorkflowTaskSets(namespace string) WorkflowTaskSetInterface -} - -// WorkflowTaskSetInterface has methods to work with WorkflowTaskSet resources. 
-type WorkflowTaskSetInterface interface { - Create(ctx context.Context, workflowTaskSet *v1alpha1.WorkflowTaskSet, opts v1.CreateOptions) (*v1alpha1.WorkflowTaskSet, error) - Update(ctx context.Context, workflowTaskSet *v1alpha1.WorkflowTaskSet, opts v1.UpdateOptions) (*v1alpha1.WorkflowTaskSet, error) - UpdateStatus(ctx context.Context, workflowTaskSet *v1alpha1.WorkflowTaskSet, opts v1.UpdateOptions) (*v1alpha1.WorkflowTaskSet, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.WorkflowTaskSet, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.WorkflowTaskSetList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.WorkflowTaskSet, err error) - WorkflowTaskSetExpansion -} - -// workflowTaskSets implements WorkflowTaskSetInterface -type workflowTaskSets struct { - client rest.Interface - ns string -} - -// newWorkflowTaskSets returns a WorkflowTaskSets -func newWorkflowTaskSets(c *ArgoprojV1alpha1Client, namespace string) *workflowTaskSets { - return &workflowTaskSets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the workflowTaskSet, and returns the corresponding workflowTaskSet object, and an error if there is any. -func (c *workflowTaskSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.WorkflowTaskSet, err error) { - result = &v1alpha1.WorkflowTaskSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("workflowtasksets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of WorkflowTaskSets that match those selectors. -func (c *workflowTaskSets) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.WorkflowTaskSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.WorkflowTaskSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("workflowtasksets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested workflowTaskSets. -func (c *workflowTaskSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("workflowtasksets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a workflowTaskSet and creates it. Returns the server's representation of the workflowTaskSet, and an error, if there is any. -func (c *workflowTaskSets) Create(ctx context.Context, workflowTaskSet *v1alpha1.WorkflowTaskSet, opts v1.CreateOptions) (result *v1alpha1.WorkflowTaskSet, err error) { - result = &v1alpha1.WorkflowTaskSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("workflowtasksets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(workflowTaskSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a workflowTaskSet and updates it. Returns the server's representation of the workflowTaskSet, and an error, if there is any. 
-func (c *workflowTaskSets) Update(ctx context.Context, workflowTaskSet *v1alpha1.WorkflowTaskSet, opts v1.UpdateOptions) (result *v1alpha1.WorkflowTaskSet, err error) { - result = &v1alpha1.WorkflowTaskSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("workflowtasksets"). - Name(workflowTaskSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(workflowTaskSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *workflowTaskSets) UpdateStatus(ctx context.Context, workflowTaskSet *v1alpha1.WorkflowTaskSet, opts v1.UpdateOptions) (result *v1alpha1.WorkflowTaskSet, err error) { - result = &v1alpha1.WorkflowTaskSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("workflowtasksets"). - Name(workflowTaskSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(workflowTaskSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the workflowTaskSet and deletes it. Returns an error if one occurs. -func (c *workflowTaskSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("workflowtasksets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *workflowTaskSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("workflowtasksets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched workflowTaskSet. 
-func (c *workflowTaskSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.WorkflowTaskSet, err error) { - result = &v1alpha1.WorkflowTaskSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("workflowtasksets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflowtemplate.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflowtemplate.go deleted file mode 100644 index 992d044c8..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1/workflowtemplate.go +++ /dev/null @@ -1,162 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - "time" - - v1alpha1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - scheme "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// WorkflowTemplatesGetter has a method to return a WorkflowTemplateInterface. -// A group's client should implement this interface. -type WorkflowTemplatesGetter interface { - WorkflowTemplates(namespace string) WorkflowTemplateInterface -} - -// WorkflowTemplateInterface has methods to work with WorkflowTemplate resources. 
-type WorkflowTemplateInterface interface { - Create(ctx context.Context, workflowTemplate *v1alpha1.WorkflowTemplate, opts v1.CreateOptions) (*v1alpha1.WorkflowTemplate, error) - Update(ctx context.Context, workflowTemplate *v1alpha1.WorkflowTemplate, opts v1.UpdateOptions) (*v1alpha1.WorkflowTemplate, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.WorkflowTemplate, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.WorkflowTemplateList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.WorkflowTemplate, err error) - WorkflowTemplateExpansion -} - -// workflowTemplates implements WorkflowTemplateInterface -type workflowTemplates struct { - client rest.Interface - ns string -} - -// newWorkflowTemplates returns a WorkflowTemplates -func newWorkflowTemplates(c *ArgoprojV1alpha1Client, namespace string) *workflowTemplates { - return &workflowTemplates{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the workflowTemplate, and returns the corresponding workflowTemplate object, and an error if there is any. -func (c *workflowTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.WorkflowTemplate, err error) { - result = &v1alpha1.WorkflowTemplate{} - err = c.client.Get(). - Namespace(c.ns). - Resource("workflowtemplates"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of WorkflowTemplates that match those selectors. 
-func (c *workflowTemplates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.WorkflowTemplateList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.WorkflowTemplateList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("workflowtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested workflowTemplates. -func (c *workflowTemplates) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("workflowtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a workflowTemplate and creates it. Returns the server's representation of the workflowTemplate, and an error, if there is any. -func (c *workflowTemplates) Create(ctx context.Context, workflowTemplate *v1alpha1.WorkflowTemplate, opts v1.CreateOptions) (result *v1alpha1.WorkflowTemplate, err error) { - result = &v1alpha1.WorkflowTemplate{} - err = c.client.Post(). - Namespace(c.ns). - Resource("workflowtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(workflowTemplate). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a workflowTemplate and updates it. Returns the server's representation of the workflowTemplate, and an error, if there is any. -func (c *workflowTemplates) Update(ctx context.Context, workflowTemplate *v1alpha1.WorkflowTemplate, opts v1.UpdateOptions) (result *v1alpha1.WorkflowTemplate, err error) { - result = &v1alpha1.WorkflowTemplate{} - err = c.client.Put(). - Namespace(c.ns). 
- Resource("workflowtemplates"). - Name(workflowTemplate.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(workflowTemplate). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the workflowTemplate and deletes it. Returns an error if one occurs. -func (c *workflowTemplates) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("workflowtemplates"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *workflowTemplates) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("workflowtemplates"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched workflowTemplate. -func (c *workflowTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.WorkflowTemplate, err error) { - result = &v1alpha1.WorkflowTemplate{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("workflowtemplates"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/json/fix.go b/vendor/github.com/argoproj/argo-workflows/v3/util/json/fix.go deleted file mode 100644 index 6a7ee41ef..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/util/json/fix.go +++ /dev/null @@ -1,11 +0,0 @@ -package json - -import "strings" - -func Fix(s string) string { - // https://stackoverflow.com/questions/28595664/how-to-stop-json-marshal-from-escaping-and/28596225 - s = strings.Replace(s, "\\u003c", "<", -1) - s = strings.Replace(s, "\\u003e", ">", -1) - s = strings.Replace(s, "\\u0026", "&", -1) - return s -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/json/json.go b/vendor/github.com/argoproj/argo-workflows/v3/util/json/json.go deleted file mode 100644 index fda3296e4..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/util/json/json.go +++ /dev/null @@ -1,36 +0,0 @@ -package json - -import ( - "encoding/json" - "io" - - gwruntime "github.com/grpc-ecosystem/grpc-gateway/runtime" -) - -// JSONMarshaler is a type which satisfies the grpc-gateway Marshaler interface -type JSONMarshaler struct{} - -// ContentType implements gwruntime.Marshaler. -func (j *JSONMarshaler) ContentType() string { - return "application/json" -} - -// Marshal implements gwruntime.Marshaler. -func (j *JSONMarshaler) Marshal(v interface{}) ([]byte, error) { - return json.Marshal(v) -} - -// NewDecoder implements gwruntime.Marshaler. -func (j *JSONMarshaler) NewDecoder(r io.Reader) gwruntime.Decoder { - return json.NewDecoder(r) -} - -// NewEncoder implements gwruntime.Marshaler. -func (j *JSONMarshaler) NewEncoder(w io.Writer) gwruntime.Encoder { - return json.NewEncoder(w) -} - -// Unmarshal implements gwruntime.Marshaler. 
-func (j *JSONMarshaler) Unmarshal(data []byte, v interface{}) error { - return json.Unmarshal(data, v) -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/json/jsonify.go b/vendor/github.com/argoproj/argo-workflows/v3/util/json/jsonify.go deleted file mode 100644 index bdb25bce5..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/util/json/jsonify.go +++ /dev/null @@ -1,12 +0,0 @@ -package json - -import "encoding/json" - -func Jsonify(v interface{}) (map[string]interface{}, error) { - data, err := json.Marshal(v) - if err != nil { - return nil, err - } - x := make(map[string]interface{}) - return x, json.Unmarshal(data, &x) -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/slice/slice.go b/vendor/github.com/argoproj/argo-workflows/v3/util/slice/slice.go deleted file mode 100644 index 2787cce42..000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/util/slice/slice.go +++ /dev/null @@ -1,21 +0,0 @@ -package slice - -func RemoveString(slice []string, element string) []string { - for i, v := range slice { - if element == v { - ret := make([]string, 0, len(slice)-1) - ret = append(ret, slice[:i]...) - return append(ret, slice[i+1:]...) - } - } - return slice -} - -func ContainsString(slice []string, element string) bool { - for _, item := range slice { - if item == element { - return true - } - } - return false -} |
